summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGeorg Brandl <georg@python.org>2012-04-11 12:46:24 +0200
committerGeorg Brandl <georg@python.org>2012-04-11 12:46:24 +0200
commitd113fbbdccda87a19675e624650dd02435bf7988 (patch)
treec3f0f368c5f2bfd5d750c0bb082c0b9744b70894
parentb2c574caec7279f6078b9ffecef84f4ed3370725 (diff)
parent25f9ec117a47548f90748ee892b2d5c5eb9e5c79 (diff)
downloadcpython-d113fbbdccda87a19675e624650dd02435bf7988.tar.gz
Merge 3.2.3 release clone.
-rw-r--r--Doc/conf.py7
-rw-r--r--Doc/faq/design.rst10
-rw-r--r--Doc/faq/programming.rst4
-rw-r--r--Doc/glossary.rst8
-rw-r--r--Doc/howto/advocacy.rst3
-rw-r--r--Doc/howto/cporting.rst111
-rw-r--r--Doc/howto/curses.rst6
-rw-r--r--Doc/howto/logging-cookbook.rst450
-rw-r--r--Doc/howto/logging.rst16
-rw-r--r--Doc/howto/regex.rst4
-rw-r--r--Doc/howto/urllib2.rst4
-rw-r--r--Doc/includes/capsulethunk.h134
-rw-r--r--Doc/library/argparse.rst9
-rw-r--r--Doc/library/email.encoders.rst4
-rw-r--r--Doc/library/functions.rst7
-rw-r--r--Doc/library/imp.rst4
-rw-r--r--Doc/library/json.rst8
-rw-r--r--Doc/library/logging.handlers.rst2
-rw-r--r--Doc/library/logging.rst21
-rw-r--r--Doc/library/markup.rst4
-rw-r--r--Doc/library/operator.rst2
-rw-r--r--Doc/library/re.rst142
-rw-r--r--Doc/library/signal.rst92
-rw-r--r--Doc/library/sqlite3.rst16
-rw-r--r--Doc/library/stdtypes.rst9
-rw-r--r--Doc/library/subprocess.rst2
-rw-r--r--Doc/library/syslog.rst3
-rw-r--r--Doc/library/threading.rst307
-rw-r--r--Doc/library/time.rst4
-rw-r--r--Doc/library/unittest.rst2
-rw-r--r--Doc/library/urllib.parse.rst7
-rw-r--r--Doc/library/urllib.request.rst103
-rw-r--r--Doc/library/webbrowser.rst2
-rw-r--r--Doc/library/xml.dom.minidom.rst8
-rw-r--r--Doc/library/xml.dom.pulldom.rst87
-rw-r--r--Doc/library/xml.etree.elementtree.rst13
-rw-r--r--Doc/reference/compound_stmts.rst5
-rw-r--r--Doc/tools/sphinxext/download.html6
-rw-r--r--Doc/tools/sphinxext/layout.html1
-rw-r--r--Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css170
-rw-r--r--Doc/tools/sphinxext/pydoctheme/theme.conf23
-rw-r--r--Doc/tools/sphinxext/pyspecific.py4
-rw-r--r--Doc/tools/sphinxext/static/copybutton.js3
-rw-r--r--Doc/tools/sphinxext/static/sidebar.js155
-rw-r--r--Doc/tutorial/classes.rst5
-rw-r--r--Include/Python.h1
-rw-r--r--Include/patchlevel.h2
-rw-r--r--Lib/_weakrefset.py62
-rw-r--r--Lib/asyncore.py18
-rw-r--r--Lib/concurrent/futures/_base.py8
-rw-r--r--Lib/distutils/tests/test_bdist_msi.py10
-rw-r--r--Lib/distutils/tests/test_sdist.py3
-rw-r--r--Lib/doctest.py10
-rw-r--r--Lib/email/__init__.py1
-rw-r--r--Lib/email/feedparser.py2
-rw-r--r--Lib/email/generator.py4
-rw-r--r--Lib/email/header.py7
-rw-r--r--Lib/email/parser.py2
-rw-r--r--Lib/email/test/test_email.py59
-rw-r--r--Lib/http/server.py7
-rw-r--r--Lib/idlelib/CallTipWindow.py6
-rw-r--r--Lib/idlelib/NEWS.txt12
-rw-r--r--Lib/idlelib/PyShell.py6
-rw-r--r--Lib/idlelib/configHandler.py2
-rw-r--r--Lib/idlelib/tabbedpages.py4
-rw-r--r--Lib/lib2to3/tests/test_parser.py13
-rw-r--r--Lib/logging/__init__.py30
-rw-r--r--Lib/logging/handlers.py94
-rw-r--r--Lib/multiprocessing/connection.py13
-rwxr-xr-xLib/pydoc.py4
-rw-r--r--Lib/re.py11
-rw-r--r--Lib/rlcompleter.py36
-rw-r--r--Lib/socket.py11
-rw-r--r--Lib/socketserver.py15
-rw-r--r--Lib/subprocess.py12
-rwxr-xr-xLib/test/regrtest.py27
-rw-r--r--Lib/test/test_aifc.py163
-rw-r--r--Lib/test/test_ast.py12
-rw-r--r--Lib/test/test_asyncore.py20
-rw-r--r--Lib/test/test_base64.py5
-rw-r--r--Lib/test/test_cgi.py5
-rw-r--r--Lib/test/test_concurrent_futures.py18
-rw-r--r--Lib/test/test_descr.py22
-rw-r--r--Lib/test/test_dict.py20
-rw-r--r--Lib/test/test_exceptions.py2
-rw-r--r--Lib/test/test_fractions.py22
-rw-r--r--Lib/test/test_mailbox.py54
-rw-r--r--Lib/test/test_marshal.py30
-rw-r--r--Lib/test/test_minidom.py26
-rw-r--r--Lib/test/test_multiprocessing.py20
-rw-r--r--Lib/test/test_queue.py8
-rw-r--r--Lib/test/test_re.py50
-rw-r--r--Lib/test/test_socket.py1
-rw-r--r--Lib/test/test_socketserver.py39
-rw-r--r--Lib/test/test_strptime.py31
-rw-r--r--Lib/test/test_subprocess.py67
-rw-r--r--Lib/test/test_thread.py23
-rw-r--r--Lib/test/test_threading.py1
-rw-r--r--Lib/test/test_tools.py108
-rw-r--r--Lib/test/test_unicode.py15
-rw-r--r--Lib/test/test_weakref.py60
-rw-r--r--Lib/test/test_weakset.py103
-rw-r--r--Lib/test/test_zlib.py23
-rw-r--r--Lib/tkinter/ttk.py2
-rw-r--r--Lib/unittest/loader.py13
-rw-r--r--Lib/urllib/request.py5
-rw-r--r--Lib/urllib/response.py2
-rw-r--r--Lib/weakref.py4
-rw-r--r--Lib/xmlrpc/server.py2
-rw-r--r--Mac/README10
-rw-r--r--Makefile.pre.in7
-rw-r--r--Misc/ACKS7
-rw-r--r--Misc/NEWS165
-rw-r--r--Modules/_io/_iomodule.c2
-rw-r--r--Modules/_io/_iomodule.h2
-rw-r--r--Modules/_io/textio.c2
-rw-r--r--Modules/_posixsubprocess.c13
-rw-r--r--Modules/_sre.c80
-rw-r--r--Modules/_threadmodule.c3
-rw-r--r--Modules/mathmodule.c14
-rw-r--r--Modules/python.c6
-rw-r--r--Modules/sre.h2
-rw-r--r--Modules/timemodule.c5
-rw-r--r--Objects/accu.c1
-rw-r--r--Objects/bytearrayobject.c8
-rw-r--r--Objects/bytesobject.c2
-rw-r--r--Objects/dictobject.c112
-rw-r--r--Objects/listobject.c1
-rw-r--r--Objects/tupleobject.c1
-rw-r--r--Objects/typeobject.c39
-rw-r--r--Objects/unicodeobject.c6
-rw-r--r--PC/_subprocess.c1
-rw-r--r--PC/pyconfig.h2
-rw-r--r--PC/winreg.c2
-rw-r--r--Python/ast.c10
-rw-r--r--Python/future.c11
-rw-r--r--Python/import.c166
-rw-r--r--Python/marshal.c16
-rw-r--r--Python/pythonrun.c51
-rw-r--r--Python/thread_pthread.h20
-rw-r--r--Tools/msi/msi.py1
-rwxr-xr-xTools/scripts/abitype.py88
-rwxr-xr-xTools/scripts/find_recursionlimit.py24
-rwxr-xr-xTools/scripts/findnocoding.py54
-rwxr-xr-xTools/scripts/fixcid.py2
-rwxr-xr-xTools/scripts/md5sum.py2
-rwxr-xr-xTools/scripts/parseentities.py3
-rwxr-xr-xTools/scripts/pdeps.py10
-rwxr-xr-xconfigure2
-rw-r--r--configure.ac (renamed from configure.in)0
-rw-r--r--pyconfig.h.in2
-rw-r--r--setup.py2
152 files changed, 3400 insertions, 1012 deletions
diff --git a/Doc/conf.py b/Doc/conf.py
index 85eb9fa3bb..555f281b5c 100644
--- a/Doc/conf.py
+++ b/Doc/conf.py
@@ -65,9 +65,12 @@ highlight_language = 'python3'
# Options for HTML output
# -----------------------
-html_theme = 'default'
+html_theme = 'pydoctheme'
+html_theme_path = ['tools/sphinxext']
html_theme_options = {'collapsiblesidebar': True}
+html_short_title = '%s Documentation' % release
+
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
@@ -88,7 +91,7 @@ html_additional_pages = {
}
# Output an OpenSearch description file.
-html_use_opensearch = 'http://docs.python.org/dev/py3k'
+html_use_opensearch = 'http://docs.python.org/3.2'
# Additional static files.
html_static_path = ['tools/sphinxext/static']
diff --git a/Doc/faq/design.rst b/Doc/faq/design.rst
index e45aaaacb6..87cc942f31 100644
--- a/Doc/faq/design.rst
+++ b/Doc/faq/design.rst
@@ -284,8 +284,9 @@ Similar methods exist for bytes and bytearray objects.
How fast are exceptions?
------------------------
-A try/except block is extremely efficient. Actually catching an exception is
-expensive. In versions of Python prior to 2.0 it was common to use this idiom::
+A try/except block is extremely efficient if no exceptions are raised. Actually
+catching an exception is expensive. In versions of Python prior to 2.0 it was
+common to use this idiom::
try:
value = mydict[key]
@@ -296,11 +297,10 @@ expensive. In versions of Python prior to 2.0 it was common to use this idiom::
This only made sense when you expected the dict to have the key almost all the
time. If that wasn't the case, you coded it like this::
- if mydict.has_key(key):
+ if key in mydict:
value = mydict[key]
else:
- mydict[key] = getvalue(key)
- value = mydict[key]
+ value = mydict[key] = getvalue(key)
For this specific case, you could also use ``value = dict.setdefault(key,
getvalue(key))``, but only if the ``getvalue()`` call is cheap enough because it
diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst
index 5de36764c5..d33e25be85 100644
--- a/Doc/faq/programming.rst
+++ b/Doc/faq/programming.rst
@@ -794,9 +794,9 @@ My program is too slow. How do I speed it up?
That's a tough one, in general. First, here are a list of things to
remember before diving further:
-* Performance characteristics vary accross Python implementations. This FAQ
+* Performance characteristics vary across Python implementations. This FAQ
focusses on :term:`CPython`.
-* Behaviour can vary accross operating systems, especially when talking about
+* Behaviour can vary across operating systems, especially when talking about
I/O or multi-threading.
* You should always find the hot spots in your program *before* attempting to
optimize any code (see the :mod:`profile` module).
diff --git a/Doc/glossary.rst b/Doc/glossary.rst
index 3b211aedef..21b92a99a1 100644
--- a/Doc/glossary.rst
+++ b/Doc/glossary.rst
@@ -146,9 +146,9 @@ Glossary
For more information about descriptors' methods, see :ref:`descriptors`.
dictionary
- An associative array, where arbitrary keys are mapped to values. The keys
- can be any object with :meth:`__hash__` function and :meth:`__eq__`
- methods. Called a hash in Perl.
+ An associative array, where arbitrary keys are mapped to values. The
+ keys can be any object with :meth:`__hash__` and :meth:`__eq__` methods.
+ Called a hash in Perl.
docstring
A string literal which appears as the first expression in a class,
@@ -385,7 +385,7 @@ Glossary
:meth:`str.lower` method can serve as a key function for case insensitive
sorts. Alternatively, an ad-hoc key function can be built from a
:keyword:`lambda` expression such as ``lambda r: (r[0], r[2])``. Also,
- the :mod:`operator` module provides three key function constuctors:
+ the :mod:`operator` module provides three key function constructors:
:func:`~operator.attrgetter`, :func:`~operator.itemgetter`, and
:func:`~operator.methodcaller`. See the :ref:`Sorting HOW TO
<sortinghowto>` for examples of how to create and use key functions.
diff --git a/Doc/howto/advocacy.rst b/Doc/howto/advocacy.rst
index e67e201702..2969d266ad 100644
--- a/Doc/howto/advocacy.rst
+++ b/Doc/howto/advocacy.rst
@@ -264,8 +264,7 @@ the organizations that use Python.
**What are the restrictions on Python's use?**
-They're practically nonexistent. Consult the :file:`Misc/COPYRIGHT` file in the
-source distribution, or the section :ref:`history-and-license` for the full
+They're practically nonexistent. Consult :ref:`history-and-license` for the full
language, but it boils down to three conditions:
* You have to leave the copyright notice on the software; if you don't include
diff --git a/Doc/howto/cporting.rst b/Doc/howto/cporting.rst
index 71844969b6..6dd0765bd3 100644
--- a/Doc/howto/cporting.rst
+++ b/Doc/howto/cporting.rst
@@ -2,27 +2,28 @@
.. _cporting-howto:
-********************************
-Porting Extension Modules to 3.0
-********************************
+*************************************
+Porting Extension Modules to Python 3
+*************************************
:author: Benjamin Peterson
.. topic:: Abstract
- Although changing the C-API was not one of Python 3.0's objectives, the many
- Python level changes made leaving 2.x's API intact impossible. In fact, some
- changes such as :func:`int` and :func:`long` unification are more obvious on
- the C level. This document endeavors to document incompatibilities and how
- they can be worked around.
+ Although changing the C-API was not one of Python 3's objectives,
+ the many Python-level changes made leaving Python 2's API intact
+ impossible. In fact, some changes such as :func:`int` and
+ :func:`long` unification are more obvious on the C level. This
+ document endeavors to document incompatibilities and how they can
+ be worked around.
Conditional compilation
=======================
-The easiest way to compile only some code for 3.0 is to check if
-:c:macro:`PY_MAJOR_VERSION` is greater than or equal to 3. ::
+The easiest way to compile only some code for Python 3 is to check
+if :c:macro:`PY_MAJOR_VERSION` is greater than or equal to 3. ::
#if PY_MAJOR_VERSION >= 3
#define IS_PY3K
@@ -35,7 +36,7 @@ conditional blocks.
Changes to Object APIs
======================
-Python 3.0 merged together some types with similar functions while cleanly
+Python 3 merged together some types with similar functions while cleanly
separating others.
@@ -43,14 +44,14 @@ str/unicode Unification
-----------------------
-Python 3.0's :func:`str` (``PyString_*`` functions in C) type is equivalent to
-2.x's :func:`unicode` (``PyUnicode_*``). The old 8-bit string type has become
-:func:`bytes`. Python 2.6 and later provide a compatibility header,
+Python 3's :func:`str` (``PyString_*`` functions in C) type is equivalent to
+Python 2's :func:`unicode` (``PyUnicode_*``). The old 8-bit string type has
+become :func:`bytes`. Python 2.6 and later provide a compatibility header,
:file:`bytesobject.h`, mapping ``PyBytes`` names to ``PyString`` ones. For best
-compatibility with 3.0, :c:type:`PyUnicode` should be used for textual data and
+compatibility with Python 3, :c:type:`PyUnicode` should be used for textual data and
:c:type:`PyBytes` for binary data. It's also important to remember that
-:c:type:`PyBytes` and :c:type:`PyUnicode` in 3.0 are not interchangeable like
-:c:type:`PyString` and :c:type:`PyUnicode` are in 2.x. The following example
+:c:type:`PyBytes` and :c:type:`PyUnicode` in Python 3 are not interchangeable like
+:c:type:`PyString` and :c:type:`PyUnicode` are in Python 2. The following example
shows best practices with regards to :c:type:`PyUnicode`, :c:type:`PyString`,
and :c:type:`PyBytes`. ::
@@ -94,10 +95,12 @@ and :c:type:`PyBytes`. ::
long/int Unification
--------------------
-In Python 3.0, there is only one integer type. It is called :func:`int` on the
-Python level, but actually corresponds to 2.x's :func:`long` type. In the
-C-API, ``PyInt_*`` functions are replaced by their ``PyLong_*`` neighbors. The
-best course of action here is using the ``PyInt_*`` functions aliased to
+Python 3 has only one integer type, :func:`int`. But it actually
+corresponds to Python 2's :func:`long` type--the :func:`int` type
+used in Python 2 was removed. In the C-API, ``PyInt_*`` functions
+are replaced by their ``PyLong_*`` equivalents.
+
+The best course of action here is using the ``PyInt_*`` functions aliased to
``PyLong_*`` found in :file:`intobject.h`. The abstract ``PyNumber_*`` APIs
can also be used in some cases. ::
@@ -120,10 +123,11 @@ can also be used in some cases. ::
Module initialization and state
===============================
-Python 3.0 has a revamped extension module initialization system. (See
-:pep:`3121`.) Instead of storing module state in globals, they should be stored
-in an interpreter specific structure. Creating modules that act correctly in
-both 2.x and 3.0 is tricky. The following simple example demonstrates how. ::
+Python 3 has a revamped extension module initialization system. (See
+:pep:`3121`.) Instead of storing module state in globals, they should
+be stored in an interpreter specific structure. Creating modules that
+act correctly in both Python 2 and Python 3 is tricky. The following
+simple example demonstrates how. ::
#include "Python.h"
@@ -209,10 +213,65 @@ both 2.x and 3.0 is tricky. The following simple example demonstrates how. ::
}
+CObject replaced with Capsule
+=============================
+
+The :c:type:`Capsule` object was introduced in Python 3.1 and 2.7 to replace
+:c:type:`CObject`. CObjects were useful,
+but the :c:type:`CObject` API was problematic: it didn't permit distinguishing
+between valid CObjects, which allowed mismatched CObjects to crash the
+interpreter, and some of its APIs relied on undefined behavior in C.
+(For further reading on the rationale behind Capsules, please see :issue:`5630`.)
+
+If you're currently using CObjects, and you want to migrate to 3.1 or newer,
+you'll need to switch to Capsules.
+:c:type:`CObject` was deprecated in 3.1 and 2.7 and completely removed in
+Python 3.2. If you only support 2.7, or 3.1 and above, you
+can simply switch to :c:type:`Capsule`. If you need to support Python 3.0,
+or versions of Python earlier than 2.7,
+you'll have to support both CObjects and Capsules.
+(Note that Python 3.0 is no longer supported, and it is not recommended
+for production use.)
+
+The following example header file :file:`capsulethunk.h` may
+solve the problem for you. Simply write your code against the
+:c:type:`Capsule` API and include this header file after
+:file:`Python.h`. Your code will automatically use Capsules
+in versions of Python with Capsules, and switch to CObjects
+when Capsules are unavailable.
+
+:file:`capsulethunk.h` simulates Capsules using CObjects. However,
+:c:type:`CObject` provides no place to store the capsule's "name". As a
+result the simulated :c:type:`Capsule` objects created by :file:`capsulethunk.h`
+behave slightly differently from real Capsules. Specifically:
+
+ * The name parameter passed in to :c:func:`PyCapsule_New` is ignored.
+
+ * The name parameter passed in to :c:func:`PyCapsule_IsValid` and
+ :c:func:`PyCapsule_GetPointer` is ignored, and no error checking
+ of the name is performed.
+
+ * :c:func:`PyCapsule_GetName` always returns NULL.
+
+ * :c:func:`PyCapsule_SetName` always throws an exception and
+ returns failure. (Since there's no way to store a name
+ in a CObject, noisy failure of :c:func:`PyCapsule_SetName`
+ was deemed preferable to silent failure here. If this is
+ inconvenient, feel free to modify your local
+ copy as you see fit.)
+
+You can find :file:`capsulethunk.h` in the Python source distribution
+as :source:`Doc/includes/capsulethunk.h`. We also include it here for
+your convenience:
+
+.. literalinclude:: ../includes/capsulethunk.h
+
+
+
Other options
=============
If you are writing a new extension module, you might consider `Cython
<http://www.cython.org>`_. It translates a Python-like language to C. The
-extension modules it creates are compatible with Python 3.x and 2.x.
+extension modules it creates are compatible with Python 3 and Python 2.
diff --git a/Doc/howto/curses.rst b/Doc/howto/curses.rst
index 53ef7deb9d..1b14ceb6bd 100644
--- a/Doc/howto/curses.rst
+++ b/Doc/howto/curses.rst
@@ -118,7 +118,7 @@ function to restore the terminal to its original operating mode. ::
A common problem when debugging a curses application is to get your terminal
messed up when the application dies without restoring the terminal to its
previous state. In Python this commonly happens when your code is buggy and
-raises an uncaught exception. Keys are no longer be echoed to the screen when
+raises an uncaught exception. Keys are no longer echoed to the screen when
you type them, for example, which makes using the shell difficult.
In Python you can avoid these complications and make debugging much easier by
@@ -271,7 +271,7 @@ application are commonly shown in reverse video; a text viewer may need to
highlight certain words. curses supports this by allowing you to specify an
attribute for each cell on the screen.
-An attribute is a integer, each bit representing a different attribute. You can
+An attribute is an integer, each bit representing a different attribute. You can
try to display text with multiple attribute bits set, but curses doesn't
guarantee that all the possible combinations are available, or that they're all
visually distinct. That depends on the ability of the terminal being used, so
@@ -300,7 +300,7 @@ could code::
curses.A_REVERSE)
stdscr.refresh()
-The curses library also supports color on those terminals that provide it, The
+The curses library also supports color on those terminals that provide it. The
most common such terminal is probably the Linux console, followed by color
xterms.
diff --git a/Doc/howto/logging-cookbook.rst b/Doc/howto/logging-cookbook.rst
index 7dc80215d8..7ee641235a 100644
--- a/Doc/howto/logging-cookbook.rst
+++ b/Doc/howto/logging-cookbook.rst
@@ -268,12 +268,12 @@ Dealing with handlers that block
.. currentmodule:: logging.handlers
Sometimes you have to get your logging handlers to do their work without
-blocking the thread you’re logging from. This is common in Web applications,
+blocking the thread you're logging from. This is common in Web applications,
though of course it also occurs in other scenarios.
A common culprit which demonstrates sluggish behaviour is the
:class:`SMTPHandler`: sending emails can take a long time, for a
-number of reasons outside the developer’s control (for example, a poorly
+number of reasons outside the developer's control (for example, a poorly
performing mail or network infrastructure). But almost any network-based
handler can block: Even a :class:`SocketHandler` operation may do a
DNS query under the hood which is too slow (and this query can be deep in the
@@ -292,7 +292,7 @@ developers who will use your code.
The second part of the solution is :class:`QueueListener`, which has been
designed as the counterpart to :class:`QueueHandler`. A
-:class:`QueueListener` is very simple: it’s passed a queue and some handlers,
+:class:`QueueListener` is very simple: it's passed a queue and some handlers,
and it fires up an internal thread which listens to its queue for LogRecords
sent from ``QueueHandlers`` (or any other source of ``LogRecords``, for that
matter). The ``LogRecords`` are removed from the queue and passed to the
@@ -745,7 +745,7 @@ the basis for code meeting your own specific requirements::
raise
except:
import sys, traceback
- print >> sys.stderr, 'Whoops! Problem:'
+ print('Whoops! Problem:', file=sys.stderr)
traceback.print_exc(file=sys.stderr)
# Arrays used for random selections in this demo
@@ -964,6 +964,219 @@ and each time it reaches the size limit it is renamed with the suffix
Obviously this example sets the log length much too small as an extreme
example. You would want to set *maxBytes* to an appropriate value.
+.. _format-styles:
+
+Use of alternative formatting styles
+------------------------------------
+
+When logging was added to the Python standard library, the only way of
+formatting messages with variable content was to use the %-formatting
+method. Since then, Python has gained two new formatting approaches:
+:class:`string.Template` (added in Python 2.4) and :meth:`str.format`
+(added in Python 2.6).
+
+Logging (as of 3.2) provides improved support for these two additional
+formatting styles. The :class:`Formatter` class been enhanced to take an
+additional, optional keyword parameter named ``style``. This defaults to
+``'%'``, but other possible values are ``'{'`` and ``'$'``, which correspond
+to the other two formatting styles. Backwards compatibility is maintained by
+default (as you would expect), but by explicitly specifying a style parameter,
+you get the ability to specify format strings which work with
+:meth:`str.format` or :class:`string.Template`. Here's an example console
+session to show the possibilities:
+
+.. code-block:: pycon
+
+ >>> import logging
+ >>> root = logging.getLogger()
+ >>> root.setLevel(logging.DEBUG)
+ >>> handler = logging.StreamHandler()
+ >>> bf = logging.Formatter('{asctime} {name} {levelname:8s} {message}',
+ ... style='{')
+ >>> handler.setFormatter(bf)
+ >>> root.addHandler(handler)
+ >>> logger = logging.getLogger('foo.bar')
+ >>> logger.debug('This is a DEBUG message')
+ 2010-10-28 15:11:55,341 foo.bar DEBUG This is a DEBUG message
+ >>> logger.critical('This is a CRITICAL message')
+ 2010-10-28 15:12:11,526 foo.bar CRITICAL This is a CRITICAL message
+ >>> df = logging.Formatter('$asctime $name ${levelname} $message',
+ ... style='$')
+ >>> handler.setFormatter(df)
+ >>> logger.debug('This is a DEBUG message')
+ 2010-10-28 15:13:06,924 foo.bar DEBUG This is a DEBUG message
+ >>> logger.critical('This is a CRITICAL message')
+ 2010-10-28 15:13:11,494 foo.bar CRITICAL This is a CRITICAL message
+ >>>
+
+Note that the formatting of logging messages for final output to logs is
+completely independent of how an individual logging message is constructed.
+That can still use %-formatting, as shown here::
+
+ >>> logger.error('This is an%s %s %s', 'other,', 'ERROR,', 'message')
+ 2010-10-28 15:19:29,833 foo.bar ERROR This is another, ERROR, message
+ >>>
+
+Logging calls (``logger.debug()``, ``logger.info()`` etc.) only take
+positional parameters for the actual logging message itself, with keyword
+parameters used only for determining options for how to handle the actual
+logging call (e.g. the ``exc_info`` keyword parameter to indicate that
+traceback information should be logged, or the ``extra`` keyword parameter
+to indicate additional contextual information to be added to the log). So
+you cannot directly make logging calls using :meth:`str.format` or
+:class:`string.Template` syntax, because internally the logging package
+uses %-formatting to merge the format string and the variable arguments.
+There would no changing this while preserving backward compatibility, since
+all logging calls which are out there in existing code will be using %-format
+strings.
+
+There is, however, a way that you can use {}- and $- formatting to construct
+your individual log messages. Recall that for a message you can use an
+arbitrary object as a message format string, and that the logging package will
+call ``str()`` on that object to get the actual format string. Consider the
+following two classes::
+
+ class BraceMessage(object):
+ def __init__(self, fmt, *args, **kwargs):
+ self.fmt = fmt
+ self.args = args
+ self.kwargs = kwargs
+
+ def __str__(self):
+ return self.fmt.format(*self.args, **self.kwargs)
+
+ class DollarMessage(object):
+ def __init__(self, fmt, **kwargs):
+ self.fmt = fmt
+ self.kwargs = kwargs
+
+ def __str__(self):
+ from string import Template
+ return Template(self.fmt).substitute(**self.kwargs)
+
+Either of these can be used in place of a format string, to allow {}- or
+$-formatting to be used to build the actual "message" part which appears in the
+formatted log output in place of "%(message)s" or "{message}" or "$message".
+It's a little unwieldy to use the class names whenever you want to log
+something, but it's quite palatable if you use an alias such as __ (double
+underscore – not to be confused with _, the single underscore used as a
+synonym/alias for :func:`gettext.gettext` or its brethren).
+
+The above classes are not included in Python, though they're easy enough to
+copy and paste into your own code. They can be used as follows (assuming that
+they're declared in a module called ``wherever``):
+
+.. code-block:: pycon
+
+ >>> from wherever import BraceMessage as __
+ >>> print(__('Message with {0} {name}', 2, name='placeholders'))
+ Message with 2 placeholders
+ >>> class Point: pass
+ ...
+ >>> p = Point()
+ >>> p.x = 0.5
+ >>> p.y = 0.5
+ >>> print(__('Message with coordinates: ({point.x:.2f}, {point.y:.2f})',
+ ... point=p))
+ Message with coordinates: (0.50, 0.50)
+ >>> from wherever import DollarMessage as __
+ >>> print(__('Message with $num $what', num=2, what='placeholders'))
+ Message with 2 placeholders
+ >>>
+
+While the above examples use ``print()`` to show how the formatting works, you
+would of course use ``logger.debug()`` or similar to actually log using this
+approach.
+
+One thing to note is that you pay no significant performance penalty with this
+approach: the actual formatting happens not when you make the logging call, but
+when (and if) the logged message is actually about to be output to a log by a
+handler. So the only slightly unusual thing which might trip you up is that the
+parentheses go around the format string and the arguments, not just the format
+string. That's because the __ notation is just syntax sugar for a constructor
+call to one of the XXXMessage classes.
+
+
+.. currentmodule:: logging
+
+.. _custom-logrecord:
+
+Customising ``LogRecord``
+-------------------------
+
+Every logging event is represented by a :class:`LogRecord` instance.
+When an event is logged and not filtered out by a logger's level, a
+:class:`LogRecord` is created, populated with information about the event and
+then passed to the handlers for that logger (and its ancestors, up to and
+including the logger where further propagation up the hierarchy is disabled).
+Before Python 3.2, there were only two places where this creation was done:
+
+* :meth:`Logger.makeRecord`, which is called in the normal process of
+ logging an event. This invoked :class:`LogRecord` directly to create an
+ instance.
+* :func:`makeLogRecord`, which is called with a dictionary containing
+ attributes to be added to the LogRecord. This is typically invoked when a
+ suitable dictionary has been received over the network (e.g. in pickle form
+ via a :class:`~handlers.SocketHandler`, or in JSON form via an
+ :class:`~handlers.HTTPHandler`).
+
+This has usually meant that if you need to do anything special with a
+:class:`LogRecord`, you've had to do one of the following.
+
+* Create your own :class:`Logger` subclass, which overrides
+ :meth:`Logger.makeRecord`, and set it using :func:`~logging.setLoggerClass`
+ before any loggers that you care about are instantiated.
+* Add a :class:`Filter` to a logger or handler, which does the
+ necessary special manipulation you need when its
+ :meth:`~Filter.filter` method is called.
+
+The first approach would be a little unwieldy in the scenario where (say)
+several different libraries wanted to do different things. Each would attempt
+to set its own :class:`Logger` subclass, and the one which did this last would
+win.
+
+The second approach works reasonably well for many cases, but does not allow
+you to e.g. use a specialized subclass of :class:`LogRecord`. Library
+developers can set a suitable filter on their loggers, but they would have to
+remember to do this every time they introduced a new logger (which they would
+do simply by adding new packages or modules and doing ::
+
+ logger = logging.getLogger(__name__)
+
+at module level). It's probably one too many things to think about. Developers
+could also add the filter to a :class:`~logging.NullHandler` attached to their
+top-level logger, but this would not be invoked if an application developer
+attached a handler to a lower-level library logger – so output from that
+handler would not reflect the intentions of the library developer.
+
+In Python 3.2 and later, :class:`~logging.LogRecord` creation is done through a
+factory, which you can specify. The factory is just a callable you can set with
+:func:`~logging.setLogRecordFactory`, and interrogate with
+:func:`~logging.getLogRecordFactory`. The factory is invoked with the same
+signature as the :class:`~logging.LogRecord` constructor, as :class:`LogRecord`
+is the default setting for the factory.
+
+This approach allows a custom factory to control all aspects of LogRecord
+creation. For example, you could return a subclass, or just add some additional
+attributes to the record once created, using a pattern similar to this::
+
+ old_factory = logging.getLogRecordFactory()
+
+ def record_factory(*args, **kwargs):
+ record = old_factory(*args, **kwargs)
+ record.custom_attribute = 0xdecafbad
+ return record
+
+ logging.setLogRecordFactory(record_factory)
+
+This pattern allows different libraries to chain factories together, and as
+long as they don't overwrite each other's attributes or unintentionally
+overwrite the attributes provided as standard, there should be no surprises.
+However, it should be borne in mind that each link in the chain adds run-time
+overhead to all logging operations, and the technique should only be used when
+the use of a :class:`Filter` does not provide the desired result.
+
+
.. _zeromq-handlers:
Subclassing QueueHandler - a ZeroMQ example
@@ -1102,3 +1315,232 @@ This dictionary is passed to :func:`~logging.config.dictConfig` to put the confi
For more information about this configuration, you can see the `relevant
section <https://docs.djangoproject.com/en/1.3/topics/logging/#configuring-logging>`_
of the Django documentation.
+
+A more elaborate multiprocessing example
+----------------------------------------
+
+The following working example shows how logging can be used with multiprocessing
+using configuration files. The configurations are fairly simple, but serve to
+illustrate how more complex ones could be implemented in a real multiprocessing
+scenario.
+
+In the example, the main process spawns a listener process and some worker
+processes. Each of the main process, the listener and the workers have three
+separate configurations (the workers all share the same configuration). We can
+see logging in the main process, how the workers log to a QueueHandler and how
+the listener implements a QueueListener and a more complex logging
+configuration, and arranges to dispatch events received via the queue to the
+handlers specified in the configuration. Note that these configurations are
+purely illustrative, but you should be able to adapt this example to your own
+scenario.
+
+Here's the script - the docstrings and the comments hopefully explain how it
+works::
+
+ import logging
+ import logging.config
+ import logging.handlers
+ from multiprocessing import Process, Queue, Event, current_process
+ import os
+ import random
+ import time
+
+ class MyHandler(object):
+ """
+ A simple handler for logging events. It runs in the listener process and
+ dispatches events to loggers based on the name in the received record,
+ which then get dispatched, by the logging system, to the handlers
+ configured for those loggers.
+ """
+ def handle(self, record):
+ logger = logging.getLogger(record.name)
+ # The process name is transformed just to show that it's the listener
+ # doing the logging to files and console
+ record.processName = '%s (for %s)' % (current_process().name, record.processName)
+ logger.handle(record)
+
+ def listener_process(q, stop_event, config):
+ """
+ This could be done in the main process, but is just done in a separate
+ process for illustrative purposes.
+
+ This initialises logging according to the specified configuration,
+ starts the listener and waits for the main process to signal completion
+ via the event. The listener is then stopped, and the process exits.
+ """
+ logging.config.dictConfig(config)
+ listener = logging.handlers.QueueListener(q, MyHandler())
+ listener.start()
+ if os.name == 'posix':
+ # On POSIX, the setup logger will have been configured in the
+ # parent process, but should have been disabled following the
+ # dictConfig call.
+ # On Windows, since fork isn't used, the setup logger won't
+ # exist in the child, so it would be created and the message
+ # would appear - hence the "if posix" clause.
+ logger = logging.getLogger('setup')
+ logger.critical('Should not appear, because of disabled logger ...')
+ stop_event.wait()
+ listener.stop()
+
+ def worker_process(config):
+ """
+ A number of these are spawned for the purpose of illustration. In
+ practice, they could be a heterogeneous bunch of processes rather than
+ ones which are identical to each other.
+
+ This initialises logging according to the specified configuration,
+ and logs a hundred messages with random levels to randomly selected
+ loggers.
+
+ A small sleep is added to allow other processes a chance to run. This
+ is not strictly needed, but it mixes the output from the different
+ processes a bit more than if it's left out.
+ """
+ logging.config.dictConfig(config)
+ levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
+ logging.CRITICAL]
+ loggers = ['foo', 'foo.bar', 'foo.bar.baz',
+ 'spam', 'spam.ham', 'spam.ham.eggs']
+ if os.name == 'posix':
+ # On POSIX, the setup logger will have been configured in the
+ # parent process, but should have been disabled following the
+ # dictConfig call.
+ # On Windows, since fork isn't used, the setup logger won't
+ # exist in the child, so it would be created and the message
+ # would appear - hence the "if posix" clause.
+ logger = logging.getLogger('setup')
+ logger.critical('Should not appear, because of disabled logger ...')
+ for i in range(100):
+ lvl = random.choice(levels)
+ logger = logging.getLogger(random.choice(loggers))
+ logger.log(lvl, 'Message no. %d', i)
+ time.sleep(0.01)
+
+ def main():
+ q = Queue()
+ # The main process gets a simple configuration which prints to the console.
+ config_initial = {
+ 'version': 1,
+ 'formatters': {
+ 'detailed': {
+ 'class': 'logging.Formatter',
+ 'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
+ }
+ },
+ 'handlers': {
+ 'console': {
+ 'class': 'logging.StreamHandler',
+ 'level': 'INFO',
+ },
+ },
+ 'root': {
+ 'level': 'DEBUG',
+ 'handlers': ['console']
+ },
+ }
+ # The worker process configuration is just a QueueHandler attached to the
+ # root logger, which allows all messages to be sent to the queue.
+ # We disable existing loggers to disable the "setup" logger used in the
+ # parent process. This is needed on POSIX because the logger will
+ # be there in the child following a fork().
+ config_worker = {
+ 'version': 1,
+ 'disable_existing_loggers': True,
+ 'handlers': {
+ 'queue': {
+ 'class': 'logging.handlers.QueueHandler',
+ 'queue': q,
+ },
+ },
+ 'root': {
+ 'level': 'DEBUG',
+ 'handlers': ['queue']
+ },
+ }
+ # The listener process configuration shows that the full flexibility of
+ # logging configuration is available to dispatch events to handlers however
+ # you want.
+ # We disable existing loggers to disable the "setup" logger used in the
+ # parent process. This is needed on POSIX because the logger will
+ # be there in the child following a fork().
+ config_listener = {
+ 'version': 1,
+ 'disable_existing_loggers': True,
+ 'formatters': {
+ 'detailed': {
+ 'class': 'logging.Formatter',
+ 'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
+ },
+ 'simple': {
+ 'class': 'logging.Formatter',
+ 'format': '%(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
+ }
+ },
+ 'handlers': {
+ 'console': {
+ 'class': 'logging.StreamHandler',
+ 'level': 'INFO',
+ 'formatter': 'simple',
+ },
+ 'file': {
+ 'class': 'logging.FileHandler',
+ 'filename': 'mplog.log',
+ 'mode': 'w',
+ 'formatter': 'detailed',
+ },
+ 'foofile': {
+ 'class': 'logging.FileHandler',
+ 'filename': 'mplog-foo.log',
+ 'mode': 'w',
+ 'formatter': 'detailed',
+ },
+ 'errors': {
+ 'class': 'logging.FileHandler',
+ 'filename': 'mplog-errors.log',
+ 'mode': 'w',
+ 'level': 'ERROR',
+ 'formatter': 'detailed',
+ },
+ },
+ 'loggers': {
+ 'foo': {
+ 'handlers' : ['foofile']
+ }
+ },
+ 'root': {
+ 'level': 'DEBUG',
+ 'handlers': ['console', 'file', 'errors']
+ },
+ }
+ # Log some initial events, just to show that logging in the parent works
+ # normally.
+ logging.config.dictConfig(config_initial)
+ logger = logging.getLogger('setup')
+ logger.info('About to create workers ...')
+ workers = []
+ for i in range(5):
+ wp = Process(target=worker_process, name='worker %d' % (i + 1),
+ args=(config_worker,))
+ workers.append(wp)
+ wp.start()
+ logger.info('Started worker: %s', wp.name)
+ logger.info('About to create listener ...')
+ stop_event = Event()
+ lp = Process(target=listener_process, name='listener',
+ args=(q, stop_event, config_listener))
+ lp.start()
+ logger.info('Started listener')
+ # We now hang around for the workers to finish their work.
+ for wp in workers:
+ wp.join()
+ # Workers all done, listening can now stop.
+ # Logging in the parent still works normally.
+ logger.info('Telling listener to stop ...')
+ stop_event.set()
+ lp.join()
+ logger.info('All done.')
+
+ if __name__ == '__main__':
+ main()
+
diff --git a/Doc/howto/logging.rst b/Doc/howto/logging.rst
index 2c9514a58d..44b2f59903 100644
--- a/Doc/howto/logging.rst
+++ b/Doc/howto/logging.rst
@@ -651,6 +651,22 @@ You can see that the config file approach has a few advantages over the Python
code approach, mainly separation of configuration and code and the ability of
noncoders to easily modify the logging properties.
+.. warning:: The :func:`fileConfig` function takes a default parameter,
+ ``disable_existing_loggers``, which defaults to ``True`` for reasons of
+ backward compatibility. This may or may not be what you want, since it
+ will cause any loggers existing before the :func:`fileConfig` call to
+ be disabled unless they (or an ancestor) are explicitly named in the
+ configuration. Please refer to the reference documentation for more
+ information, and specify ``False`` for this parameter if you wish.
+
+ The dictionary passed to :func:`dictConfig` can also specify a Boolean
+ value with key ``disable_existing_loggers``, which if not specified
+ explicitly in the dictionary also defaults to being interpreted as
+ ``True``. This leads to the logger-disabling behaviour described above,
+ which may not be what you want - in which case, provide the key
+ explicitly with a value of ``False``.
+
+
.. currentmodule:: logging
Note that the class names referenced in config files need to be either relative
diff --git a/Doc/howto/regex.rst b/Doc/howto/regex.rst
index 07a8b561d0..3ac03ca86a 100644
--- a/Doc/howto/regex.rst
+++ b/Doc/howto/regex.rst
@@ -360,7 +360,7 @@ and more.
You can learn about this by interactively experimenting with the :mod:`re`
module. If you have :mod:`tkinter` available, you may also want to look at
-:file:`Tools/demo/redemo.py`, a demonstration program included with the
+:source:`Tools/demo/redemo.py`, a demonstration program included with the
Python distribution. It allows you to enter REs and strings, and displays
whether the RE matches or fails. :file:`redemo.py` can be quite useful when
trying to debug a complicated RE. Phil Schwartz's `Kodos
@@ -495,7 +495,7 @@ more convenient. If a program contains a lot of regular expressions, or re-uses
the same ones in several locations, then it might be worthwhile to collect all
the definitions in one place, in a section of code that compiles all the REs
ahead of time. To take an example from the standard library, here's an extract
-from the now deprecated :file:`xmllib.py`::
+from the now-defunct Python 2 standard :mod:`xmllib` module::
ref = re.compile( ... )
entityref = re.compile( ... )
diff --git a/Doc/howto/urllib2.rst b/Doc/howto/urllib2.rst
index 76286bdc27..567c1b1820 100644
--- a/Doc/howto/urllib2.rst
+++ b/Doc/howto/urllib2.rst
@@ -108,6 +108,7 @@ library. ::
'language' : 'Python' }
data = urllib.parse.urlencode(values)
+ data = data.encode('utf-8') # data should be bytes
req = urllib.request.Request(url, data)
response = urllib.request.urlopen(req)
the_page = response.read()
@@ -172,7 +173,8 @@ Explorer [#]_. ::
'language' : 'Python' }
headers = { 'User-Agent' : user_agent }
- data = urllib.parse.urlencode(values)
+ data = urllib.parse.urlencode(values)
+ data = data.encode('utf-8')
req = urllib.request.Request(url, data, headers)
response = urllib.request.urlopen(req)
the_page = response.read()
diff --git a/Doc/includes/capsulethunk.h b/Doc/includes/capsulethunk.h
new file mode 100644
index 0000000000..6b20564f13
--- /dev/null
+++ b/Doc/includes/capsulethunk.h
@@ -0,0 +1,134 @@
+#ifndef __CAPSULETHUNK_H
+#define __CAPSULETHUNK_H
+
+#if ( (PY_VERSION_HEX < 0x02070000) \
+ || ((PY_VERSION_HEX >= 0x03000000) \
+ && (PY_VERSION_HEX < 0x03010000)) )
+
+#define __PyCapsule_GetField(capsule, field, default_value) \
+ ( PyCapsule_CheckExact(capsule) \
+ ? (((PyCObject *)capsule)->field) \
+ : (default_value) \
+ ) \
+
+#define __PyCapsule_SetField(capsule, field, value) \
+ ( PyCapsule_CheckExact(capsule) \
+ ? (((PyCObject *)capsule)->field = value), 1 \
+ : 0 \
+ ) \
+
+
+#define PyCapsule_Type PyCObject_Type
+
+#define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
+#define PyCapsule_IsValid(capsule, name) (PyCObject_Check(capsule))
+
+
+#define PyCapsule_New(pointer, name, destructor) \
+ (PyCObject_FromVoidPtr(pointer, destructor))
+
+
+#define PyCapsule_GetPointer(capsule, name) \
+ (PyCObject_AsVoidPtr(capsule))
+
+/* Don't call PyCObject_SetPointer here, it fails if there's a destructor */
+#define PyCapsule_SetPointer(capsule, pointer) \
+ __PyCapsule_SetField(capsule, cobject, pointer)
+
+
+#define PyCapsule_GetDestructor(capsule) \
+ __PyCapsule_GetField(capsule, destructor)
+
+#define PyCapsule_SetDestructor(capsule, dtor) \
+ __PyCapsule_SetField(capsule, destructor, dtor)
+
+
+/*
+ * Sorry, there's simply no place
+ * to store a Capsule "name" in a CObject.
+ */
+#define PyCapsule_GetName(capsule) NULL
+
+static int
+PyCapsule_SetName(PyObject *capsule, const char *unused)
+{
+ unused = unused;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "can't use PyCapsule_SetName with CObjects");
+ return 1;
+}
+
+
+
+#define PyCapsule_GetContext(capsule) \
+ __PyCapsule_GetField(capsule, descr)
+
+#define PyCapsule_SetContext(capsule, context) \
+ __PyCapsule_SetField(capsule, descr, context)
+
+
+static void *
+PyCapsule_Import(const char *name, int no_block)
+{
+ PyObject *object = NULL;
+ void *return_value = NULL;
+ char *trace;
+ size_t name_length = (strlen(name) + 1) * sizeof(char);
+ char *name_dup = (char *)PyMem_MALLOC(name_length);
+
+ if (!name_dup) {
+ return NULL;
+ }
+
+ memcpy(name_dup, name, name_length);
+
+ trace = name_dup;
+ while (trace) {
+ char *dot = strchr(trace, '.');
+ if (dot) {
+ *dot++ = '\0';
+ }
+
+ if (object == NULL) {
+ if (no_block) {
+ object = PyImport_ImportModuleNoBlock(trace);
+ } else {
+ object = PyImport_ImportModule(trace);
+ if (!object) {
+ PyErr_Format(PyExc_ImportError,
+ "PyCapsule_Import could not "
+ "import module \"%s\"", trace);
+ }
+ }
+ } else {
+ PyObject *object2 = PyObject_GetAttrString(object, trace);
+ Py_DECREF(object);
+ object = object2;
+ }
+ if (!object) {
+ goto EXIT;
+ }
+
+ trace = dot;
+ }
+
+ if (PyCObject_Check(object)) {
+ PyCObject *cobject = (PyCObject *)object;
+ return_value = cobject->cobject;
+ } else {
+ PyErr_Format(PyExc_AttributeError,
+ "PyCapsule_Import \"%s\" is not valid",
+ name);
+ }
+
+EXIT:
+ Py_XDECREF(object);
+ if (name_dup) {
+ PyMem_FREE(name_dup);
+ }
+ return return_value;
+}
+
+#endif /* #if PY_VERSION_HEX < 0x02070000 */
+
+#endif /* __CAPSULETHUNK_H */
diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst
index 79a98cbcfd..0123b5cbac 100644
--- a/Doc/library/argparse.rst
+++ b/Doc/library/argparse.rst
@@ -1642,8 +1642,8 @@ Argument groups
--bar BAR bar help
- Note that any arguments not your user defined groups will end up back in the
- usual "positional arguments" and "optional arguments" sections.
+ Note that any arguments not in your user-defined groups will end up back
+ in the usual "positional arguments" and "optional arguments" sections.
Mutual exclusion
@@ -1833,9 +1833,10 @@ A partial upgrade path from :mod:`optparse` to :mod:`argparse`:
* Replace all :meth:`optparse.OptionParser.add_option` calls with
:meth:`ArgumentParser.add_argument` calls.
-* Replace ``options, args = parser.parse_args()`` with ``args =
+* Replace ``(options, args) = parser.parse_args()`` with ``args =
parser.parse_args()`` and add additional :meth:`ArgumentParser.add_argument`
- calls for the positional arguments.
+ calls for the positional arguments. Keep in mind that what was previously
+ called ``options`` is now called ``args`` in the :mod:`argparse` context.
* Replace callback actions and the ``callback_*`` keyword arguments with
``type`` or ``action`` arguments.
diff --git a/Doc/library/email.encoders.rst b/Doc/library/email.encoders.rst
index 5421b9f66f..81d30961db 100644
--- a/Doc/library/email.encoders.rst
+++ b/Doc/library/email.encoders.rst
@@ -18,6 +18,10 @@ exactly one argument, the message object to encode. They usually extract the
payload, encode it, and reset the payload to this newly encoded value. They
should also set the :mailheader:`Content-Transfer-Encoding` header as appropriate.
+Note that these functions are not meaningful for a multipart message. They
+must be applied to individual subparts instead, and will raise a
+:exc:`TypeError` if passed a message whose type is multipart.
+
Here are the encoding functions provided:
diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst
index f835dcfc71..3fcd6941c2 100644
--- a/Doc/library/functions.rst
+++ b/Doc/library/functions.rst
@@ -247,6 +247,13 @@ are always available. They are listed here in alphabetical order.
the function serves as a numeric conversion function like :func:`int`
and :func:`float`. If both arguments are omitted, returns ``0j``.
+ .. note::
+
+ When converting from a string, the string must not contain whitespace
+ around the central ``+`` or ``-`` operator. For example,
+ ``complex('1+2j')`` is fine, but ``complex('1 + 2j')`` raises
+ :exc:`ValueError`.
+
The complex type is described in :ref:`typesnumeric`.
diff --git a/Doc/library/imp.rst b/Doc/library/imp.rst
index 6e9845ed00..1345b254d6 100644
--- a/Doc/library/imp.rst
+++ b/Doc/library/imp.rst
@@ -64,7 +64,7 @@ This module provides an interface to the mechanisms used to implement the
path and the last item in the *description* tuple is :const:`PKG_DIRECTORY`.
This function does not handle hierarchical module names (names containing
- dots). In order to find *P*.*M*, that is, submodule *M* of package *P*, use
+ dots). In order to find *P.M*, that is, submodule *M* of package *P*, use
:func:`find_module` and :func:`load_module` to find and load package *P*, and
then use :func:`find_module` with the *path* argument set to ``P.__path__``.
When *P* itself has a dotted name, apply this recipe recursively.
@@ -256,7 +256,7 @@ to indicate the search result of :func:`find_module`.
.. data:: PY_FROZEN
- The module was found as a frozen module (see :func:`init_frozen`).
+ The module was found as a frozen module.
.. class:: NullImporter(path_string)
diff --git a/Doc/library/json.rst b/Doc/library/json.rst
index a791259831..f656700887 100644
--- a/Doc/library/json.rst
+++ b/Doc/library/json.rst
@@ -168,6 +168,14 @@ Basic Usage
so trying to serialize multiple objects with repeated calls to
:func:`dump` using the same *fp* will result in an invalid JSON file.
+ .. note::
+
+ Keys in key/value pairs of JSON are always of the type :class:`str`. When
+ a dictionary is converted into JSON, all the keys of the dictionary are
+ coerced to strings. As a result of this, if a dictionary is converted
+ into JSON and then back into a dictionary, the dictionary may not equal
+ the original one. That is, ``loads(dumps(x)) != x`` if x has non-string
+ keys.
.. function:: load(fp, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw)
diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst
index c4dd438f5b..ef65cfa559 100644
--- a/Doc/library/logging.handlers.rst
+++ b/Doc/library/logging.handlers.rst
@@ -654,7 +654,7 @@ event of a certain severity or greater is seen.
:class:`BufferingHandler`, which is an abstract class. This buffers logging
records in memory. Whenever each record is added to the buffer, a check is made
by calling :meth:`shouldFlush` to see if the buffer should be flushed. If it
-should, then :meth:`flush` is expected to do the needful.
+should, then :meth:`flush` is expected to do the flushing.
.. class:: BufferingHandler(capacity)
diff --git a/Doc/library/logging.rst b/Doc/library/logging.rst
index c429c85985..dc97726258 100644
--- a/Doc/library/logging.rst
+++ b/Doc/library/logging.rst
@@ -49,9 +49,22 @@ listed below.
Logger Objects
--------------
-Loggers have the following attributes and methods. Note that Loggers are never
+Loggers have the following attributes and methods. Note that Loggers are never
instantiated directly, but always through the module-level function
-``logging.getLogger(name)``.
+``logging.getLogger(name)``. Multiple calls to :func:`getLogger` with the same
+name will always return a reference to the same Logger object.
+
+The ``name`` is potentially a period-separated hierarchical value, like
+``foo.bar.baz`` (though it could also be just plain ``foo``, for example).
+Loggers that are further down in the hierarchical list are children of loggers
+higher up in the list. For example, given a logger with a name of ``foo``,
+loggers with names of ``foo.bar``, ``foo.bar.baz``, and ``foo.bam`` are all
+descendants of ``foo``. The logger name hierarchy is analogous to the Python
+package hierarchy, and identical to it if you organise your loggers on a
+per-module basis using the recommended construction
+``logging.getLogger(__name__)``. That's because in a module, ``__name__``
+is the module's name in the Python package namespace.
+
.. class:: Logger
@@ -159,7 +172,7 @@ instantiated directly, but always through the module-level function
FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
logging.basicConfig(format=FORMAT)
- d = { 'clientip' : '192.168.0.1', 'user' : 'fbloggs' }
+ d = {'clientip': '192.168.0.1', 'user': 'fbloggs'}
logger = logging.getLogger('tcpserver')
logger.warning('Protocol problem: %s', 'connection reset', extra=d)
@@ -1077,7 +1090,7 @@ with the :mod:`warnings` module.
If *capture* is ``True``, warnings issued by the :mod:`warnings` module will
be redirected to the logging system. Specifically, a warning will be
formatted using :func:`warnings.formatwarning` and the resulting string
- logged to a logger named ``'py.warnings'`` with a severity of ``'WARNING'``.
+ logged to a logger named ``'py.warnings'`` with a severity of :const:`WARNING`.
If *capture* is ``False``, the redirection of warnings to the logging system
will stop, and warnings will be redirected to their original destinations
diff --git a/Doc/library/markup.rst b/Doc/library/markup.rst
index 49794ef707..1b4cca51a7 100644
--- a/Doc/library/markup.rst
+++ b/Doc/library/markup.rst
@@ -23,7 +23,7 @@ definition of the Python bindings for the DOM and SAX interfaces.
html.rst
html.parser.rst
html.entities.rst
- pyexpat.rst
+ xml.etree.elementtree.rst
xml.dom.rst
xml.dom.minidom.rst
xml.dom.pulldom.rst
@@ -31,4 +31,4 @@ definition of the Python bindings for the DOM and SAX interfaces.
xml.sax.handler.rst
xml.sax.utils.rst
xml.sax.reader.rst
- xml.etree.elementtree.rst
+ pyexpat.rst
diff --git a/Doc/library/operator.rst b/Doc/library/operator.rst
index b03d9df208..9ba7f41104 100644
--- a/Doc/library/operator.rst
+++ b/Doc/library/operator.rst
@@ -340,7 +340,7 @@ Python syntax and the functions in the :mod:`operator` module.
+-----------------------+-------------------------+---------------------------------------+
| Containment Test | ``obj in seq`` | ``contains(seq, obj)`` |
+-----------------------+-------------------------+---------------------------------------+
-| Division | ``a / b`` | ``div(a, b)`` |
+| Division | ``a / b`` | ``truediv(a, b)`` |
+-----------------------+-------------------------+---------------------------------------+
| Division | ``a // b`` | ``floordiv(a, b)`` |
+-----------------------+-------------------------+---------------------------------------+
diff --git a/Doc/library/re.rst b/Doc/library/re.rst
index b196a28f9d..b4e0557011 100644
--- a/Doc/library/re.rst
+++ b/Doc/library/re.rst
@@ -330,16 +330,22 @@ the second character. For example, ``\$`` matches the character ``'$'``.
Matches the empty string, but only at the beginning or end of a word.
A word is defined as a sequence of Unicode alphanumeric or underscore
characters, so the end of a word is indicated by whitespace or a
- non-alphanumeric, non-underscore Unicode character. Note that
- formally, ``\b`` is defined as the boundary between a ``\w`` and a
- ``\W`` character (or vice versa). By default Unicode alphanumerics
- are the ones used, but this can be changed by using the :const:`ASCII`
- flag. Inside a character range, ``\b`` represents the backspace
- character, for compatibility with Python's string literals.
+ non-alphanumeric, non-underscore Unicode character. Note that formally,
+ ``\b`` is defined as the boundary between a ``\w`` and a ``\W`` character
+ (or vice versa), or between ``\w`` and the beginning/end of the string.
+ This means that ``r'\bfoo\b'`` matches ``'foo'``, ``'foo.'``, ``'(foo)'``,
+ ``'bar foo baz'`` but not ``'foobar'`` or ``'foo3'``.
+
+ By default Unicode alphanumerics are the ones used, but this can be changed
+ by using the :const:`ASCII` flag. Inside a character range, ``\b``
+ represents the backspace character, for compatibility with Python's string
+ literals.
``\B``
- Matches the empty string, but only when it is *not* at the beginning or end of a
- word. This is just the opposite of ``\b``, so word characters are
+ Matches the empty string, but only when it is *not* at the beginning or end
+ of a word. This means that ``r'py\B'`` matches ``'python'``, ``'py3'``,
+ ``'py2'``, but not ``'py'``, ``'py.'``, or ``'py!'``.
+ ``\B`` is just the opposite of ``\b``, so word characters are
Unicode alphanumerics or the underscore, although this can be changed
by using the :const:`ASCII` flag.
@@ -417,31 +423,6 @@ a group reference. As for string literals, octal escapes are always at most
three digits in length.
-.. _matching-searching:
-
-Matching vs. Searching
-----------------------
-
-.. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
-
-
-Python offers two different primitive operations based on regular expressions:
-**match** checks for a match only at the beginning of the string, while
-**search** checks for a match anywhere in the string (this is what Perl does
-by default).
-
-Note that match may differ from search even when using a regular expression
-beginning with ``'^'``: ``'^'`` matches only at the start of the string, or in
-:const:`MULTILINE` mode also immediately following a newline. The "match"
-operation succeeds only if the pattern matches at the start of the string
-regardless of mode, or at the starting position given by the optional *pos*
-argument regardless of whether a newline precedes it.
-
- >>> re.match("c", "abcdef") # No match
- >>> re.search("c", "abcdef") # Match
- <_sre.SRE_Match object at ...>
-
-
.. _contents-of-module-re:
Module Contents
@@ -575,10 +556,11 @@ form.
<match-objects>`. Return ``None`` if the string does not match the pattern;
note that this is different from a zero-length match.
- .. note::
+ Note that even in :const:`MULTILINE` mode, :func:`re.match` will only match
+ at the beginning of the string and not at the beginning of each line.
- If you want to locate a match anywhere in *string*, use :func:`search`
- instead.
+ If you want to locate a match anywhere in *string*, use :func:`search`
+ instead (see also :ref:`search-vs-match`).
.. function:: split(pattern, string, maxsplit=0, flags=0)
@@ -762,16 +744,14 @@ attributes:
The optional *pos* and *endpos* parameters have the same meaning as for the
:meth:`~regex.search` method.
- .. note::
-
- If you want to locate a match anywhere in *string*, use
- :meth:`~regex.search` instead.
-
>>> pattern = re.compile("o")
>>> pattern.match("dog") # No match as "o" is not at the start of "dog".
>>> pattern.match("dog", 1) # Match as "o" is the 2nd character of "dog".
<_sre.SRE_Match object at ...>
+ If you want to locate a match anywhere in *string*, use
+ :meth:`~regex.search` instead (see also :ref:`search-vs-match`).
+
.. method:: regex.split(string, maxsplit=0)
@@ -804,8 +784,9 @@ attributes:
.. attribute:: regex.flags
- The flags argument used when the RE object was compiled, or ``0`` if no flags
- were provided.
+ The regex matching flags. This is a combination of the flags given to
+ :func:`.compile`, any ``(?...)`` inline flags in the pattern, and implicit
+ flags such as :data:`UNICODE` if the pattern is a Unicode string.
.. attribute:: regex.groups
@@ -964,16 +945,15 @@ support the following methods and attributes:
.. attribute:: match.pos
The value of *pos* which was passed to the :meth:`~regex.search` or
- :meth:`~regex.match` method of a :ref:`match object <match-objects>`. This
- is the index into the string at which the RE engine started looking for a
- match.
+ :meth:`~regex.match` method of a :ref:`regex object <re-objects>`. This is
+ the index into the string at which the RE engine started looking for a match.
.. attribute:: match.endpos
The value of *endpos* which was passed to the :meth:`~regex.search` or
- :meth:`~regex.match` method of a :ref:`match object <match-objects>`. This
- is the index into the string beyond which the RE engine will not go.
+ :meth:`~regex.match` method of a :ref:`regex object <re-objects>`. This is
+ the index into the string beyond which the RE engine will not go.
.. attribute:: match.lastindex
@@ -1111,59 +1091,39 @@ The equivalent regular expression would be ::
(\S+) - (\d+) errors, (\d+) warnings
-Avoiding recursion
-^^^^^^^^^^^^^^^^^^
-
-If you create regular expressions that require the engine to perform a lot of
-recursion, you may encounter a :exc:`RuntimeError` exception with the message
-``maximum recursion limit exceeded``. For example, ::
-
- >>> s = 'Begin ' + 1000*'a very long string ' + 'end'
- >>> re.match('Begin (\w| )*? end', s).end()
- Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- File "/usr/local/lib/python3.2/re.py", line 132, in match
- return _compile(pattern, flags).match(string)
- RuntimeError: maximum recursion limit exceeded
-
-You can often restructure your regular expression to avoid recursion.
-
-Simple uses of the ``*?`` pattern are special-cased to avoid recursion. Thus,
-the above regular expression can avoid recursion by being recast as ``Begin
-[a-zA-Z0-9_ ]*?end``. As a further benefit, such regular expressions will run
-faster than their recursive equivalents.
-
+.. _search-vs-match:
search() vs. match()
^^^^^^^^^^^^^^^^^^^^
-In a nutshell, :func:`match` only attempts to match a pattern at the beginning
-of a string where :func:`search` will match a pattern anywhere in a string.
-For example:
+.. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
- >>> re.match("o", "dog") # No match as "o" is not the first letter of "dog".
- >>> re.search("o", "dog") # Match as search() looks everywhere in the string.
- <_sre.SRE_Match object at ...>
+Python offers two different primitive operations based on regular expressions:
+:func:`re.match` checks for a match only at the beginning of the string, while
+:func:`re.search` checks for a match anywhere in the string (this is what Perl
+does by default).
-.. note::
+For example::
- The following applies only to regular expression objects like those created
- with ``re.compile("pattern")``, not the primitives ``re.match(pattern,
- string)`` or ``re.search(pattern, string)``.
+ >>> re.match("c", "abcdef") # No match
+ >>> re.search("c", "abcdef") # Match
+ <_sre.SRE_Match object at ...>
-:func:`match` has an optional second parameter that gives an index in the string
-where the search is to start::
+Regular expressions beginning with ``'^'`` can be used with :func:`search` to
+restrict the match at the beginning of the string::
- >>> pattern = re.compile("o")
- >>> pattern.match("dog") # No match as "o" is not at the start of "dog."
+ >>> re.match("c", "abcdef") # No match
+ >>> re.search("^c", "abcdef") # No match
+ >>> re.search("^a", "abcdef") # Match
+ <_sre.SRE_Match object at ...>
- # Equivalent to the above expression as 0 is the default starting index:
- >>> pattern.match("dog", 0)
+Note however that in :const:`MULTILINE` mode :func:`match` only matches at the
+beginning of the string, whereas using :func:`search` with a regular expression
+beginning with ``'^'`` will match at the beginning of each line.
- # Match as "o" is the 2nd character of "dog" (index 0 is the first):
- >>> pattern.match("dog", 1)
+ >>> re.match('X', 'A\nB\nX', re.MULTILINE) # No match
+ >>> re.search('^X', 'A\nB\nX', re.MULTILINE) # Match
<_sre.SRE_Match object at ...>
- >>> pattern.match("dog", 2) # No match as "o" is not the 3rd character of "dog."
Making a Phonebook
@@ -1177,7 +1137,7 @@ creates a phonebook.
First, here is the input. Normally it may come from a file, here we are using
triple-quoted string syntax:
- >>> input = """Ross McFluff: 834.345.1254 155 Elm Street
+ >>> text = """Ross McFluff: 834.345.1254 155 Elm Street
...
... Ronald Heathmore: 892.345.3428 436 Finley Avenue
... Frank Burger: 925.541.7625 662 South Dogwood Way
@@ -1191,7 +1151,7 @@ into a list with each nonempty line having its own entry:
.. doctest::
:options: +NORMALIZE_WHITESPACE
- >>> entries = re.split("\n+", input)
+ >>> entries = re.split("\n+", text)
>>> entries
['Ross McFluff: 834.345.1254 155 Elm Street',
'Ronald Heathmore: 892.345.3428 436 Finley Avenue',
diff --git a/Doc/library/signal.rst b/Doc/library/signal.rst
index 698b1e74f4..d1cae13d62 100644
--- a/Doc/library/signal.rst
+++ b/Doc/library/signal.rst
@@ -5,46 +5,58 @@
:synopsis: Set handlers for asynchronous events.
-This module provides mechanisms to use signal handlers in Python. Some general
-rules for working with signals and their handlers:
-
-* A handler for a particular signal, once set, remains installed until it is
- explicitly reset (Python emulates the BSD style interface regardless of the
- underlying implementation), with the exception of the handler for
- :const:`SIGCHLD`, which follows the underlying implementation.
-
-* There is no way to "block" signals temporarily from critical sections (since
- this is not supported by all Unix flavors).
-
-* Although Python signal handlers are called asynchronously as far as the Python
- user is concerned, they can only occur between the "atomic" instructions of the
- Python interpreter. This means that signals arriving during long calculations
- implemented purely in C (such as regular expression matches on large bodies of
- text) may be delayed for an arbitrary amount of time.
-
-* When a signal arrives during an I/O operation, it is possible that the I/O
- operation raises an exception after the signal handler returns. This is
- dependent on the underlying Unix system's semantics regarding interrupted system
- calls.
-
-* Because the C signal handler always returns, it makes little sense to catch
- synchronous errors like :const:`SIGFPE` or :const:`SIGSEGV`.
-
-* Python installs a small number of signal handlers by default: :const:`SIGPIPE`
- is ignored (so write errors on pipes and sockets can be reported as ordinary
- Python exceptions) and :const:`SIGINT` is translated into a
- :exc:`KeyboardInterrupt` exception. All of these can be overridden.
-
-* Some care must be taken if both signals and threads are used in the same
- program. The fundamental thing to remember in using signals and threads
- simultaneously is: always perform :func:`signal` operations in the main thread
- of execution. Any thread can perform an :func:`alarm`, :func:`getsignal`,
- :func:`pause`, :func:`setitimer` or :func:`getitimer`; only the main thread
- can set a new signal handler, and the main thread will be the only one to
- receive signals (this is enforced by the Python :mod:`signal` module, even
- if the underlying thread implementation supports sending signals to
- individual threads). This means that signals can't be used as a means of
- inter-thread communication. Use locks instead.
+This module provides mechanisms to use signal handlers in Python.
+
+
+General rules
+-------------
+
+The :func:`signal.signal` function allows defining custom handlers to be
+executed when a signal is received. A small number of default handlers are
+installed: :const:`SIGPIPE` is ignored (so write errors on pipes and sockets
+can be reported as ordinary Python exceptions) and :const:`SIGINT` is
+translated into a :exc:`KeyboardInterrupt` exception.
+
+A handler for a particular signal, once set, remains installed until it is
+explicitly reset (Python emulates the BSD style interface regardless of the
+underlying implementation), with the exception of the handler for
+:const:`SIGCHLD`, which follows the underlying implementation.
+
+There is no way to "block" signals temporarily from critical sections (since
+this is not supported by all Unix flavors).
+
+
+Execution of Python signal handlers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A Python signal handler does not get executed inside the low-level (C) signal
+handler. Instead, the low-level signal handler sets a flag which tells the
+:term:`virtual machine` to execute the corresponding Python signal handler
+at a later point (for example at the next :term:`bytecode` instruction).
+This has consequences:
+
+* It makes little sense to catch synchronous errors like :const:`SIGFPE` or
+ :const:`SIGSEGV`.
+
+* A long-running calculation implemented purely in C (such as regular
+ expression matching on a large body of text) may run uninterrupted for an
+ arbitrary amount of time, regardless of any signals received. The Python
+ signal handlers will be called when the calculation finishes.
+
+
+Signals and threads
+^^^^^^^^^^^^^^^^^^^
+
+Python signal handlers are always executed in the main Python thread,
+even if the signal was received in another thread. This means that signals
+can't be used as a means of inter-thread communication. You can use
+the synchronization primitives from the :mod:`threading` module instead.
+
+Additionally, only the main thread is allowed to set a new signal handler.
+
+
+Module contents
+---------------
The variables defined in the :mod:`signal` module are:
diff --git a/Doc/library/sqlite3.rst b/Doc/library/sqlite3.rst
index f0fd86cb85..41db5c37a9 100644
--- a/Doc/library/sqlite3.rst
+++ b/Doc/library/sqlite3.rst
@@ -3,7 +3,7 @@
.. module:: sqlite3
:synopsis: A DB-API 2.0 implementation using SQLite 3.x.
-.. sectionauthor:: Gerhard Häring <gh@ghaering.de>
+.. sectionauthor:: Gerhard Häring <gh@ghaering.de>
SQLite is a C library that provides a lightweight disk-based database that
@@ -20,6 +20,7 @@ To use the module, you must first create a :class:`Connection` object that
represents the database. Here the data will be stored in the
:file:`/tmp/example` file::
+ import sqlite3
conn = sqlite3.connect('/tmp/example')
You can also supply the special name ``:memory:`` to create a database in RAM.
@@ -56,7 +57,7 @@ example::
# Never do this -- insecure!
symbol = 'IBM'
- c.execute("... where symbol = '%s'" % symbol)
+ c.execute("select * from stocks where symbol = '%s'" % symbol)
# Do this instead
t = (symbol,)
@@ -64,7 +65,7 @@ example::
# Larger example
for t in [('2006-03-28', 'BUY', 'IBM', 1000, 45.00),
- ('2006-04-05', 'BUY', 'MSOFT', 1000, 72.00),
+ ('2006-04-05', 'BUY', 'MSFT', 1000, 72.00),
('2006-04-06', 'SELL', 'IBM', 500, 53.00),
]:
c.execute('insert into stocks values (?,?,?,?,?)', t)
@@ -271,7 +272,6 @@ Connection Objects
calling the cursor method, then calls the cursor's :meth:`executemany
<Cursor.executemany>` method with the parameters given.
-
.. method:: Connection.executescript(sql_script)
This is a nonstandard shortcut that creates an intermediate cursor object by
@@ -376,22 +376,22 @@ Connection Objects
aggregates or whole new virtual table implementations. One well-known
extension is the fulltext-search extension distributed with SQLite.
+ Loadable extensions are disabled by default. See [#f1]_.
+
.. versionadded:: 3.2
.. literalinclude:: ../includes/sqlite3/load_extension.py
- Loadable extensions are disabled by default. See [#f1]_.
-
.. method:: Connection.load_extension(path)
This routine loads a SQLite extension from a shared library. You have to
enable extension loading with :meth:`enable_load_extension` before you can
use this routine.
- .. versionadded:: 3.2
-
Loadable extensions are disabled by default. See [#f1]_.
+ .. versionadded:: 3.2
+
.. attribute:: Connection.row_factory
You can change this attribute to a callable that accepts the cursor and the
diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst
index 5f5d3b6ce7..153ee44cc9 100644
--- a/Doc/library/stdtypes.rst
+++ b/Doc/library/stdtypes.rst
@@ -1437,8 +1437,13 @@ Old String Formatting Operations
.. note::
- The formatting operations described here are obsolete and may go away in future
- versions of Python. Use the new :ref:`string-formatting` in new code.
+ The formatting operations described here are modelled on C's printf()
+ syntax. They only support formatting of certain builtin types. The
+ use of a binary operator means that care may be needed in order to
+ format tuples and dictionaries correctly. As the new
+ :ref:`string-formatting` syntax is more flexible and handles tuples and
+ dictionaries naturally, it is recommended for new code. However, there
+ are no current plans to deprecate printf-style formatting.
String objects have one unique built-in operation: the ``%`` operator (modulo).
This is also known as the string *formatting* or *interpolation* operator.
diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst
index b7e87ab302..90a01d0a8f 100644
--- a/Doc/library/subprocess.rst
+++ b/Doc/library/subprocess.rst
@@ -735,7 +735,7 @@ The p1.stdout.close() call after starting the p2 is important in order for p1
to receive a SIGPIPE if p2 exits before p1.
Alternatively, for trusted input, the shell's own pipeline support may still
-be used directly:
+be used directly::
output=`dmesg | grep hda`
# becomes
diff --git a/Doc/library/syslog.rst b/Doc/library/syslog.rst
index 795d66d99f..645c326678 100644
--- a/Doc/library/syslog.rst
+++ b/Doc/library/syslog.rst
@@ -78,7 +78,8 @@ Priority levels (high to low):
Facilities:
:const:`LOG_KERN`, :const:`LOG_USER`, :const:`LOG_MAIL`, :const:`LOG_DAEMON`,
:const:`LOG_AUTH`, :const:`LOG_LPR`, :const:`LOG_NEWS`, :const:`LOG_UUCP`,
- :const:`LOG_CRON` and :const:`LOG_LOCAL0` to :const:`LOG_LOCAL7`.
+ :const:`LOG_CRON`, :const:`LOG_SYSLOG` and :const:`LOG_LOCAL0` to
+ :const:`LOG_LOCAL7`.
Log options:
:const:`LOG_PID`, :const:`LOG_CONS`, :const:`LOG_NDELAY`, :const:`LOG_NOWAIT`
diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst
index 9b3affd979..8571104c4c 100644
--- a/Doc/library/threading.rst
+++ b/Doc/library/threading.rst
@@ -218,30 +218,31 @@ Thread Objects
This class represents an activity that is run in a separate thread of control.
There are two ways to specify the activity: by passing a callable object to the
-constructor, or by overriding the :meth:`run` method in a subclass. No other
-methods (except for the constructor) should be overridden in a subclass. In
-other words, *only* override the :meth:`__init__` and :meth:`run` methods of
-this class.
+constructor, or by overriding the :meth:`~Thread.run` method in a subclass.
+No other methods (except for the constructor) should be overridden in a
+subclass. In other words, *only* override the :meth:`~Thread.__init__`
+and :meth:`~Thread.run` methods of this class.
Once a thread object is created, its activity must be started by calling the
-thread's :meth:`start` method. This invokes the :meth:`run` method in a
-separate thread of control.
+thread's :meth:`~Thread.start` method. This invokes the :meth:`~Thread.run`
+method in a separate thread of control.
Once the thread's activity is started, the thread is considered 'alive'. It
-stops being alive when its :meth:`run` method terminates -- either normally, or
-by raising an unhandled exception. The :meth:`is_alive` method tests whether the
-thread is alive.
+stops being alive when its :meth:`~Thread.run` method terminates -- either
+normally, or by raising an unhandled exception. The :meth:`~Thread.is_alive`
+method tests whether the thread is alive.
-Other threads can call a thread's :meth:`join` method. This blocks the calling
-thread until the thread whose :meth:`join` method is called is terminated.
+Other threads can call a thread's :meth:`~Thread.join` method. This blocks
+the calling thread until the thread whose :meth:`~Thread.join` method is
+called is terminated.
A thread has a name. The name can be passed to the constructor, and read or
-changed through the :attr:`name` attribute.
+changed through the :attr:`~Thread.name` attribute.
-A thread can be flagged as a "daemon thread". The significance of this flag is
-that the entire Python program exits when only daemon threads are left. The
-initial value is inherited from the creating thread. The flag can be set
-through the :attr:`daemon` property.
+A thread can be flagged as a "daemon thread". The significance of this flag
+is that the entire Python program exits when only daemon threads are left.
+The initial value is inherited from the creating thread. The flag can be
+set through the :attr:`~Thread.daemon` property.
There is a "main thread" object; this corresponds to the initial thread of
control in the Python program. It is not a daemon thread.
@@ -250,8 +251,8 @@ There is the possibility that "dummy thread objects" are created. These are
thread objects corresponding to "alien threads", which are threads of control
started outside the threading module, such as directly from C code. Dummy
thread objects have limited functionality; they are always considered alive and
-daemonic, and cannot be :meth:`join`\ ed. They are never deleted, since it is
-impossible to detect the termination of alien threads.
+daemonic, and cannot be :meth:`~Thread.join`\ ed. They are never deleted,
+since it is impossible to detect the termination of alien threads.
.. class:: Thread(group=None, target=None, name=None, args=(), kwargs={})
@@ -282,7 +283,8 @@ impossible to detect the termination of alien threads.
Start the thread's activity.
It must be called at most once per thread object. It arranges for the
- object's :meth:`run` method to be invoked in a separate thread of control.
+ object's :meth:`~Thread.run` method to be invoked in a separate thread
+ of control.
This method will raise a :exc:`RuntimeError` if called more than once
on the same thread object.
@@ -298,25 +300,27 @@ impossible to detect the termination of alien threads.
.. method:: join(timeout=None)
- Wait until the thread terminates. This blocks the calling thread until the
- thread whose :meth:`join` method is called terminates -- either normally
- or through an unhandled exception -- or until the optional timeout occurs.
+ Wait until the thread terminates. This blocks the calling thread until
+ the thread whose :meth:`~Thread.join` method is called terminates -- either
+ normally or through an unhandled exception --, or until the optional
+ timeout occurs.
When the *timeout* argument is present and not ``None``, it should be a
floating point number specifying a timeout for the operation in seconds
- (or fractions thereof). As :meth:`join` always returns ``None``, you must
- call :meth:`is_alive` after :meth:`join` to decide whether a timeout
- happened -- if the thread is still alive, the :meth:`join` call timed out.
+ (or fractions thereof). As :meth:`~Thread.join` always returns ``None``,
+ you must call :meth:`~Thread.is_alive` after :meth:`~Thread.join` to
+ decide whether a timeout happened -- if the thread is still alive, the
+ :meth:`~Thread.join` call timed out.
When the *timeout* argument is not present or ``None``, the operation will
block until the thread terminates.
- A thread can be :meth:`join`\ ed many times.
+ A thread can be :meth:`~Thread.join`\ ed many times.
- :meth:`join` raises a :exc:`RuntimeError` if an attempt is made to join
- the current thread as that would cause a deadlock. It is also an error to
- :meth:`join` a thread before it has been started and attempts to do so
- raises the same exception.
+ :meth:`~Thread.join` raises a :exc:`RuntimeError` if an attempt is made
+ to join the current thread as that would cause a deadlock. It is also
+ an error to :meth:`~Thread.join` a thread before it has been started
+ and attempts to do so raise the same exception.
.. attribute:: name
@@ -334,7 +338,7 @@ impossible to detect the termination of alien threads.
The 'thread identifier' of this thread or ``None`` if the thread has not
been started. This is a nonzero integer. See the
- :func:`thread.get_ident()` function. Thread identifiers may be recycled
+ :func:`_thread.get_ident()` function. Thread identifiers may be recycled
when a thread exits and another thread is created. The identifier is
available even after the thread has exited.
@@ -342,18 +346,18 @@ impossible to detect the termination of alien threads.
Return whether the thread is alive.
- This method returns ``True`` just before the :meth:`run` method starts
- until just after the :meth:`run` method terminates. The module function
- :func:`.enumerate` returns a list of all alive threads.
+ This method returns ``True`` just before the :meth:`~Thread.run` method
+ starts until just after the :meth:`~Thread.run` method terminates. The
+ module function :func:`.enumerate` returns a list of all alive threads.
.. attribute:: daemon
A boolean value indicating whether this thread is a daemon thread (True)
- or not (False). This must be set before :meth:`start` is called,
+ or not (False). This must be set before :meth:`~Thread.start` is called,
otherwise :exc:`RuntimeError` is raised. Its initial value is inherited
from the creating thread; the main thread is not a daemon thread and
- therefore all threads created in the main thread default to :attr:`daemon`
- = ``False``.
+ therefore all threads created in the main thread default to
+ :attr:`~Thread.daemon` = ``False``.
The entire Python program exits when no alive non-daemon threads are left.
@@ -375,19 +379,22 @@ synchronization primitive available, implemented directly by the :mod:`_thread`
extension module.
A primitive lock is in one of two states, "locked" or "unlocked". It is created
-in the unlocked state. It has two basic methods, :meth:`acquire` and
-:meth:`release`. When the state is unlocked, :meth:`acquire` changes the state
-to locked and returns immediately. When the state is locked, :meth:`acquire`
-blocks until a call to :meth:`release` in another thread changes it to unlocked,
-then the :meth:`acquire` call resets it to locked and returns. The
-:meth:`release` method should only be called in the locked state; it changes the
-state to unlocked and returns immediately. If an attempt is made to release an
-unlocked lock, a :exc:`RuntimeError` will be raised.
-
-When more than one thread is blocked in :meth:`acquire` waiting for the state to
-turn to unlocked, only one thread proceeds when a :meth:`release` call resets
-the state to unlocked; which one of the waiting threads proceeds is not defined,
-and may vary across implementations.
+in the unlocked state. It has two basic methods, :meth:`~Lock.acquire` and
+:meth:`~Lock.release`. When the state is unlocked, :meth:`~Lock.acquire`
+changes the state to locked and returns immediately. When the state is locked,
+:meth:`~Lock.acquire` blocks until a call to :meth:`~Lock.release` in another
+thread changes it to unlocked, then the :meth:`~Lock.acquire` call resets it
+to locked and returns. The :meth:`~Lock.release` method should only be
+called in the locked state; it changes the state to unlocked and returns
+immediately. If an attempt is made to release an unlocked lock, a
+:exc:`RuntimeError` will be raised.
+
+Locks also support the :ref:`context manager protocol <with-locks>`.
+
+When more than one thread is blocked in :meth:`~Lock.acquire` waiting for the
+state to turn to unlocked, only one thread proceeds when a :meth:`~Lock.release`
+call resets the state to unlocked; which one of the waiting threads proceeds
+is not defined, and may vary across implementations.
All methods are executed atomically.
@@ -424,13 +431,14 @@ All methods are executed atomically.
.. method:: Lock.release()
- Release a lock.
+ Release a lock. This can be called from any thread, not only the thread
+ which has acquired the lock.
When the lock is locked, reset it to unlocked, and return. If any other threads
are blocked waiting for the lock to become unlocked, allow exactly one of them
to proceed.
- Do not call this method when the lock is unlocked.
+ When invoked on an unlocked lock, a :exc:`ThreadError` is raised.
There is no return value.
@@ -446,12 +454,14 @@ and "recursion level" in addition to the locked/unlocked state used by primitive
locks. In the locked state, some thread owns the lock; in the unlocked state,
no thread owns it.
-To lock the lock, a thread calls its :meth:`acquire` method; this returns once
-the thread owns the lock. To unlock the lock, a thread calls its
-:meth:`release` method. :meth:`acquire`/:meth:`release` call pairs may be
-nested; only the final :meth:`release` (the :meth:`release` of the outermost
-pair) resets the lock to unlocked and allows another thread blocked in
-:meth:`acquire` to proceed.
+To lock the lock, a thread calls its :meth:`~RLock.acquire` method; this
+returns once the thread owns the lock. To unlock the lock, a thread calls
+its :meth:`~Lock.release` method. :meth:`~Lock.acquire`/:meth:`~Lock.release`
+call pairs may be nested; only the final :meth:`~Lock.release` (the
+:meth:`~Lock.release` of the outermost pair) resets the lock to unlocked and
+allows another thread blocked in :meth:`~Lock.acquire` to proceed.
+
+Reentrant locks also support the :ref:`context manager protocol <with-locks>`.
.. method:: RLock.acquire(blocking=True, timeout=-1)
@@ -503,63 +513,75 @@ Condition Objects
-----------------
A condition variable is always associated with some kind of lock; this can be
-passed in or one will be created by default. (Passing one in is useful when
-several condition variables must share the same lock.)
-
-A condition variable has :meth:`acquire` and :meth:`release` methods that call
-the corresponding methods of the associated lock. It also has a :meth:`wait`
-method, and :meth:`notify` and :meth:`notify_all` methods. These three must only
-be called when the calling thread has acquired the lock, otherwise a
-:exc:`RuntimeError` is raised.
-
-The :meth:`wait` method releases the lock, and then blocks until it is awakened
-by a :meth:`notify` or :meth:`notify_all` call for the same condition variable in
-another thread. Once awakened, it re-acquires the lock and returns. It is also
-possible to specify a timeout.
-
-The :meth:`notify` method wakes up one of the threads waiting for the condition
-variable, if any are waiting. The :meth:`notify_all` method wakes up all threads
-waiting for the condition variable.
-
-Note: the :meth:`notify` and :meth:`notify_all` methods don't release the lock;
-this means that the thread or threads awakened will not return from their
-:meth:`wait` call immediately, but only when the thread that called
-:meth:`notify` or :meth:`notify_all` finally relinquishes ownership of the lock.
-
-Tip: the typical programming style using condition variables uses the lock to
+passed in or one will be created by default. Passing one in is useful when
+several condition variables must share the same lock. The lock is part of
+the condition object: you don't have to track it separately.
+
+A condition variable obeys the :ref:`context manager protocol <with-locks>`:
+using the ``with`` statement acquires the associated lock for the duration of
+the enclosed block. The :meth:`~Condition.acquire` and
+:meth:`~Condition.release` methods also call the corresponding methods of
+the associated lock.
+
+Other methods must be called with the associated lock held. The
+:meth:`~Condition.wait` method releases the lock, and then blocks until
+another thread awakens it by calling :meth:`~Condition.notify` or
+:meth:`~Condition.notify_all`. Once awakened, :meth:`~Condition.wait`
+re-acquires the lock and returns. It is also possible to specify a timeout.
+
+The :meth:`~Condition.notify` method wakes up one of the threads waiting for
+the condition variable, if any are waiting. The :meth:`~Condition.notify_all`
+method wakes up all threads waiting for the condition variable.
+
+Note: the :meth:`~Condition.notify` and :meth:`~Condition.notify_all` methods
+don't release the lock; this means that the thread or threads awakened will
+not return from their :meth:`~Condition.wait` call immediately, but only when
+the thread that called :meth:`~Condition.notify` or :meth:`~Condition.notify_all`
+finally relinquishes ownership of the lock.
+
+
+Usage
+^^^^^
+
+The typical programming style using condition variables uses the lock to
synchronize access to some shared state; threads that are interested in a
-particular change of state call :meth:`wait` repeatedly until they see the
-desired state, while threads that modify the state call :meth:`notify` or
-:meth:`notify_all` when they change the state in such a way that it could
-possibly be a desired state for one of the waiters. For example, the following
-code is a generic producer-consumer situation with unlimited buffer capacity::
+particular change of state call :meth:`~Condition.wait` repeatedly until they
+see the desired state, while threads that modify the state call
+:meth:`~Condition.notify` or :meth:`~Condition.notify_all` when they change
+the state in such a way that it could possibly be a desired state for one
+of the waiters. For example, the following code is a generic
+producer-consumer situation with unlimited buffer capacity::
# Consume one item
- cv.acquire()
- while not an_item_is_available():
- cv.wait()
- get_an_available_item()
- cv.release()
+ with cv:
+ while not an_item_is_available():
+ cv.wait()
+ get_an_available_item()
# Produce one item
- cv.acquire()
- make_an_item_available()
- cv.notify()
- cv.release()
+ with cv:
+ make_an_item_available()
+
+The ``while`` loop checking for the application's condition is necessary
+because :meth:`~Condition.wait` can return after an arbitrary long time,
+and other threads may have exhausted the available items in between. This
+is inherent to multi-threaded programming. The :meth:`~Condition.wait_for`
+method can be used to automate the condition checking::
-To choose between :meth:`notify` and :meth:`notify_all`, consider whether one
-state change can be interesting for only one or several waiting threads. E.g.
-in a typical producer-consumer situation, adding one item to the buffer only
-needs to wake up one consumer thread.
+ # Consume an item
+ with cv:
+ cv.wait_for(an_item_is_available)
+ get_an_available_item()
-Note: Condition variables can be, depending on the implementation, subject
-to both spurious wakeups (when :meth:`wait` returns without a :meth:`notify`
-call) and stolen wakeups (when another thread acquires the lock before the
-awoken thread.) For this reason, it is always necessary to verify the state
-the thread is waiting for when :meth:`wait` returns and optionally repeat
-the call as often as necessary.
+To choose between :meth:`~Condition.notify` and :meth:`~Condition.notify_all`,
+consider whether one state change can be interesting for only one or several
+waiting threads. E.g. in a typical producer-consumer situation, adding one
+item to the buffer only needs to wake up one consumer thread.
+Interface
+^^^^^^^^^
+
.. class:: Condition(lock=None)
If the *lock* argument is given and not ``None``, it must be a :class:`Lock`
@@ -626,12 +648,6 @@ the call as often as necessary.
      held when called and is re-acquired on return.  The predicate is evaluated
with the lock held.
- Using this method, the consumer example above can be written thus::
-
- with cv:
- cv.wait_for(an_item_is_available)
- get_an_available_item()
-
.. versionadded:: 3.2
.. method:: notify(n=1)
@@ -667,12 +683,16 @@ Semaphore Objects
This is one of the oldest synchronization primitives in the history of computer
science, invented by the early Dutch computer scientist Edsger W. Dijkstra (he
-used :meth:`P` and :meth:`V` instead of :meth:`acquire` and :meth:`release`).
+used the names ``P()`` and ``V()`` instead of :meth:`~Semaphore.acquire` and
+:meth:`~Semaphore.release`).
A semaphore manages an internal counter which is decremented by each
-:meth:`acquire` call and incremented by each :meth:`release` call. The counter
-can never go below zero; when :meth:`acquire` finds that it is zero, it blocks,
-waiting until some other thread calls :meth:`release`.
+:meth:`~Semaphore.acquire` call and incremented by each :meth:`~Semaphore.release`
+call. The counter can never go below zero; when :meth:`~Semaphore.acquire`
+finds that it is zero, it blocks, waiting until some other thread calls
+:meth:`~Semaphore.release`.
+
+Semaphores also support the :ref:`context manager protocol <with-locks>`.
.. class:: Semaphore(value=1)
@@ -688,11 +708,12 @@ waiting until some other thread calls :meth:`release`.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called
- :meth:`release` to make it larger than zero. This is done with proper
- interlocking so that if multiple :meth:`acquire` calls are blocked,
- :meth:`release` will wake exactly one of them up. The implementation may
- pick one at random, so the order in which blocked threads are awakened
- should not be relied on. Returns true (or blocks indefinitely).
+ :meth:`~Semaphore.release` to make it larger than zero. This is done
+ with proper interlocking so that if multiple :meth:`acquire` calls are
+ blocked, :meth:`~Semaphore.release` will wake exactly one of them up.
+ The implementation may pick one at random, so the order in which
+ blocked threads are awakened should not be relied on. Returns
+ true (or blocks indefinitely).
When invoked with *blocking* set to false, do not block. If a call
without an argument would block, return false immediately; otherwise,
@@ -729,11 +750,12 @@ main thread would initialize the semaphore::
Once spawned, worker threads call the semaphore's acquire and release methods
when they need to connect to the server::
- pool_sema.acquire()
- conn = connectdb()
- ... use connection ...
- conn.close()
- pool_sema.release()
+ with pool_sema:
+ conn = connectdb()
+ try:
+ ... use connection ...
+ finally:
+ conn.close()
The use of a bounded semaphore reduces the chance that a programming error which
causes the semaphore to be released more than it's acquired will go undetected.
@@ -748,8 +770,8 @@ This is one of the simplest mechanisms for communication between threads: one
thread signals an event and other threads wait for it.
An event object manages an internal flag that can be set to true with the
-:meth:`~Event.set` method and reset to false with the :meth:`clear` method. The
-:meth:`wait` method blocks until the flag is true.
+:meth:`~Event.set` method and reset to false with the :meth:`~Event.clear`
+method. The :meth:`~Event.wait` method blocks until the flag is true.
.. class:: Event()
@@ -776,7 +798,7 @@ An event object manages an internal flag that can be set to true with the
Block until the internal flag is true. If the internal flag is true on
entry, return immediately. Otherwise, block until another thread calls
- :meth:`set` to set the flag to true, or until the optional timeout occurs.
+ :meth:`.set` to set the flag to true, or until the optional timeout occurs.
When the timeout argument is present and not ``None``, it should be a
floating point number specifying a timeout for the operation in seconds
@@ -832,8 +854,8 @@ Barrier Objects
This class provides a simple synchronization primitive for use by a fixed number
of threads that need to wait for each other. Each of the threads tries to pass
-the barrier by calling the :meth:`wait` method and will block until all of the
-threads have made the call. At this points, the threads are released
+the barrier by calling the :meth:`~Barrier.wait` method and will block until
+all of the threads have made the call. At this point, the threads are released
simultaneously.
The barrier can be reused any number of times for the same number of threads.
@@ -934,19 +956,24 @@ Using locks, conditions, and semaphores in the :keyword:`with` statement
All of the objects provided by this module that have :meth:`acquire` and
:meth:`release` methods can be used as context managers for a :keyword:`with`
-statement. The :meth:`acquire` method will be called when the block is entered,
-and :meth:`release` will be called when the block is exited.
+statement. The :meth:`acquire` method will be called when the block is
+entered, and :meth:`release` will be called when the block is exited. Hence,
+the following snippet::
-Currently, :class:`Lock`, :class:`RLock`, :class:`Condition`,
-:class:`Semaphore`, and :class:`BoundedSemaphore` objects may be used as
-:keyword:`with` statement context managers. For example::
+ with some_lock:
+ # do something...
- import threading
+is equivalent to::
- some_rlock = threading.RLock()
+ some_lock.acquire()
+ try:
+ # do something...
+ finally:
+ some_lock.release()
- with some_rlock:
- print("some_rlock is locked while this executes")
+Currently, :class:`Lock`, :class:`RLock`, :class:`Condition`,
+:class:`Semaphore`, and :class:`BoundedSemaphore` objects may be used as
+:keyword:`with` statement context managers.
.. _threaded-imports:
diff --git a/Doc/library/time.rst b/Doc/library/time.rst
index 7c464ac245..7854fbd4da 100644
--- a/Doc/library/time.rst
+++ b/Doc/library/time.rst
@@ -430,8 +430,8 @@ The module defines the following functions and data items:
.. function:: time()
- Return the time as a floating point number expressed in seconds since the epoch,
- in UTC. Note that even though the time is always returned as a floating point
+ Return the time in seconds since the epoch as a floating point number.
+ Note that even though the time is always returned as a floating point
number, not all systems provide time with a better precision than 1 second.
While this function normally returns non-decreasing values, it can return a
lower value than a previous call if the system clock has been set back between
diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst
index bdf07a40cb..b130a8b088 100644
--- a/Doc/library/unittest.rst
+++ b/Doc/library/unittest.rst
@@ -640,7 +640,7 @@ This is the output of running the example above in verbose mode: ::
Classes can be skipped just like methods: ::
- @skip("showing class skipping")
+ @unittest.skip("showing class skipping")
class MySkippedTestCase(unittest.TestCase):
def test_not_run(self):
pass
diff --git a/Doc/library/urllib.parse.rst b/Doc/library/urllib.parse.rst
index aece7149ef..b33e8fe1ed 100644
--- a/Doc/library/urllib.parse.rst
+++ b/Doc/library/urllib.parse.rst
@@ -512,9 +512,10 @@ task isn't already covered by the URL parsing functions above.
Convert a mapping object or a sequence of two-element tuples, which may
either be a :class:`str` or a :class:`bytes`, to a "percent-encoded"
- string. The resultant string must be converted to bytes using the
- user-specified encoding before it is sent to :func:`urlopen` as the optional
- *data* argument.
+ string. If the resultant string is to be used as *data* for a POST
+ operation with the :func:`urlopen` function, then it should be properly
+ encoded to bytes, otherwise it would result in a :exc:`TypeError`.
+
The resulting string is a series of ``key=value`` pairs separated by ``'&'``
characters, where both *key* and *value* are quoted using :func:`quote_plus`
above. When a sequence of two-element tuples is used as the *query*
diff --git a/Doc/library/urllib.request.rst b/Doc/library/urllib.request.rst
index cc759a4465..58f33e3a31 100644
--- a/Doc/library/urllib.request.rst
+++ b/Doc/library/urllib.request.rst
@@ -2,9 +2,10 @@
=============================================================
.. module:: urllib.request
- :synopsis: Next generation URL opening library.
+ :synopsis: Extensible library for opening URLs.
.. moduleauthor:: Jeremy Hylton <jeremy@alum.mit.edu>
.. sectionauthor:: Moshe Zadka <moshez@users.sourceforge.net>
+.. sectionauthor:: Senthil Kumaran <senthil@uthcode.com>
The :mod:`urllib.request` module defines functions and classes which help in
@@ -20,16 +21,26 @@ The :mod:`urllib.request` module defines the following functions:
Open the URL *url*, which can be either a string or a
:class:`Request` object.
- *data* may be a bytes object specifying additional data to send to the
+ *data* must be a bytes object specifying additional data to be sent to the
server, or ``None`` if no such data is needed. *data* may also be an
iterable object and in that case Content-Length value must be specified in
the headers. Currently HTTP requests are the only ones that use *data*; the
HTTP request will be a POST instead of a GET when the *data* parameter is
- provided. *data* should be a buffer in the standard
+ provided.
+
+ *data* should be a buffer in the standard
:mimetype:`application/x-www-form-urlencoded` format. The
:func:`urllib.parse.urlencode` function takes a mapping or sequence of
- 2-tuples and returns a string in this format. urllib.request module uses
- HTTP/1.1 and includes ``Connection:close`` header in its HTTP requests.
+ 2-tuples and returns a string in this format. It should be encoded to bytes
+ before being used as the *data* parameter. The charset parameter in
+ ``Content-Type`` header may be used to specify the encoding. If charset
+ parameter is not sent with the Content-Type header, the server following the
+ HTTP 1.1 recommendation may assume that the data is encoded in ISO-8859-1
+ encoding. It is advisable to use charset parameter with encoding used in
+ ``Content-Type`` header with the :class:`Request`.
+
+ urllib.request module uses HTTP/1.1 and includes ``Connection:close`` header
+ in its HTTP requests.
The optional *timeout* parameter specifies a timeout in seconds for
blocking operations like the connection attempt (if not specified,
@@ -46,8 +57,8 @@ The :mod:`urllib.request` module defines the following functions:
If neither *cafile* nor *capath* is specified, an HTTPS request
will not do any verification of the server's certificate.
- This function returns a file-like object with two additional methods from
- the :mod:`urllib.response` module
+ This function returns a file-like object that works as a :term:`context manager`,
+ with two additional methods from the :mod:`urllib.response` module
* :meth:`geturl` --- return the URL of the resource retrieved,
commonly used to determine if a redirect was followed
@@ -66,9 +77,10 @@ The :mod:`urllib.request` module defines the following functions:
are handled through the proxy when they are set.
The legacy ``urllib.urlopen`` function from Python 2.6 and earlier has been
- discontinued; :func:`urlopen` corresponds to the old ``urllib2.urlopen``.
- Proxy handling, which was done by passing a dictionary parameter to
- ``urllib.urlopen``, can be obtained by using :class:`ProxyHandler` objects.
+ discontinued; :func:`urllib.request.urlopen` corresponds to the old
+ ``urllib2.urlopen``. Proxy handling, which was done by passing a dictionary
+ parameter to ``urllib.urlopen``, can be obtained by using
+ :class:`ProxyHandler` objects.
.. versionchanged:: 3.2
*cafile* and *capath* were added.
@@ -83,10 +95,11 @@ The :mod:`urllib.request` module defines the following functions:
.. function:: install_opener(opener)
Install an :class:`OpenerDirector` instance as the default global opener.
- Installing an opener is only necessary if you want urlopen to use that opener;
- otherwise, simply call :meth:`OpenerDirector.open` instead of :func:`urlopen`.
- The code does not check for a real :class:`OpenerDirector`, and any class with
- the appropriate interface will work.
+ Installing an opener is only necessary if you want urlopen to use that
+ opener; otherwise, simply call :meth:`OpenerDirector.open` instead of
+ :func:`~urllib.request.urlopen`. The code does not check for a real
+ :class:`OpenerDirector`, and any class with the appropriate interface will
+ work.
.. function:: build_opener([handler, ...])
@@ -138,14 +151,21 @@ The following classes are provided:
*url* should be a string containing a valid URL.
- *data* may be a string specifying additional data to send to the
- server, or ``None`` if no such data is needed. Currently HTTP
- requests are the only ones that use *data*; the HTTP request will
- be a POST instead of a GET when the *data* parameter is provided.
- *data* should be a buffer in the standard
- :mimetype:`application/x-www-form-urlencoded` format. The
- :func:`urllib.parse.urlencode` function takes a mapping or sequence
- of 2-tuples and returns a string in this format.
+ *data* must be a bytes object specifying additional data to send to the
+ server, or ``None`` if no such data is needed. Currently HTTP requests are
+ the only ones that use *data*; the HTTP request will be a POST instead of a
+ GET when the *data* parameter is provided. *data* should be a buffer in the
+ standard :mimetype:`application/x-www-form-urlencoded` format.
+
+ The :func:`urllib.parse.urlencode` function takes a mapping or sequence of
+ 2-tuples and returns a string in this format. It should be encoded to bytes
+ before being used as the *data* parameter. The charset parameter in
+ ``Content-Type`` header may be used to specify the encoding. If charset
+ parameter is not sent with the Content-Type header, the server following the
+ HTTP 1.1 recommendation may assume that the data is encoded in ISO-8859-1
+ encoding. It is advisable to use charset parameter with encoding used in
+ ``Content-Type`` header with the :class:`Request`.
+
*headers* should be a dictionary, and will be treated as if
:meth:`add_header` was called with each key and value as arguments.
@@ -157,6 +177,9 @@ The following classes are provided:
:mod:`urllib`'s default user agent string is
``"Python-urllib/2.6"`` (on Python 2.6).
+ An example of using ``Content-Type`` header with *data* argument would be
+ sending a dictionary like ``{"Content-Type": "application/x-www-form-urlencoded;charset=utf-8"}``.
+
The final two arguments are only of interest for correct handling
of third-party HTTP cookies:
@@ -967,8 +990,17 @@ The following W3C document, http://www.w3.org/International/O-charset , lists
the various ways in which a (X)HTML or a XML document could have specified its
encoding information.
-As python.org website uses *utf-8* encoding as specified in it's meta tag, we
-will use same for decoding the bytes object. ::
+As the python.org website uses *utf-8* encoding as specified in its meta tag, we
+will use the same for decoding the bytes object. ::
+
+ >>> with urllib.request.urlopen('http://www.python.org/') as f:
+ ... print(f.read(100).decode('utf-8'))
+ ...
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtm
+
+It is also possible to achieve the same result without using the
+:term:`context manager` approach. ::
>>> import urllib.request
>>> f = urllib.request.urlopen('http://www.python.org/')
@@ -976,7 +1008,6 @@ will use same for decoding the bytes object. ::
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtm
-
In the following example, we are sending a data-stream to the stdin of a CGI
and reading the data it returns to us. Note that this example will only work
when the Python installation supports SSL. ::
@@ -1045,8 +1076,9 @@ every :class:`Request`. To change this::
opener.open('http://www.example.com/')
Also, remember that a few standard headers (:mailheader:`Content-Length`,
-:mailheader:`Content-Type` and :mailheader:`Host`) are added when the
-:class:`Request` is passed to :func:`urlopen` (or :meth:`OpenerDirector.open`).
+:mailheader:`Content-Type` without charset parameter and :mailheader:`Host`)
+are added when the :class:`Request` is passed to :func:`urlopen` (or
+:meth:`OpenerDirector.open`).
.. _urllib-examples:
@@ -1064,9 +1096,12 @@ from urlencode is encoded to bytes before it is sent to urlopen as data::
>>> import urllib.request
>>> import urllib.parse
- >>> params = urllib.parse.urlencode({'spam': 1, 'eggs': 2, 'bacon': 0})
- >>> params = params.encode('utf-8')
- >>> f = urllib.request.urlopen("http://www.musi-cal.com/cgi-bin/query", params)
+ >>> data = urllib.parse.urlencode({'spam': 1, 'eggs': 2, 'bacon': 0})
+ >>> data = data.encode('utf-8')
+ >>> request = urllib.request.Request("http://requestb.in/xrbl82xr")
+ >>> # adding charset parameter to the Content-Type header.
+ >>> request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
+ >>> f = urllib.request.urlopen(request, data)
>>> print(f.read().decode('utf-8'))
The following example uses an explicitly specified HTTP proxy, overriding
@@ -1114,10 +1149,10 @@ some point in the future.
size in response to a retrieval request.
If the *url* uses the :file:`http:` scheme identifier, the optional *data*
- argument may be given to specify a ``POST`` request (normally the request type
- is ``GET``). The *data* argument must in standard
- :mimetype:`application/x-www-form-urlencoded` format; see the :func:`urlencode`
- function below.
+ argument may be given to specify a ``POST`` request (normally the request
+ type is ``GET``). The *data* argument must be a bytes object in standard
+ :mimetype:`application/x-www-form-urlencoded` format; see the
+ :func:`urlencode` function below.
:func:`urlretrieve` will raise :exc:`ContentTooShortError` when it detects that
the amount of data available was less than the expected amount (which is the
diff --git a/Doc/library/webbrowser.rst b/Doc/library/webbrowser.rst
index 23ba6c5471..dfb09ee40e 100644
--- a/Doc/library/webbrowser.rst
+++ b/Doc/library/webbrowser.rst
@@ -137,6 +137,8 @@ for the controller classes, all defined in this module.
+-----------------------+-----------------------------------------+-------+
| ``'macosx'`` | :class:`MacOSX('default')` | \(4) |
+-----------------------+-----------------------------------------+-------+
+| ``'safari'`` | :class:`MacOSX('safari')` | \(4) |
++-----------------------+-----------------------------------------+-------+
Notes:
diff --git a/Doc/library/xml.dom.minidom.rst b/Doc/library/xml.dom.minidom.rst
index ab5476d91f..ae286b0ad9 100644
--- a/Doc/library/xml.dom.minidom.rst
+++ b/Doc/library/xml.dom.minidom.rst
@@ -15,6 +15,14 @@
Model interface. It is intended to be simpler than the full DOM and also
significantly smaller.
+.. note::
+
+ The :mod:`xml.dom.minidom` module provides an implementation of the W3C-DOM,
+ with an API similar to that in other programming languages. Users who are
+ unfamiliar with the W3C-DOM interface or who would like to write less code
+ for processing XML files should consider using the
+ :mod:`xml.etree.ElementTree` module instead.
+
DOM applications typically start by parsing some XML into a DOM. With
:mod:`xml.dom.minidom`, this is done through the parse functions::
diff --git a/Doc/library/xml.dom.pulldom.rst b/Doc/library/xml.dom.pulldom.rst
index 4a5ef4c135..eb16a0933d 100644
--- a/Doc/library/xml.dom.pulldom.rst
+++ b/Doc/library/xml.dom.pulldom.rst
@@ -9,34 +9,72 @@
--------------
-:mod:`xml.dom.pulldom` allows building only selected portions of a Document
-Object Model representation of a document from SAX events.
+The :mod:`xml.dom.pulldom` module provides a "pull parser" which can also be
+asked to produce DOM-accessible fragments of the document where necessary. The
+basic concept involves pulling "events" from a stream of incoming XML and
+processing them. In contrast to SAX which also employs an event-driven
+processing model together with callbacks, the user of a pull parser is
+responsible for explicitly pulling events from the stream, looping over those
+events until either processing is finished or an error condition occurs.
+Example::
-.. class:: PullDOM(documentFactory=None)
+ from xml.dom import pulldom
- :class:`xml.sax.handler.ContentHandler` implementation that ...
+ doc = pulldom.parse('sales_items.xml')
+ for event, node in doc:
+ if event == pulldom.START_ELEMENT and node.tagName == 'item':
+ if int(node.getAttribute('price')) > 50:
+ doc.expandNode(node)
+ print(node.toxml())
+``event`` is a constant and can be one of:
-.. class:: DOMEventStream(stream, parser, bufsize)
+* :data:`START_ELEMENT`
+* :data:`END_ELEMENT`
+* :data:`COMMENT`
+* :data:`START_DOCUMENT`
+* :data:`END_DOCUMENT`
+* :data:`CHARACTERS`
+* :data:`PROCESSING_INSTRUCTION`
+* :data:`IGNORABLE_WHITESPACE`
+
+``node`` is an object of type :class:`xml.dom.minidom.Document`,
+:class:`xml.dom.minidom.Element` or :class:`xml.dom.minidom.Text`.
+
+Since the document is treated as a "flat" stream of events, the document "tree"
+is implicitly traversed and the desired elements are found regardless of their
+depth in the tree. In other words, one does not need to consider hierarchical
+issues such as recursive searching of the document nodes, although if the
+context of elements were important, one would either need to maintain some
+context-related state (i.e. remembering where one is in the document at any
+given point) or to make use of the :func:`DOMEventStream.expandNode` method
+and switch to DOM-related processing.
+
+
+.. class:: PullDOM(documentFactory=None)
- ...
+ Subclass of :class:`xml.sax.handler.ContentHandler`.
.. class:: SAX2DOM(documentFactory=None)
- :class:`xml.sax.handler.ContentHandler` implementation that ...
+ Subclass of :class:`xml.sax.handler.ContentHandler`.
.. function:: parse(stream_or_string, parser=None, bufsize=None)
- ...
+ Return a :class:`DOMEventStream` from the given input. *stream_or_string* may be
+ either a file name, or a file-like object. *parser*, if given, must be a
+ :class:`XMLReader` object. This function will change the document handler of the
+ parser and activate namespace support; other parser configuration (like
+ setting an entity resolver) must have been done in advance.
+If you have XML in a string, you can use the :func:`parseString` function instead:
.. function:: parseString(string, parser=None)
- ...
-
+ Return a :class:`DOMEventStream` that represents the (Unicode) *string*.
.. data:: default_bufsize
@@ -45,24 +83,37 @@ Object Model representation of a document from SAX events.
The value of this variable can be changed before calling :func:`parse` and
the new value will take effect.
-
.. _domeventstream-objects:
DOMEventStream Objects
----------------------
+.. class:: DOMEventStream(stream, parser, bufsize)
-.. method:: DOMEventStream.getEvent()
-
- ...
+ .. method:: getEvent()
-.. method:: DOMEventStream.expandNode(node)
+ Return a tuple containing *event* and the current *node* as
+ :class:`xml.dom.minidom.Document` if event equals :data:`START_DOCUMENT`,
+ :class:`xml.dom.minidom.Element` if event equals :data:`START_ELEMENT` or
+ :data:`END_ELEMENT` or :class:`xml.dom.minidom.Text` if event equals
+ :data:`CHARACTERS`.
+ The current node does not contain information about its children, unless
+ :func:`expandNode` is called.
- ...
+ .. method:: expandNode(node)
+ Expands all children of *node* into *node*. Example::
-.. method:: DOMEventStream.reset()
+ xml = '<html><title>Foo</title> <p>Some text <div>and more</div></p> </html>'
+ doc = pulldom.parseString(xml)
+ for event, node in doc:
+ if event == pulldom.START_ELEMENT and node.tagName == 'p':
+ # Following statement only prints '<p/>'
+ print(node.toxml())
+ doc.expandNode(node)
+ # Following statement prints node with all its children '<p>Some text <div>and more</div></p>'
+ print(node.toxml())
- ...
+ .. method:: reset()
diff --git a/Doc/library/xml.etree.elementtree.rst b/Doc/library/xml.etree.elementtree.rst
index a46d99d969..c5c880290c 100644
--- a/Doc/library/xml.etree.elementtree.rst
+++ b/Doc/library/xml.etree.elementtree.rst
@@ -95,11 +95,14 @@ Functions
.. function:: iterparse(source, events=None, parser=None)
Parses an XML section into an element tree incrementally, and reports what's
- going on to the user. *source* is a filename or :term:`file object` containing
- XML data. *events* is a list of events to report back. If omitted, only "end"
- events are reported. *parser* is an optional parser instance. If not
- given, the standard :class:`XMLParser` parser is used. Returns an
- :term:`iterator` providing ``(event, elem)`` pairs.
+ going on to the user. *source* is a filename or :term:`file object`
+ containing XML data. *events* is a list of events to report back. The
+ supported events are the strings ``"start"``, ``"end"``, ``"start-ns"``
+ and ``"end-ns"`` (the "ns" events are used to get detailed namespace
+ information). If *events* is omitted, only ``"end"`` events are reported.
+ *parser* is an optional parser instance. If not given, the standard
+ :class:`XMLParser` parser is used. Returns an :term:`iterator` providing
+ ``(event, elem)`` pairs.
.. note::
diff --git a/Doc/reference/compound_stmts.rst b/Doc/reference/compound_stmts.rst
index aea08e0805..4ce7324550 100644
--- a/Doc/reference/compound_stmts.rst
+++ b/Doc/reference/compound_stmts.rst
@@ -535,6 +535,11 @@ returned or passed around. Free variables used in the nested function can
access the local variables of the function containing the def. See section
:ref:`naming` for details.
+.. seealso::
+
+ :pep:`3107` - Function Annotations
+ The original specification for function annotations.
+
.. _class:
diff --git a/Doc/tools/sphinxext/download.html b/Doc/tools/sphinxext/download.html
index f89c4585f0..31a53cfb39 100644
--- a/Doc/tools/sphinxext/download.html
+++ b/Doc/tools/sphinxext/download.html
@@ -39,9 +39,13 @@ in the table are the size of the download files in megabytes.</p>
</tr>
</table>
-
<p>These archives contain all the content in the documentation.</p>
+<p>HTML Help (<tt>.chm</tt>) files are made available in the "Windows" section
+on the <a href="http://python.org/download/releases/{{ release[:5] }}/">Python
+download page</a>.</p>
+
+
<h2>Unpacking</h2>
<p>Unix users should download the .tar.bz2 archives; these are bzipped tar
diff --git a/Doc/tools/sphinxext/layout.html b/Doc/tools/sphinxext/layout.html
index d4bb105e83..db4a386e89 100644
--- a/Doc/tools/sphinxext/layout.html
+++ b/Doc/tools/sphinxext/layout.html
@@ -2,6 +2,7 @@
{% block rootrellink %}
<li><img src="{{ pathto('_static/py.png', 1) }}" alt=""
style="vertical-align: middle; margin-top: -1px"/></li>
+ <li><a href="http://www.python.org/">Python</a>{{ reldelim1 }}</li>
<li><a href="{{ pathto('index') }}">{{ shorttitle }}</a>{{ reldelim1 }}</li>
{% endblock %}
{% block extrahead %}
diff --git a/Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css b/Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css
new file mode 100644
index 0000000000..9942ca631d
--- /dev/null
+++ b/Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css
@@ -0,0 +1,170 @@
+@import url("default.css");
+
+body {
+ background-color: white;
+ margin-left: 1em;
+ margin-right: 1em;
+}
+
+div.related {
+ margin-bottom: 1.2em;
+ padding: 0.5em 0;
+ border-top: 1px solid #ccc;
+ margin-top: 0.5em;
+}
+
+div.related a:hover {
+ color: #0095C4;
+}
+
+div.related:first-child {
+ border-top: 0;
+ border-bottom: 1px solid #ccc;
+}
+
+div.sphinxsidebar {
+ background-color: #eeeeee;
+ border-radius: 5px;
+ line-height: 130%;
+ font-size: smaller;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+ margin-top: 1.5em;
+}
+
+div.sphinxsidebarwrapper > h3:first-child {
+ margin-top: 0.2em;
+}
+
+div.sphinxsidebarwrapper > ul > li > ul > li {
+ margin-bottom: 0.4em;
+}
+
+div.sphinxsidebar a:hover {
+ color: #0095C4;
+}
+
+div.sphinxsidebar input {
+ font-family: 'Lucida Grande',Arial,sans-serif;
+ border: 1px solid #999999;
+ font-size: smaller;
+ border-radius: 3px;
+}
+
+div.sphinxsidebar input[type=text] {
+ max-width: 150px;
+}
+
+div.body {
+ padding: 0 0 0 1.2em;
+}
+
+div.body p {
+ line-height: 140%;
+}
+
+div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 {
+ margin: 0;
+ border: 0;
+ padding: 0.3em 0;
+}
+
+div.body hr {
+ border: 0;
+ background-color: #ccc;
+ height: 1px;
+}
+
+div.body pre {
+ border-radius: 3px;
+ border: 1px solid #ac9;
+}
+
+div.body div.admonition, div.body div.impl-detail {
+ border-radius: 3px;
+}
+
+div.body div.impl-detail > p {
+ margin: 0;
+}
+
+div.body div.seealso {
+ border: 1px solid #dddd66;
+}
+
+div.body a {
+ color: #00608f;
+}
+
+div.body a:visited {
+ color: #30306f;
+}
+
+div.body a:hover {
+ color: #00B0E4;
+}
+
+tt, pre {
+ font-family: monospace, sans-serif;
+ font-size: 96.5%;
+}
+
+div.body tt {
+ border-radius: 3px;
+}
+
+div.body tt.descname {
+ font-size: 120%;
+}
+
+div.body tt.xref, div.body a tt {
+ font-weight: normal;
+}
+
+p.deprecated {
+ border-radius: 3px;
+}
+
+table.docutils {
+ border: 1px solid #ddd;
+ min-width: 20%;
+ border-radius: 3px;
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+
+table.docutils td, table.docutils th {
+ border: 1px solid #ddd !important;
+ border-radius: 3px;
+}
+
+table p, table li {
+ text-align: left !important;
+}
+
+table.docutils th {
+ background-color: #eee;
+ padding: 0.3em 0.5em;
+}
+
+table.docutils td {
+ background-color: white;
+ padding: 0.3em 0.5em;
+}
+
+table.footnote, table.footnote td {
+ border: 0 !important;
+}
+
+div.footer {
+ line-height: 150%;
+ margin-top: -2em;
+ text-align: right;
+ width: auto;
+ margin-right: 10px;
+}
+
+div.footer a:hover {
+ color: #0095C4;
+}
diff --git a/Doc/tools/sphinxext/pydoctheme/theme.conf b/Doc/tools/sphinxext/pydoctheme/theme.conf
new file mode 100644
index 0000000000..0c43881674
--- /dev/null
+++ b/Doc/tools/sphinxext/pydoctheme/theme.conf
@@ -0,0 +1,23 @@
+[theme]
+inherit = default
+stylesheet = pydoctheme.css
+pygments_style = sphinx
+
+[options]
+bodyfont = 'Lucida Grande', Arial, sans-serif
+headfont = 'Lucida Grande', Arial, sans-serif
+footerbgcolor = white
+footertextcolor = #555555
+relbarbgcolor = white
+relbartextcolor = #666666
+relbarlinkcolor = #444444
+sidebarbgcolor = white
+sidebartextcolor = #444444
+sidebarlinkcolor = #444444
+bgcolor = white
+textcolor = #222222
+linkcolor = #0090c0
+visitedlinkcolor = #00608f
+headtextcolor = #1a1a1a
+headbgcolor = white
+headlinkcolor = #aaaaaa
diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py
index 43292816a2..89bb86fc22 100644
--- a/Doc/tools/sphinxext/pyspecific.py
+++ b/Doc/tools/sphinxext/pyspecific.py
@@ -27,10 +27,10 @@ def new_visit_versionmodified(self, node):
self.body.append(self.starttag(node, 'p', CLASS=node['type']))
text = versionlabels[node['type']] % node['version']
if len(node):
- text += ': '
+ text += ':'
else:
text += '.'
- self.body.append('<span class="versionmodified">%s</span>' % text)
+ self.body.append('<span class="versionmodified">%s</span> ' % text)
from sphinx.writers.html import HTMLTranslator
from sphinx.locale import versionlabels
diff --git a/Doc/tools/sphinxext/static/copybutton.js b/Doc/tools/sphinxext/static/copybutton.js
index a3b1099e3a..5d82c672be 100644
--- a/Doc/tools/sphinxext/static/copybutton.js
+++ b/Doc/tools/sphinxext/static/copybutton.js
@@ -17,7 +17,8 @@ $(document).ready(function() {
'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0',
'border-color': border_color, 'border-style': border_style,
'border-width': border_width, 'color': border_color, 'text-size': '75%',
- 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em'
+ 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em',
+ 'border-radius': '0 3px 0 0'
}
// create and add the button to all the code blocks that contain >>>
diff --git a/Doc/tools/sphinxext/static/sidebar.js b/Doc/tools/sphinxext/static/sidebar.js
new file mode 100644
index 0000000000..0c410e6aa8
--- /dev/null
+++ b/Doc/tools/sphinxext/static/sidebar.js
@@ -0,0 +1,155 @@
+/*
+ * sidebar.js
+ * ~~~~~~~~~~
+ *
+ * This script makes the Sphinx sidebar collapsible.
+ *
+ * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds in
+ * .sphinxsidebar, after .sphinxsidebarwrapper, the #sidebarbutton used to
+ * collapse and expand the sidebar.
+ *
+ * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden and the
+ * width of the sidebar and the margin-left of the document are decreased.
+ * When the sidebar is expanded the opposite happens. This script saves a
+ * per-browser/per-session cookie used to remember the position of the sidebar
+ * among the pages. Once the browser is closed the cookie is deleted and the
+ * position reset to the default (expanded).
+ *
+ * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+$(function() {
+ // global elements used by the functions.
+ // the 'sidebarbutton' element is defined as global after its
+ // creation, in the add_sidebar_button function
+ var bodywrapper = $('.bodywrapper');
+ var sidebar = $('.sphinxsidebar');
+ var sidebarwrapper = $('.sphinxsidebarwrapper');
+
+ // original margin-left of the bodywrapper and width of the sidebar
+ // with the sidebar expanded
+ var bw_margin_expanded = bodywrapper.css('margin-left');
+ var ssb_width_expanded = sidebar.width();
+
+ // margin-left of the bodywrapper and width of the sidebar
+ // with the sidebar collapsed
+ var bw_margin_collapsed = '.8em';
+ var ssb_width_collapsed = '.8em';
+
+ // colors used by the current theme
+ var dark_color = '#AAAAAA';
+ var light_color = '#CCCCCC';
+
+ function sidebar_is_collapsed() {
+ return sidebarwrapper.is(':not(:visible)');
+ }
+
+ function toggle_sidebar() {
+ if (sidebar_is_collapsed())
+ expand_sidebar();
+ else
+ collapse_sidebar();
+ }
+
+ function collapse_sidebar() {
+ sidebarwrapper.hide();
+ sidebar.css('width', ssb_width_collapsed);
+ bodywrapper.css('margin-left', bw_margin_collapsed);
+ sidebarbutton.css({
+ 'margin-left': '0',
+ 'height': bodywrapper.height(),
+ 'border-radius': '5px'
+ });
+ sidebarbutton.find('span').text('»');
+ sidebarbutton.attr('title', _('Expand sidebar'));
+ document.cookie = 'sidebar=collapsed';
+ }
+
+ function expand_sidebar() {
+ bodywrapper.css('margin-left', bw_margin_expanded);
+ sidebar.css('width', ssb_width_expanded);
+ sidebarwrapper.show();
+ sidebarbutton.css({
+ 'margin-left': ssb_width_expanded-12,
+ 'height': bodywrapper.height(),
+ 'border-radius': '0 5px 5px 0'
+ });
+ sidebarbutton.find('span').text('«');
+ sidebarbutton.attr('title', _('Collapse sidebar'));
+ //sidebarwrapper.css({'padding-top':
+ // Math.max(window.pageYOffset - sidebarwrapper.offset().top, 10)});
+ document.cookie = 'sidebar=expanded';
+ }
+
+ function add_sidebar_button() {
+ sidebarwrapper.css({
+ 'float': 'left',
+ 'margin-right': '0',
+ 'width': ssb_width_expanded - 28
+ });
+ // create the button
+ sidebar.append(
+ '<div id="sidebarbutton"><span>&laquo;</span></div>'
+ );
+ var sidebarbutton = $('#sidebarbutton');
+ // find the height of the viewport to center the '<<' in the page
+ var viewport_height;
+ if (window.innerHeight)
+ viewport_height = window.innerHeight;
+ else
+ viewport_height = $(window).height();
+ var sidebar_offset = sidebar.offset().top;
+ var sidebar_height = Math.max(bodywrapper.height(), sidebar.height());
+ sidebarbutton.find('span').css({
+ 'display': 'block',
+ 'position': 'fixed',
+ 'top': Math.min(viewport_height/2, sidebar_height/2 + sidebar_offset) - 10
+ });
+
+ sidebarbutton.click(toggle_sidebar);
+ sidebarbutton.attr('title', _('Collapse sidebar'));
+ sidebarbutton.css({
+ 'border-radius': '0 5px 5px 0',
+ 'color': '#444444',
+ 'background-color': '#CCCCCC',
+ 'font-size': '1.2em',
+ 'cursor': 'pointer',
+ 'height': sidebar_height,
+ 'padding-top': '1px',
+ 'padding-left': '1px',
+ 'margin-left': ssb_width_expanded - 12
+ });
+
+ sidebarbutton.hover(
+ function () {
+ $(this).css('background-color', dark_color);
+ },
+ function () {
+ $(this).css('background-color', light_color);
+ }
+ );
+ }
+
+ function set_position_from_cookie() {
+ if (!document.cookie)
+ return;
+ var items = document.cookie.split(';');
+ for(var k=0; k<items.length; k++) {
+ var key_val = items[k].split('=');
+ var key = key_val[0];
+ if (key == 'sidebar') {
+ var value = key_val[1];
+ if ((value == 'collapsed') && (!sidebar_is_collapsed()))
+ collapse_sidebar();
+ else if ((value == 'expanded') && (sidebar_is_collapsed()))
+ expand_sidebar();
+ }
+ }
+ }
+
+ add_sidebar_button();
+ var sidebarbutton = $('#sidebarbutton');
+ set_position_from_cookie();
+});
diff --git a/Doc/tutorial/classes.rst b/Doc/tutorial/classes.rst
index 68c4e5d886..5ce3669258 100644
--- a/Doc/tutorial/classes.rst
+++ b/Doc/tutorial/classes.rst
@@ -180,7 +180,10 @@ binding::
scope_test()
print("In global scope:", spam)
-The output of the example code is::
+The output of the example code is:
+
+.. code-block:: none
+
After local assignment: test spam
After nonlocal assignment: nonlocal spam
diff --git a/Include/Python.h b/Include/Python.h
index 5972ffa592..d6e47c2463 100644
--- a/Include/Python.h
+++ b/Include/Python.h
@@ -100,7 +100,6 @@
#include "warnings.h"
#include "weakrefobject.h"
#include "structseq.h"
-#include "accu.h"
#include "codecs.h"
#include "pyerrors.h"
diff --git a/Include/patchlevel.h b/Include/patchlevel.h
index 65584db67d..0839071c76 100644
--- a/Include/patchlevel.h
+++ b/Include/patchlevel.h
@@ -2,7 +2,7 @@
/* Python version identification scheme.
When the major or minor version changes, the VERSION variable in
- configure.in must also be changed.
+ configure.ac must also be changed.
There is also (independent) API version information in modsupport.h.
*/
diff --git a/Lib/_weakrefset.py b/Lib/_weakrefset.py
index 42653699a3..6a98b88e33 100644
--- a/Lib/_weakrefset.py
+++ b/Lib/_weakrefset.py
@@ -63,7 +63,7 @@ class WeakSet:
yield item
def __len__(self):
- return sum(x() is not None for x in self.data)
+ return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
@@ -114,36 +114,21 @@ class WeakSet:
def update(self, other):
if self._pending_removals:
self._commit_removals()
- if isinstance(other, self.__class__):
- self.data.update(other.data)
- else:
- for element in other:
- self.add(element)
+ for element in other:
+ self.add(element)
def __ior__(self, other):
self.update(other)
return self
- # Helper functions for simple delegating methods.
- def _apply(self, other, method):
- if not isinstance(other, self.__class__):
- other = self.__class__(other)
- newdata = method(other.data)
- newset = self.__class__()
- newset.data = newdata
- return newset
-
def difference(self, other):
- return self._apply(other, self.data.difference)
+ newset = self.copy()
+ newset.difference_update(other)
+ return newset
__sub__ = difference
def difference_update(self, other):
- if self._pending_removals:
- self._commit_removals()
- if self is other:
- self.data.clear()
- else:
- self.data.difference_update(ref(item) for item in other)
+ self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
@@ -154,13 +139,11 @@ class WeakSet:
return self
def intersection(self, other):
- return self._apply(other, self.data.intersection)
+ return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
- if self._pending_removals:
- self._commit_removals()
- self.data.intersection_update(ref(item) for item in other)
+ self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
@@ -169,17 +152,17 @@ class WeakSet:
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
- __lt__ = issubset
+ __le__ = issubset
- def __le__(self, other):
- return self.data <= set(ref(item) for item in other)
+ def __lt__(self, other):
+ return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
- __gt__ = issuperset
+ __ge__ = issuperset
- def __ge__(self, other):
- return self.data >= set(ref(item) for item in other)
+ def __gt__(self, other):
+ return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
@@ -187,27 +170,24 @@ class WeakSet:
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
- return self._apply(other, self.data.symmetric_difference)
+ newset = self.copy()
+ newset.symmetric_difference_update(other)
+ return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
- if self._pending_removals:
- self._commit_removals()
- if self is other:
- self.data.clear()
- else:
- self.data.symmetric_difference_update(ref(item) for item in other)
+ self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
- self.data.symmetric_difference_update(ref(item) for item in other)
+ self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
- return self._apply(other, self.data.union)
+ return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
diff --git a/Lib/asyncore.py b/Lib/asyncore.py
index 7f42d39f33..b06077fd61 100644
--- a/Lib/asyncore.py
+++ b/Lib/asyncore.py
@@ -225,6 +225,7 @@ class dispatcher:
debug = False
connected = False
accepting = False
+ connecting = False
closing = False
addr = None
ignore_log_types = frozenset(['warning'])
@@ -248,7 +249,7 @@ class dispatcher:
try:
self.addr = sock.getpeername()
except socket.error as err:
- if err.args[0] == ENOTCONN:
+ if err.args[0] in (ENOTCONN, EINVAL):
# To handle the case where we got an unconnected
# socket.
self.connected = False
@@ -342,9 +343,11 @@ class dispatcher:
def connect(self, address):
self.connected = False
+ self.connecting = True
err = self.socket.connect_ex(address)
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
or err == EINVAL and os.name in ('nt', 'ce'):
+ self.addr = address
return
if err in (0, EISCONN):
self.addr = address
@@ -400,6 +403,7 @@ class dispatcher:
def close(self):
self.connected = False
self.accepting = False
+ self.connecting = False
self.del_channel()
try:
self.socket.close()
@@ -438,7 +442,8 @@ class dispatcher:
# sockets that are connected
self.handle_accept()
elif not self.connected:
- self.handle_connect_event()
+ if self.connecting:
+ self.handle_connect_event()
self.handle_read()
else:
self.handle_read()
@@ -449,6 +454,7 @@ class dispatcher:
raise socket.error(err, _strerror(err))
self.handle_connect()
self.connected = True
+ self.connecting = False
def handle_write_event(self):
if self.accepting:
@@ -457,12 +463,8 @@ class dispatcher:
return
if not self.connected:
- #check for errors
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- if err != 0:
- raise socket.error(err, _strerror(err))
-
- self.handle_connect_event()
+ if self.connecting:
+ self.handle_connect_event()
self.handle_write()
def handle_expt_event(self):
diff --git a/Lib/concurrent/futures/_base.py b/Lib/concurrent/futures/_base.py
index 79b91d495f..9f11f6977f 100644
--- a/Lib/concurrent/futures/_base.py
+++ b/Lib/concurrent/futures/_base.py
@@ -112,12 +112,14 @@ class _AllCompletedWaiter(_Waiter):
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
+ self.lock = threading.Lock()
super().__init__()
def _decrement_pending_calls(self):
- self.num_pending_calls -= 1
- if not self.num_pending_calls:
- self.event.set()
+ with self.lock:
+ self.num_pending_calls -= 1
+ if not self.num_pending_calls:
+ self.event.set()
def add_result(self, future):
super().add_result(future)
diff --git a/Lib/distutils/tests/test_bdist_msi.py b/Lib/distutils/tests/test_bdist_msi.py
index 9308c79d91..15d8bdff2b 100644
--- a/Lib/distutils/tests/test_bdist_msi.py
+++ b/Lib/distutils/tests/test_bdist_msi.py
@@ -1,12 +1,11 @@
"""Tests for distutils.command.bdist_msi."""
-import unittest
import sys
-
+import unittest
from test.support import run_unittest
-
from distutils.tests import support
-@unittest.skipUnless(sys.platform=="win32", "These tests are only for win32")
+
+@unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows')
class BDistMSITestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
@@ -14,10 +13,11 @@ class BDistMSITestCase(support.TempdirManager,
def test_minimal(self):
# minimal test XXX need more tests
from distutils.command.bdist_msi import bdist_msi
- pkg_pth, dist = self.create_dist()
+ project_dir, dist = self.create_dist()
cmd = bdist_msi(dist)
cmd.ensure_finalized()
+
def test_suite():
return unittest.makeSuite(BDistMSITestCase)
diff --git a/Lib/distutils/tests/test_sdist.py b/Lib/distutils/tests/test_sdist.py
index fd71dac8d6..1ba2a1a6a9 100644
--- a/Lib/distutils/tests/test_sdist.py
+++ b/Lib/distutils/tests/test_sdist.py
@@ -6,6 +6,7 @@ import warnings
import zipfile
from os.path import join
from textwrap import dedent
+from test.support import captured_stdout, check_warnings, run_unittest
try:
import zlib
@@ -13,7 +14,6 @@ try:
except ImportError:
ZLIB_SUPPORT = False
-from test.support import captured_stdout, check_warnings, run_unittest
from distutils.command.sdist import sdist, show_formats
from distutils.core import Distribution
@@ -326,6 +326,7 @@ class SDistTestCase(PyPIRCCommandTestCase):
# filling data_files by pointing files in package_data
dist.package_data = {'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
+ cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
diff --git a/Lib/doctest.py b/Lib/doctest.py
index 234733e565..cc3b425075 100644
--- a/Lib/doctest.py
+++ b/Lib/doctest.py
@@ -2266,7 +2266,8 @@ class DocTestCase(unittest.TestCase):
return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
- def __init__(self):
+ def __init__(self, module):
+ self.module = module
DocTestCase.__init__(self, None)
def setUp(self):
@@ -2276,7 +2277,10 @@ class SkipDocTestCase(DocTestCase):
pass
def shortDescription(self):
- return "Skipping tests from %s" % module.__name__
+ return "Skipping tests from %s" % self.module.__name__
+
+ __str__ = shortDescription
+
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
@@ -2324,7 +2328,7 @@ def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
if not tests and sys.flags.optimize >=2:
# Skip doctests when running with -O2
suite = unittest.TestSuite()
- suite.addTest(SkipDocTestCase())
+ suite.addTest(SkipDocTestCase(module))
return suite
elif not tests:
# Why do we want to do this? Because it reveals a bug that might
diff --git a/Lib/email/__init__.py b/Lib/email/__init__.py
index bd316fdaf3..ff16f6af3f 100644
--- a/Lib/email/__init__.py
+++ b/Lib/email/__init__.py
@@ -11,6 +11,7 @@ __all__ = [
'charset',
'encoders',
'errors',
+ 'feedparser',
'generator',
'header',
'iterators',
diff --git a/Lib/email/feedparser.py b/Lib/email/feedparser.py
index 60a83255c0..aa8a2ffa64 100644
--- a/Lib/email/feedparser.py
+++ b/Lib/email/feedparser.py
@@ -19,7 +19,7 @@ the current message. Defects are just instances that live on the message
object's .defects attribute.
"""
-__all__ = ['FeedParser']
+__all__ = ['FeedParser', 'BytesFeedParser']
import re
diff --git a/Lib/email/generator.py b/Lib/email/generator.py
index f0e7a95477..04c0210183 100644
--- a/Lib/email/generator.py
+++ b/Lib/email/generator.py
@@ -4,7 +4,7 @@
"""Classes to generate plain text from a message object tree."""
-__all__ = ['Generator', 'DecodedGenerator']
+__all__ = ['Generator', 'DecodedGenerator', 'BytesGenerator']
import re
import sys
@@ -360,7 +360,7 @@ class BytesGenerator(Generator):
for h, v in msg._headers:
self.write('%s: ' % h)
if isinstance(v, Header):
- self.write(v.encode(maxlinelen=self._maxheaderlen)+NL)
+ self.write(v.encode(maxlinelen=self._maxheaderlen)+self._NL)
elif _has_surrogates(v):
# If we have raw 8bit data in a byte string, we have no idea
# what the encoding is. There is no safe way to split this
diff --git a/Lib/email/header.py b/Lib/email/header.py
index 2e687b7a6f..3250d367ed 100644
--- a/Lib/email/header.py
+++ b/Lib/email/header.py
@@ -283,7 +283,12 @@ class Header:
# character set, otherwise an early error is thrown.
output_charset = charset.output_codec or 'us-ascii'
if output_charset != _charset.UNKNOWN8BIT:
- s.encode(output_charset, errors)
+ try:
+ s.encode(output_charset, errors)
+ except UnicodeEncodeError:
+ if output_charset!='us-ascii':
+ raise
+ charset = UTF8
self._chunks.append((s, charset))
def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'):
diff --git a/Lib/email/parser.py b/Lib/email/parser.py
index 6caaff53ad..1c931ea9de 100644
--- a/Lib/email/parser.py
+++ b/Lib/email/parser.py
@@ -4,7 +4,7 @@
"""A parser of RFC 2822 and MIME email messages."""
-__all__ = ['Parser', 'HeaderParser']
+__all__ = ['Parser', 'HeaderParser', 'BytesParser']
import warnings
from io import StringIO, TextIOWrapper
diff --git a/Lib/email/test/test_email.py b/Lib/email/test/test_email.py
index 102e15b9ff..5db34dc9d7 100644
--- a/Lib/email/test/test_email.py
+++ b/Lib/email/test/test_email.py
@@ -619,6 +619,19 @@ class TestMessageAPI(TestEmailBase):
msg['Dummy'] = 'dummy\nX-Injected-Header: test'
self.assertRaises(errors.HeaderParseError, msg.as_string)
+ def test_unicode_header_defaults_to_utf8_encoding(self):
+ # Issue 14291
+ m = MIMEText('abc\n')
+ m['Subject'] = 'É test'
+ self.assertEqual(str(m),textwrap.dedent("""\
+ Content-Type: text/plain; charset="us-ascii"
+ MIME-Version: 1.0
+ Content-Transfer-Encoding: 7bit
+ Subject: =?utf-8?q?=C3=89_test?=
+
+ abc
+ """))
+
# Test the email.encoders module
class TestEncoders(unittest.TestCase):
@@ -1060,9 +1073,13 @@ Subject: =?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offshore-W
'f\xfcr Offshore-Windkraftprojekte '
'<a-very-long-address@example.com>')
msg['Reply-To'] = header_string
- self.assertRaises(UnicodeEncodeError, msg.as_string)
+ eq(msg.as_string(maxheaderlen=78), """\
+Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
+ =?utf-8?q?hore-Windkraftprojekte_=3Ca-very-long-address=40example=2Ecom=3E?=
+
+""")
msg = Message()
- msg['Reply-To'] = Header(header_string, 'utf-8',
+ msg['Reply-To'] = Header(header_string,
header_name='Reply-To')
eq(msg.as_string(maxheaderlen=78), """\
Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
@@ -1226,7 +1243,6 @@ List: List-Unsubscribe:
=?utf-8?q?_folding_white_space_works?=""")+'\n')
-
# Test mangling of "From " lines in the body of a message
class TestFromMangling(unittest.TestCase):
def setUp(self):
@@ -2502,14 +2518,11 @@ class TestMiscellaneous(TestEmailBase):
def test__all__(self):
module = __import__('email')
- # Can't use sorted() here due to Python 2.3 compatibility
- all = module.__all__[:]
- all.sort()
- self.assertEqual(all, [
- 'base64mime', 'charset', 'encoders', 'errors', 'generator',
- 'header', 'iterators', 'message', 'message_from_binary_file',
- 'message_from_bytes', 'message_from_file',
- 'message_from_string', 'mime', 'parser',
+ self.assertEqual(sorted(module.__all__), [
+ 'base64mime', 'charset', 'encoders', 'errors', 'feedparser',
+ 'generator', 'header', 'iterators', 'message',
+ 'message_from_binary_file', 'message_from_bytes',
+ 'message_from_file', 'message_from_string', 'mime', 'parser',
'quoprimime', 'utils',
])
@@ -3424,6 +3437,30 @@ class Test8BitBytesHandling(unittest.TestCase):
g.flatten(msg)
self.assertEqual(s.getvalue(), source)
+ def test_bytes_generator_b_encoding_linesep(self):
+ # Issue 14062: b encoding was tacking on an extra \n.
+ m = Message()
+ # This has enough non-ascii that it should always end up b encoded.
+ m['Subject'] = Header('žluťoučký kůň')
+ s = BytesIO()
+ g = email.generator.BytesGenerator(s)
+ g.flatten(m, linesep='\r\n')
+ self.assertEqual(
+ s.getvalue(),
+ b'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')
+
+ def test_generator_b_encoding_linesep(self):
+ # Since this broke in ByteGenerator, test Generator for completeness.
+ m = Message()
+ # This has enough non-ascii that it should always end up b encoded.
+ m['Subject'] = Header('žluťoučký kůň')
+ s = StringIO()
+ g = email.generator.Generator(s)
+ g.flatten(m, linesep='\r\n')
+ self.assertEqual(
+ s.getvalue(),
+ 'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')
+
maxDiff = None
diff --git a/Lib/http/server.py b/Lib/http/server.py
index 6642729af5..831c79e114 100644
--- a/Lib/http/server.py
+++ b/Lib/http/server.py
@@ -945,8 +945,11 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
"""
splitpath = _url_collapse_path_split(self.path)
- if splitpath[0] in self.cgi_directories:
- self.cgi_info = splitpath
+ joined_path = '/'.join(splitpath)
+ dir_sep = joined_path.find('/',1)
+ head, tail = joined_path[:dir_sep], joined_path[dir_sep+1:]
+ if head in self.cgi_directories:
+ self.cgi_info = head, tail
return True
return False
diff --git a/Lib/idlelib/CallTipWindow.py b/Lib/idlelib/CallTipWindow.py
index 27ed0859c8..a2431f8eff 100644
--- a/Lib/idlelib/CallTipWindow.py
+++ b/Lib/idlelib/CallTipWindow.py
@@ -22,6 +22,7 @@ class CallTip:
self.parenline = self.parencol = None
self.lastline = None
self.hideid = self.checkhideid = None
+ self.checkhide_after_id = None
def position_window(self):
"""Check if needs to reposition the window, and if so - do it."""
@@ -102,7 +103,10 @@ class CallTip:
self.hidetip()
else:
self.position_window()
- self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
+ if self.checkhide_after_id is not None:
+ self.widget.after_cancel(self.checkhide_after_id)
+ self.checkhide_after_id = \
+ self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
def hide_event(self, event):
if not self.tipwindow:
diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt
index 0a4f98e9fb..a6b06b40e0 100644
--- a/Lib/idlelib/NEWS.txt
+++ b/Lib/idlelib/NEWS.txt
@@ -1,3 +1,15 @@
+What's New in IDLE 3.2.3?
+=========================
+
+- Issue #14409: IDLE now properly executes commands in the Shell window
+ when it cannot read the normal config files on startup and
+ has to use the built-in default key bindings.
+ There was previously a bug in one of the defaults.
+
+- Issue #3573: IDLE hangs when passing invalid command line args
+ (directory(ies) instead of file(s)).
+
+
What's New in IDLE 3.2.1?
=========================
diff --git a/Lib/idlelib/PyShell.py b/Lib/idlelib/PyShell.py
index 6bf0a8c65d..74a37db862 100644
--- a/Lib/idlelib/PyShell.py
+++ b/Lib/idlelib/PyShell.py
@@ -1389,8 +1389,10 @@ def main():
if enable_edit:
if not (cmd or script):
- for filename in args:
- flist.open(filename)
+ for filename in args[:]:
+ if flist.open(filename) is None:
+ # filename is a directory actually, disconsider it
+ args.remove(filename)
if not args:
flist.new()
if enable_shell:
diff --git a/Lib/idlelib/configHandler.py b/Lib/idlelib/configHandler.py
index 73b8db5b23..da927260ee 100644
--- a/Lib/idlelib/configHandler.py
+++ b/Lib/idlelib/configHandler.py
@@ -595,7 +595,7 @@ class IdleConf:
'<<replace>>': ['<Control-h>'],
'<<goto-line>>': ['<Alt-g>'],
'<<smart-backspace>>': ['<Key-BackSpace>'],
- '<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'],
+ '<<newline-and-indent>>': ['<Key-Return>', '<Key-KP_Enter>'],
'<<smart-indent>>': ['<Key-Tab>'],
'<<indent-region>>': ['<Control-Key-bracketright>'],
'<<dedent-region>>': ['<Control-Key-bracketleft>'],
diff --git a/Lib/idlelib/tabbedpages.py b/Lib/idlelib/tabbedpages.py
index f7917021fb..2557732755 100644
--- a/Lib/idlelib/tabbedpages.py
+++ b/Lib/idlelib/tabbedpages.py
@@ -78,7 +78,7 @@ class TabSet(Frame):
def remove_tab(self, tab_name):
"""Remove the tab named <tab_name>"""
if not tab_name in self._tab_names:
- raise KeyError("No such Tab: '%s" % page_name)
+ raise KeyError("No such Tab: '%s" % tab_name)
self._tab_names.remove(tab_name)
self._arrange_tabs()
@@ -88,7 +88,7 @@ class TabSet(Frame):
if tab_name == self._selected_tab:
return
if tab_name is not None and tab_name not in self._tabs:
- raise KeyError("No such Tab: '%s" % page_name)
+ raise KeyError("No such Tab: '%s" % tab_name)
# deselect the current selected tab
if self._selected_tab is not None:
diff --git a/Lib/lib2to3/tests/test_parser.py b/Lib/lib2to3/tests/test_parser.py
index f32404cc1b..3968e6a41a 100644
--- a/Lib/lib2to3/tests/test_parser.py
+++ b/Lib/lib2to3/tests/test_parser.py
@@ -11,10 +11,14 @@ from __future__ import with_statement
# Testing imports
from . import support
from .support import driver, test_dir
+from test.support import verbose
# Python imports
import os
+import sys
import unittest
+import warnings
+import subprocess
# Local imports
from lib2to3.pgen2 import tokenize
@@ -171,10 +175,12 @@ class TestParserIdempotency(support.TestCase):
try:
tree = driver.parse_string(source)
except ParseError as err:
- print('ParseError on file', filepath, err)
+ if verbose > 0:
+ warnings.warn('ParseError on file %s (%s)' % (filepath, err))
continue
new = str(tree)
- if diff(filepath, new):
+ x = diff(filepath, new)
+ if x:
self.fail("Idempotency failed: %s" % filepath)
def test_extended_unpacking(self):
@@ -183,6 +189,7 @@ class TestParserIdempotency(support.TestCase):
driver.parse_string("(z, *y, w) = m\n")
driver.parse_string("for *z, m in d: pass\n")
+
class TestLiterals(GrammarTest):
def validate(self, s):
@@ -221,7 +228,7 @@ def diff(fn, result):
with open('@', 'w') as f:
f.write(str(result))
fn = fn.replace('"', '\\"')
- return os.system('diff -u "%s" @' % fn)
+ return subprocess.call(['diff', '-u', fn, '@'], stdout=(subprocess.DEVNULL if verbose < 1 else None))
finally:
try:
os.remove("@")
diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py
index 3faad4f335..4191b222a6 100644
--- a/Lib/logging/__init__.py
+++ b/Lib/logging/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
@@ -16,9 +16,9 @@
"""
Logging package for Python. Based on PEP 282 and comments thereto in
-comp.lang.python, and influenced by Apache's log4j system.
+comp.lang.python.
-Copyright (C) 2001-2011 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
@@ -917,8 +917,12 @@ class StreamHandler(Handler):
"""
Flushes the stream.
"""
- if self.stream and hasattr(self.stream, "flush"):
- self.stream.flush()
+ self.acquire()
+ try:
+ if self.stream and hasattr(self.stream, "flush"):
+ self.stream.flush()
+ finally:
+ self.release()
def emit(self, record):
"""
@@ -969,12 +973,16 @@ class FileHandler(StreamHandler):
"""
Closes the stream.
"""
- if self.stream:
- self.flush()
- if hasattr(self.stream, "close"):
- self.stream.close()
- StreamHandler.close(self)
- self.stream = None
+ self.acquire()
+ try:
+ if self.stream:
+ self.flush()
+ if hasattr(self.stream, "close"):
+ self.stream.close()
+ StreamHandler.close(self)
+ self.stream = None
+ finally:
+ self.release()
def _open(self):
"""
diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py
index 4a6b959946..7689b040c6 100644
--- a/Lib/logging/handlers.py
+++ b/Lib/logging/handlers.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
@@ -16,10 +16,9 @@
"""
Additional handlers for the logging package for Python. The core package is
-based on PEP 282 and comments thereto in comp.lang.python, and influenced by
-Apache's log4j system.
+based on PEP 282 and comments thereto in comp.lang.python.
-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
@@ -271,9 +270,10 @@ class TimedRotatingFileHandler(BaseRotatingHandler):
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
- newRolloverAt = newRolloverAt - 3600
+ addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
- newRolloverAt = newRolloverAt + 3600
+ addend = 3600
+ newRolloverAt += addend
result = newRolloverAt
return result
@@ -324,11 +324,20 @@ class TimedRotatingFileHandler(BaseRotatingHandler):
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
+ currentTime = int(time.time())
+ dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
+ dstThen = timeTuple[-1]
+ if dstNow != dstThen:
+ if dstNow:
+ addend = 3600
+ else:
+ addend = -3600
+ timeTuple = time.localtime(t + addend)
dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
if os.path.exists(dfn):
os.remove(dfn)
@@ -338,19 +347,18 @@ class TimedRotatingFileHandler(BaseRotatingHandler):
os.remove(s)
self.mode = 'w'
self.stream = self._open()
- currentTime = int(time.time())
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
- dstNow = time.localtime(currentTime)[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
- newRolloverAt = newRolloverAt - 3600
+ addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
- newRolloverAt = newRolloverAt + 3600
+ addend = 3600
+ newRolloverAt += addend
self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
@@ -511,11 +519,16 @@ class SocketHandler(logging.Handler):
"""
ei = record.exc_info
if ei:
- dummy = self.format(record) # just to get traceback text into record.exc_text
- record.exc_info = None # to avoid Unpickleable error
- s = pickle.dumps(record.__dict__, 1)
- if ei:
- record.exc_info = ei # for next handler
+ # just to get traceback text into record.exc_text ...
+ dummy = self.format(record)
+ # See issue #14436: If msg or args are objects, they may not be
+ # available on the receiving end. So we convert the msg % args
+ # to a string, save it as msg and zap the args.
+ d = dict(record.__dict__)
+ d['msg'] = record.getMessage()
+ d['args'] = None
+ d['exc_info'] = None
+ s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
@@ -554,10 +567,14 @@ class SocketHandler(logging.Handler):
"""
Closes the socket.
"""
- if self.sock:
- self.sock.close()
- self.sock = None
- logging.Handler.close(self)
+ self.acquire()
+ try:
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+ logging.Handler.close(self)
+ finally:
+ self.release()
class DatagramHandler(SocketHandler):
"""
@@ -752,9 +769,13 @@ class SysLogHandler(logging.Handler):
"""
Closes the socket.
"""
- if self.unixsocket:
- self.socket.close()
- logging.Handler.close(self)
+ self.acquire()
+ try:
+ if self.unixsocket:
+ self.socket.close()
+ logging.Handler.close(self)
+ finally:
+ self.release()
def mapPriority(self, levelName):
"""
@@ -841,6 +862,7 @@ class SMTPHandler(logging.Handler):
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
+ self._timeout = 5.0
def getSubject(self, record):
"""
@@ -863,7 +885,7 @@ class SMTPHandler(logging.Handler):
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
- smtp = smtplib.SMTP(self.mailhost, port)
+ smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
@@ -1095,7 +1117,11 @@ class BufferingHandler(logging.Handler):
This version just zaps the buffer to empty.
"""
- self.buffer = []
+ self.acquire()
+ try:
+ self.buffer = []
+ finally:
+ self.release()
def close(self):
"""
@@ -1145,18 +1171,26 @@ class MemoryHandler(BufferingHandler):
The record buffer is also cleared by this operation.
"""
- if self.target:
- for record in self.buffer:
- self.target.handle(record)
- self.buffer = []
+ self.acquire()
+ try:
+ if self.target:
+ for record in self.buffer:
+ self.target.handle(record)
+ self.buffer = []
+ finally:
+ self.release()
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
- self.target = None
- BufferingHandler.close(self)
+ self.acquire()
+ try:
+ self.target = None
+ BufferingHandler.close(self)
+ finally:
+ self.release()
class QueueHandler(logging.Handler):
diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py
index df00f1d906..8bb0a3b0d8 100644
--- a/Lib/multiprocessing/connection.py
+++ b/Lib/multiprocessing/connection.py
@@ -94,6 +94,17 @@ def arbitrary_address(family):
else:
raise ValueError('unrecognized family')
+def _validate_family(family):
+ '''
+ Checks if the family is valid for the current environment.
+ '''
+ if sys.platform != 'win32' and family == 'AF_PIPE':
+ raise ValueError('Family %s is not recognized.' % family)
+
+ if sys.platform == 'win32' and family == 'AF_UNIX':
+ # double check
+ if not hasattr(socket, family):
+ raise ValueError('Family %s is not recognized.' % family)
def address_type(address):
'''
@@ -126,6 +137,7 @@ class Listener(object):
or default_family
address = address or arbitrary_address(family)
+ _validate_family(family)
if family == 'AF_PIPE':
self._listener = PipeListener(address, backlog)
else:
@@ -163,6 +175,7 @@ def Client(address, family=None, authkey=None):
Returns a connection to the address of a `Listener`
'''
family = family or address_type(address)
+ _validate_family(family)
if family == 'AF_PIPE':
c = PipeClient(address)
else:
diff --git a/Lib/pydoc.py b/Lib/pydoc.py
index f45d46170e..89fd09b556 100755
--- a/Lib/pydoc.py
+++ b/Lib/pydoc.py
@@ -1829,7 +1829,7 @@ has the same effect as typing a particular string at the help> prompt.
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
-the tutorial on the Internet at http://docs.python.org/tutorial/.
+the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
@@ -1839,7 +1839,7 @@ To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
-''' % sys.version[:3])
+''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = list(sorted(items))
diff --git a/Lib/re.py b/Lib/re.py
index 4fe3bd88ac..3fd59df8bc 100644
--- a/Lib/re.py
+++ b/Lib/re.py
@@ -179,14 +179,19 @@ def subn(pattern, repl, string, count=0, flags=0):
def split(pattern, string, maxsplit=0, flags=0):
"""Split the source string by the occurrences of the pattern,
- returning a list containing the resulting substrings."""
+ returning a list containing the resulting substrings. If
+ capturing parentheses are used in pattern, then the text of all
+ groups in the pattern are also returned as part of the resulting
+ list. If maxsplit is nonzero, at most maxsplit splits occur,
+ and the remainder of the string is returned as the final element
+ of the list."""
return _compile(pattern, flags).split(string, maxsplit)
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
- If one or more groups are present in the pattern, return a
- list of groups; this will be a list of tuples if the pattern
+ If one or more capturing groups are present in the pattern, return
+ a list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
diff --git a/Lib/rlcompleter.py b/Lib/rlcompleter.py
index 8b74ffaadd..d3a443737a 100644
--- a/Lib/rlcompleter.py
+++ b/Lib/rlcompleter.py
@@ -1,13 +1,11 @@
-"""Word completion for GNU readline 2.0.
+"""Word completion for GNU readline.
-This requires the latest extension to the readline module. The completer
-completes keywords, built-ins and globals in a selectable namespace (which
-defaults to __main__); when completing NAME.NAME..., it evaluates (!) the
-expression up to the last dot and completes its attributes.
+The completer completes keywords, built-ins and globals in a selectable
+namespace (which defaults to __main__); when completing NAME.NAME..., it
+evaluates (!) the expression up to the last dot and completes its attributes.
-It's very cool to do "import sys" type "sys.", hit the
-completion key (twice), and see the list of names defined by the
-sys module!
+It's very cool to do "import sys" type "sys.", hit the completion key (twice),
+and see the list of names defined by the sys module!
Tip: to use the tab key as the completion key, call
@@ -15,21 +13,19 @@ Tip: to use the tab key as the completion key, call
Notes:
-- Exceptions raised by the completer function are *ignored* (and
-generally cause the completion to fail). This is a feature -- since
-readline sets the tty device in raw (or cbreak) mode, printing a
-traceback wouldn't work well without some complicated hoopla to save,
-reset and restore the tty state.
+- Exceptions raised by the completer function are *ignored* (and generally cause
+ the completion to fail). This is a feature -- since readline sets the tty
+ device in raw (or cbreak) mode, printing a traceback wouldn't work well
+ without some complicated hoopla to save, reset and restore the tty state.
-- The evaluation of the NAME.NAME... form may cause arbitrary
-application defined code to be executed if an object with a
-__getattr__ hook is found. Since it is the responsibility of the
-application (or the user) to enable this feature, I consider this an
-acceptable risk. More complicated expressions (e.g. function calls or
-indexing operations) are *not* evaluated.
+- The evaluation of the NAME.NAME... form may cause arbitrary application
+ defined code to be executed if an object with a __getattr__ hook is found.
+ Since it is the responsibility of the application (or the user) to enable this
+ feature, I consider this an acceptable risk. More complicated expressions
+ (e.g. function calls or indexing operations) are *not* evaluated.
- When the original stdin is not a tty device, GNU readline is never
-used, and this module (and the readline module) are silently inactive.
+ used, and this module (and the readline module) are silently inactive.
"""
diff --git a/Lib/socket.py b/Lib/socket.py
index 1e285493c4..a93cd11248 100644
--- a/Lib/socket.py
+++ b/Lib/socket.py
@@ -197,6 +197,17 @@ class socket(_socket.socket):
if self._io_refs <= 0:
self._real_close()
+ def detach(self):
+ """detach() -> file descriptor
+
+ Close the socket object without closing the underlying file descriptor.
+ The object cannot be used after this call, but the file descriptor
+ can be reused for other purposes. The file descriptor is returned.
+ """
+ self._closed = True
+ return super().detach()
+
+
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
diff --git a/Lib/socketserver.py b/Lib/socketserver.py
index cd367f15e2..adf9f38ead 100644
--- a/Lib/socketserver.py
+++ b/Lib/socketserver.py
@@ -133,6 +133,7 @@ import socket
import select
import sys
import os
+import errno
try:
import threading
except ImportError:
@@ -147,6 +148,15 @@ if hasattr(socket, "AF_UNIX"):
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
+def _eintr_retry(func, *args):
+ """restart a system call interrupted by EINTR"""
+ while True:
+ try:
+ return func(*args)
+ except (OSError, select.error) as e:
+ if e.args[0] != errno.EINTR:
+ raise
+
class BaseServer:
"""Base class for server classes.
@@ -222,7 +232,8 @@ class BaseServer:
# connecting to the socket to wake this up instead of
# polling. Polling reduces our responsiveness to a
# shutdown request and wastes cpu at all other times.
- r, w, e = select.select([self], [], [], poll_interval)
+ r, w, e = _eintr_retry(select.select, [self], [], [],
+ poll_interval)
if self in r:
self._handle_request_noblock()
finally:
@@ -262,7 +273,7 @@ class BaseServer:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
- fd_sets = select.select([self], [], [], timeout)
+ fd_sets = _eintr_retry(select.select, [self], [], [], timeout)
if not fd_sets[0]:
self.handle_timeout()
return
diff --git a/Lib/subprocess.py b/Lib/subprocess.py
index 017f58d717..179f41a85f 100644
--- a/Lib/subprocess.py
+++ b/Lib/subprocess.py
@@ -1075,7 +1075,17 @@ class Popen(object):
def terminate(self):
"""Terminates the process
"""
- _subprocess.TerminateProcess(self._handle, 1)
+ try:
+ _subprocess.TerminateProcess(self._handle, 1)
+ except OSError as e:
+ # ERROR_ACCESS_DENIED (winerror 5) is received when the
+ # process already died.
+ if e.winerror != 5:
+ raise
+ rc = _subprocess.GetExitCodeProcess(self._handle)
+ if rc == _subprocess.STILL_ACTIVE:
+ raise
+ self.returncode = rc
kill = terminate
diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py
index 135a90e772..714a116f87 100755
--- a/Lib/test/regrtest.py
+++ b/Lib/test/regrtest.py
@@ -677,10 +677,10 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
- if environment_changed:
- print("{} altered the execution environment:".format(
- count(len(environment_changed), "test")))
- printlist(environment_changed)
+ if environment_changed:
+ print("{} altered the execution environment:".format(
+ count(len(environment_changed), "test")))
+ printlist(environment_changed)
if skipped and not quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
@@ -890,7 +890,9 @@ class saved_test_environment:
'logging._handlers', 'logging._handlerList',
'shutil.archive_formats', 'shutil.unpack_formats',
'sys.warnoptions', 'threading._dangling',
- 'multiprocessing.process._dangling')
+ 'multiprocessing.process._dangling',
+ 'support.TESTFN',
+ )
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
@@ -1020,6 +1022,21 @@ class saved_test_environment:
multiprocessing.process._dangling.clear()
multiprocessing.process._dangling.update(saved)
+ def get_support_TESTFN(self):
+ if os.path.isfile(support.TESTFN):
+ result = 'f'
+ elif os.path.isdir(support.TESTFN):
+ result = 'd'
+ else:
+ result = None
+ return result
+ def restore_support_TESTFN(self, saved_value):
+ if saved_value is None:
+ if os.path.isfile(support.TESTFN):
+ os.unlink(support.TESTFN)
+ elif os.path.isdir(support.TESTFN):
+ shutil.rmtree(support.TESTFN)
+
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
diff --git a/Lib/test/test_aifc.py b/Lib/test/test_aifc.py
index 236f9b6610..ee4ad6b0a3 100644
--- a/Lib/test/test_aifc.py
+++ b/Lib/test/test_aifc.py
@@ -1,7 +1,8 @@
-from test.support import findfile, run_unittest, TESTFN
+from test.support import findfile, run_unittest, TESTFN, captured_stdout, unlink
import unittest
import os
import io
+import struct
import aifc
@@ -20,10 +21,8 @@ class AIFCTest(unittest.TestCase):
self.fout.close()
except (aifc.Error, AttributeError):
pass
- try:
- os.remove(TESTFN)
- except OSError:
- pass
+ unlink(TESTFN)
+ unlink(TESTFN + '.aiff')
def test_skipunknown(self):
#Issue 2245
@@ -32,6 +31,7 @@ class AIFCTest(unittest.TestCase):
def test_params(self):
f = self.f = aifc.open(self.sndfilepath)
+ self.assertEqual(f.getfp().name, self.sndfilepath)
self.assertEqual(f.getnchannels(), 2)
self.assertEqual(f.getsampwidth(), 2)
self.assertEqual(f.getframerate(), 48000)
@@ -45,6 +45,7 @@ class AIFCTest(unittest.TestCase):
def test_read(self):
f = self.f = aifc.open(self.sndfilepath)
+ self.assertEqual(f.readframes(0), b'')
self.assertEqual(f.tell(), 0)
self.assertEqual(f.readframes(2), b'\x00\x00\x00\x00\x0b\xd4\x0b\xd4')
f.rewind()
@@ -58,6 +59,10 @@ class AIFCTest(unittest.TestCase):
self.assertEqual(f.readframes(2), b'\x17t\x17t"\xad"\xad')
f.setpos(pos0)
self.assertEqual(f.readframes(2), b'\x00\x00\x00\x00\x0b\xd4\x0b\xd4')
+ with self.assertRaises(aifc.Error):
+ f.setpos(-1)
+ with self.assertRaises(aifc.Error):
+ f.setpos(f.getnframes() + 1)
def test_write(self):
f = self.f = aifc.open(self.sndfilepath)
@@ -92,8 +97,6 @@ class AIFCTest(unittest.TestCase):
self.assertEqual(f.getparams()[0:3], fout.getparams()[0:3])
self.assertEqual(fout.getcomptype(), b'ULAW')
self.assertEqual(fout.getcompname(), b'foo')
- # XXX: this test fails, not sure if it should succeed or not
- # self.assertEqual(f.readframes(5), fout.readframes(5))
def test_close(self):
class Wrapfile(object):
@@ -112,7 +115,7 @@ class AIFCTest(unittest.TestCase):
def test_write_header_comptype_sampwidth(self):
for comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
- fout = self.fout = aifc.open(io.BytesIO(), 'wb')
+ fout = aifc.open(io.BytesIO(), 'wb')
fout.setnchannels(1)
fout.setframerate(1)
fout.setcomptype(comptype, b'')
@@ -121,7 +124,7 @@ class AIFCTest(unittest.TestCase):
fout.initfp(None)
def test_write_markers_values(self):
- fout = self.fout = aifc.open(io.BytesIO(), 'wb')
+ fout = aifc.open(io.BytesIO(), 'wb')
self.assertEqual(fout.getmarkers(), None)
fout.setmark(1, 0, b'foo1')
fout.setmark(1, 1, b'foo2')
@@ -179,6 +182,148 @@ class AIFCLowLevelTest(unittest.TestCase):
with self.assertRaises(ValueError):
aifc._write_string(f, b'too long' * 255)
+ def test_wrong_open_mode(self):
+ with self.assertRaises(aifc.Error):
+ aifc.open(TESTFN, 'wrong_mode')
+
+ def test_read_wrong_form(self):
+ b1 = io.BytesIO(b'WRNG' + struct.pack('>L', 0))
+ b2 = io.BytesIO(b'FORM' + struct.pack('>L', 4) + b'WRNG')
+ self.assertRaises(aifc.Error, aifc.open, b1)
+ self.assertRaises(aifc.Error, aifc.open, b2)
+
+ def test_read_no_comm_chunk(self):
+ b = io.BytesIO(b'FORM' + struct.pack('>L', 4) + b'AIFF')
+ self.assertRaises(aifc.Error, aifc.open, b)
+
+ def test_read_wrong_compression_type(self):
+ b = b'FORM' + struct.pack('>L', 4) + b'AIFC'
+ b += b'COMM' + struct.pack('>LhlhhLL', 23, 0, 0, 0, 0, 0, 0)
+ b += b'WRNG' + struct.pack('B', 0)
+ self.assertRaises(aifc.Error, aifc.open, io.BytesIO(b))
+
+ def test_read_wrong_marks(self):
+ b = b'FORM' + struct.pack('>L', 4) + b'AIFF'
+ b += b'COMM' + struct.pack('>LhlhhLL', 18, 0, 0, 0, 0, 0, 0)
+ b += b'SSND' + struct.pack('>L', 8) + b'\x00' * 8
+ b += b'MARK' + struct.pack('>LhB', 3, 1, 1)
+ with captured_stdout() as s:
+ f = aifc.open(io.BytesIO(b))
+ self.assertEqual(
+ s.getvalue(),
+ 'Warning: MARK chunk contains only 0 markers instead of 1\n')
+ self.assertEqual(f.getmarkers(), None)
+
+ def test_read_comm_kludge_compname_even(self):
+ b = b'FORM' + struct.pack('>L', 4) + b'AIFC'
+ b += b'COMM' + struct.pack('>LhlhhLL', 18, 0, 0, 0, 0, 0, 0)
+ b += b'NONE' + struct.pack('B', 4) + b'even' + b'\x00'
+ b += b'SSND' + struct.pack('>L', 8) + b'\x00' * 8
+ with captured_stdout() as s:
+ f = aifc.open(io.BytesIO(b))
+ self.assertEqual(s.getvalue(), 'Warning: bad COMM chunk size\n')
+ self.assertEqual(f.getcompname(), b'even')
+
+ def test_read_comm_kludge_compname_odd(self):
+ b = b'FORM' + struct.pack('>L', 4) + b'AIFC'
+ b += b'COMM' + struct.pack('>LhlhhLL', 18, 0, 0, 0, 0, 0, 0)
+ b += b'NONE' + struct.pack('B', 3) + b'odd'
+ b += b'SSND' + struct.pack('>L', 8) + b'\x00' * 8
+ with captured_stdout() as s:
+ f = aifc.open(io.BytesIO(b))
+ self.assertEqual(s.getvalue(), 'Warning: bad COMM chunk size\n')
+ self.assertEqual(f.getcompname(), b'odd')
+
+ def test_write_params_raises(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ wrong_params = (0, 0, 0, 0, b'WRNG', '')
+ self.assertRaises(aifc.Error, fout.setparams, wrong_params)
+ self.assertRaises(aifc.Error, fout.getparams)
+ self.assertRaises(aifc.Error, fout.setnchannels, 0)
+ self.assertRaises(aifc.Error, fout.getnchannels)
+ self.assertRaises(aifc.Error, fout.setsampwidth, 0)
+ self.assertRaises(aifc.Error, fout.getsampwidth)
+ self.assertRaises(aifc.Error, fout.setframerate, 0)
+ self.assertRaises(aifc.Error, fout.getframerate)
+ self.assertRaises(aifc.Error, fout.setcomptype, b'WRNG', '')
+ fout.aiff()
+ fout.setnchannels(1)
+ fout.setsampwidth(1)
+ fout.setframerate(1)
+ fout.setnframes(1)
+ fout.writeframes(b'\x00')
+ self.assertRaises(aifc.Error, fout.setparams, (1, 1, 1, 1, 1, 1))
+ self.assertRaises(aifc.Error, fout.setnchannels, 1)
+ self.assertRaises(aifc.Error, fout.setsampwidth, 1)
+ self.assertRaises(aifc.Error, fout.setframerate, 1)
+ self.assertRaises(aifc.Error, fout.setnframes, 1)
+ self.assertRaises(aifc.Error, fout.setcomptype, b'NONE', '')
+ self.assertRaises(aifc.Error, fout.aiff)
+ self.assertRaises(aifc.Error, fout.aifc)
+
+ def test_write_params_singles(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ fout.aifc()
+ fout.setnchannels(1)
+ fout.setsampwidth(2)
+ fout.setframerate(3)
+ fout.setnframes(4)
+ fout.setcomptype(b'NONE', b'name')
+ self.assertEqual(fout.getnchannels(), 1)
+ self.assertEqual(fout.getsampwidth(), 2)
+ self.assertEqual(fout.getframerate(), 3)
+ self.assertEqual(fout.getnframes(), 0)
+ self.assertEqual(fout.tell(), 0)
+ self.assertEqual(fout.getcomptype(), b'NONE')
+ self.assertEqual(fout.getcompname(), b'name')
+ fout.writeframes(b'\x00' * 4 * fout.getsampwidth() * fout.getnchannels())
+ self.assertEqual(fout.getnframes(), 4)
+ self.assertEqual(fout.tell(), 4)
+
+ def test_write_params_bunch(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ fout.aifc()
+ p = (1, 2, 3, 4, b'NONE', b'name')
+ fout.setparams(p)
+ self.assertEqual(fout.getparams(), p)
+ fout.initfp(None)
+
+ def test_write_header_raises(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ self.assertRaises(aifc.Error, fout.close)
+ fout.setnchannels(1)
+ self.assertRaises(aifc.Error, fout.close)
+ fout.setsampwidth(1)
+ self.assertRaises(aifc.Error, fout.close)
+ fout.initfp(None)
+
+ def test_write_header_comptype_raises(self):
+ for comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ fout.setsampwidth(1)
+ fout.setcomptype(comptype, b'')
+ self.assertRaises(aifc.Error, fout.close)
+ fout.initfp(None)
+
+ def test_write_markers_raises(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ self.assertRaises(aifc.Error, fout.setmark, 0, 0, b'')
+ self.assertRaises(aifc.Error, fout.setmark, 1, -1, b'')
+ self.assertRaises(aifc.Error, fout.setmark, 1, 0, None)
+ self.assertRaises(aifc.Error, fout.getmark, 1)
+ fout.initfp(None)
+
+ def test_write_aiff_by_extension(self):
+ sampwidth = 2
+ fout = self.fout = aifc.open(TESTFN + '.aiff', 'wb')
+ fout.setparams((1, sampwidth, 1, 1, b'ULAW', b''))
+ frames = b'\x00' * fout.getnchannels() * sampwidth
+ fout.writeframes(frames)
+ fout.close()
+ f = self.f = aifc.open(TESTFN + '.aiff', 'rb')
+ self.assertEqual(f.getcomptype(), b'NONE')
+ f.close()
+
def test_main():
run_unittest(AIFCTest)
diff --git a/Lib/test/test_ast.py b/Lib/test/test_ast.py
index be9f05eb46..e22d4de88b 100644
--- a/Lib/test/test_ast.py
+++ b/Lib/test/test_ast.py
@@ -195,12 +195,6 @@ class AST_Tests(unittest.TestCase):
with self.assertRaises(AttributeError):
x.vararg
- with self.assertRaises(AttributeError):
- x.foobar = 21
-
- with self.assertRaises(AttributeError):
- ast.AST(lineno=2)
-
with self.assertRaises(TypeError):
# "_ast.AST constructor takes 0 positional arguments"
ast.AST(2)
@@ -224,6 +218,12 @@ class AST_Tests(unittest.TestCase):
im = ast.parse("from . import y").body[0]
self.assertIsNone(im.module)
+ def test_non_interned_future_from_ast(self):
+ mod = ast.parse("from __future__ import division")
+ self.assertIsInstance(mod.body[0], ast.ImportFrom)
+ mod.body[0].module = " __future__ ".strip()
+ compile(mod, "<test>", "exec")
+
def test_base_classes(self):
self.assertTrue(issubclass(ast.For, ast.stmt))
self.assertTrue(issubclass(ast.Name, ast.expr))
diff --git a/Lib/test/test_asyncore.py b/Lib/test/test_asyncore.py
index 53c49a803b..5f55df89f5 100644
--- a/Lib/test/test_asyncore.py
+++ b/Lib/test/test_asyncore.py
@@ -7,6 +7,7 @@ import sys
import time
import warnings
import errno
+import struct
from test import support
from test.support import TESTFN, run_unittest, unlink
@@ -730,6 +731,25 @@ class BaseTestAPI(unittest.TestCase):
finally:
sock.close()
+ @unittest.skipUnless(threading, 'Threading required for this test.')
+ @support.reap_threads
+ def test_quick_connect(self):
+ # see: http://bugs.python.org/issue10340
+ server = TCPServer()
+ t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1, count=500))
+ t.start()
+
+ for x in range(20):
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.settimeout(.2)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
+ struct.pack('ii', 1, 0))
+ try:
+ s.connect(server.address)
+ except socket.error:
+ pass
+ finally:
+ s.close()
class TestAPI_UseSelect(BaseTestAPI):
use_poll = False
diff --git a/Lib/test/test_base64.py b/Lib/test/test_base64.py
index 93c623cc8a..ca94504b1c 100644
--- a/Lib/test/test_base64.py
+++ b/Lib/test/test_base64.py
@@ -2,6 +2,7 @@ import unittest
from test import support
import base64
import binascii
+import os
import sys
import subprocess
@@ -227,6 +228,10 @@ class BaseXYTestCase(unittest.TestCase):
class TestMain(unittest.TestCase):
+ def tearDown(self):
+ if os.path.exists(support.TESTFN):
+ os.unlink(support.TESTFN)
+
def get_output(self, *args, **options):
args = (sys.executable, '-m', 'base64') + args
return subprocess.check_output(args, **options)
diff --git a/Lib/test/test_cgi.py b/Lib/test/test_cgi.py
index 9951e93f6e..3031fb3826 100644
--- a/Lib/test/test_cgi.py
+++ b/Lib/test/test_cgi.py
@@ -118,6 +118,11 @@ def gen_result(data, environ):
class CgiTests(unittest.TestCase):
+ def test_escape(self):
+ self.assertEqual("test &amp; string", cgi.escape("test & string"))
+ self.assertEqual("&lt;test string&gt;", cgi.escape("<test string>"))
+ self.assertEqual("&quot;test string&quot;", cgi.escape('"test string"', True))
+
def test_strict(self):
for orig, expect in parse_strict_test_cases:
# Test basic parsing
diff --git a/Lib/test/test_concurrent_futures.py b/Lib/test/test_concurrent_futures.py
index 372da27dca..2afa93802e 100644
--- a/Lib/test/test_concurrent_futures.py
+++ b/Lib/test/test_concurrent_futures.py
@@ -183,7 +183,9 @@ class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
for p in processes:
p.join()
+
class WaitTests(unittest.TestCase):
+
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
@@ -284,7 +286,21 @@ class WaitTests(unittest.TestCase):
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests):
- pass
+
+ def test_pending_calls_race(self):
+ # Issue #14406: multi-threaded race condition when waiting on all
+ # futures.
+ event = threading.Event()
+ def future_func():
+ event.wait()
+ oldswitchinterval = sys.getswitchinterval()
+ sys.setswitchinterval(1e-6)
+ try:
+ fs = {self.executor.submit(future_func) for i in range(100)}
+ event.set()
+ futures.wait(fs, return_when=futures.ALL_COMPLETED)
+ finally:
+ sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests):
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index 92304b4e7e..141d7918dd 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -1,8 +1,10 @@
import builtins
+import gc
import sys
import types
import math
import unittest
+import weakref
from copy import deepcopy
from test import support
@@ -1186,7 +1188,6 @@ order (MRO) for bases """
self.assertEqual(Counted.counter, 0)
# Test lookup leaks [SF bug 572567]
- import gc
if hasattr(gc, 'get_objects'):
class G(object):
def __eq__(self, other):
@@ -4380,7 +4381,6 @@ order (MRO) for bases """
self.assertRaises(AttributeError, getattr, C(), "attr")
self.assertEqual(descr.counter, 4)
- import gc
class EvilGetattribute(object):
# This used to segfault
def __getattr__(self, name):
@@ -4393,6 +4393,9 @@ order (MRO) for bases """
self.assertRaises(AttributeError, getattr, EvilGetattribute(), "attr")
+ def test_type___getattribute__(self):
+ self.assertRaises(TypeError, type.__getattribute__, list, type)
+
def test_abstractmethods(self):
# type pretends not to have __abstractmethods__.
self.assertRaises(AttributeError, getattr, type, "__abstractmethods__")
@@ -4429,6 +4432,21 @@ order (MRO) for bases """
foo = Foo()
str(foo)
+ def test_cycle_through_dict(self):
+ # See bug #1469629
+ class X(dict):
+ def __init__(self):
+ dict.__init__(self)
+ self.__dict__ = self
+ x = X()
+ x.attr = 42
+ wr = weakref.ref(x)
+ del x
+ support.gc_collect()
+ self.assertIsNone(wr())
+ for o in gc.get_objects():
+ self.assertIsNot(type(o), X)
+
class DictProxyTests(unittest.TestCase):
def setUp(self):
class C(object):
diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py
index 1507e42053..d2740a3384 100644
--- a/Lib/test/test_dict.py
+++ b/Lib/test/test_dict.py
@@ -299,6 +299,26 @@ class DictTest(unittest.TestCase):
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
+ def test_setdefault_atomic(self):
+ # Issue #13521: setdefault() calls __hash__ and __eq__ only once.
+ class Hashed(object):
+ def __init__(self):
+ self.hash_count = 0
+ self.eq_count = 0
+ def __hash__(self):
+ self.hash_count += 1
+ return 42
+ def __eq__(self, other):
+ self.eq_count += 1
+ return id(self) == id(other)
+ hashed1 = Hashed()
+ y = {hashed1: 5}
+ hashed2 = Hashed()
+ y.setdefault(hashed2, [])
+ self.assertEqual(hashed1.hash_count, 1)
+ self.assertEqual(hashed2.hash_count, 1)
+ self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
+
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py
index 0a7ddd4c5b..7a2dd0c8f1 100644
--- a/Lib/test/test_exceptions.py
+++ b/Lib/test/test_exceptions.py
@@ -38,7 +38,7 @@ class ExceptionTests(unittest.TestCase):
try:
try:
import marshal
- marshal.loads('')
+ marshal.loads(b'')
except EOFError:
pass
finally:
diff --git a/Lib/test/test_fractions.py b/Lib/test/test_fractions.py
index 26e132f4e5..084ae0cce5 100644
--- a/Lib/test/test_fractions.py
+++ b/Lib/test/test_fractions.py
@@ -6,6 +6,7 @@ import math
import numbers
import operator
import fractions
+import sys
import unittest
from copy import copy, deepcopy
from pickle import dumps, loads
@@ -76,6 +77,9 @@ class DummyRational(object):
def __float__(self):
assert False, "__float__ should not be invoked"
+class DummyFraction(fractions.Fraction):
+ """Dummy Fraction subclass for copy and deepcopy testing."""
+
class GcdTest(unittest.TestCase):
def testMisc(self):
@@ -286,9 +290,14 @@ class FractionTest(unittest.TestCase):
self.assertEqual(F(201, 200).limit_denominator(100), F(1))
self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101))
self.assertEqual(F(0).limit_denominator(10000), F(0))
+ for i in (0, -1):
+ self.assertRaisesMessage(
+ ValueError, "max_denominator should be at least 1",
+ F(1).limit_denominator, i)
def testConversions(self):
self.assertTypedEquals(-1, math.trunc(F(-11, 10)))
+ self.assertTypedEquals(1, math.trunc(F(11, 10)))
self.assertTypedEquals(-2, math.floor(F(-11, 10)))
self.assertTypedEquals(-1, math.ceil(F(-11, 10)))
self.assertTypedEquals(-1, math.ceil(F(-10, 10)))
@@ -329,6 +338,7 @@ class FractionTest(unittest.TestCase):
self.assertEqual(F(8, 27), F(2, 3) ** F(3))
self.assertEqual(F(27, 8), F(2, 3) ** F(-3))
self.assertTypedEquals(2.0, F(4) ** F(1, 2))
+ self.assertEqual(F(1, 1), +F(1, 1))
z = pow(F(-1), F(1, 2))
self.assertAlmostEqual(z.real, 0)
self.assertEqual(z.imag, 1)
@@ -395,6 +405,10 @@ class FractionTest(unittest.TestCase):
TypeError,
"unsupported operand type(s) for +: 'Fraction' and 'Decimal'",
operator.add, F(3,11), Decimal('3.1415926'))
+ self.assertRaisesMessage(
+ TypeError,
+ "unsupported operand type(s) for +: 'Decimal' and 'Fraction'",
+ operator.add, Decimal('3.1415926'), F(3,11))
def testComparisons(self):
self.assertTrue(F(1, 2) < F(2, 3))
@@ -538,9 +552,12 @@ class FractionTest(unittest.TestCase):
self.assertEqual("7", str(F(7, 1)))
def testHash(self):
+ hmod = sys.hash_info.modulus
+ hinf = sys.hash_info.inf
self.assertEqual(hash(2.5), hash(F(5, 2)))
self.assertEqual(hash(10**50), hash(F(10**50)))
self.assertNotEqual(hash(float(10**23)), hash(F(10**23)))
+ self.assertEqual(hinf, hash(F(1, hmod)))
# Check that __hash__ produces the same value as hash(), for
# consistency with int and Decimal. (See issue #10356.)
self.assertEqual(hash(F(-1)), F(-1).__hash__())
@@ -574,9 +591,14 @@ class FractionTest(unittest.TestCase):
def test_copy_deepcopy_pickle(self):
r = F(13, 7)
+ dr = DummyFraction(13, 7)
self.assertEqual(r, loads(dumps(r)))
self.assertEqual(id(r), id(copy(r)))
self.assertEqual(id(r), id(deepcopy(r)))
+ self.assertNotEqual(id(dr), id(copy(dr)))
+ self.assertNotEqual(id(dr), id(deepcopy(dr)))
+ self.assertTypedEquals(dr, copy(dr))
+ self.assertTypedEquals(dr, deepcopy(dr))
def test_slots(self):
# Issue 4998
diff --git a/Lib/test/test_mailbox.py b/Lib/test/test_mailbox.py
index ef64366d5e..f885d33eea 100644
--- a/Lib/test/test_mailbox.py
+++ b/Lib/test/test_mailbox.py
@@ -7,6 +7,7 @@ import email
import email.message
import re
import io
+import shutil
import tempfile
from test import support
import unittest
@@ -19,7 +20,7 @@ except ImportError:
pass
-class TestBase(unittest.TestCase):
+class TestBase:
def _check_sample(self, msg):
# Inspect a mailbox.Message representation of the sample message
@@ -38,12 +39,7 @@ class TestBase(unittest.TestCase):
def _delete_recursively(self, target):
# Delete a file or delete a directory recursively
if os.path.isdir(target):
- for path, dirs, files in os.walk(target, topdown=False):
- for name in files:
- os.remove(os.path.join(path, name))
- for name in dirs:
- os.rmdir(os.path.join(path, name))
- os.rmdir(target)
+ shutil.rmtree(target)
elif os.path.exists(target):
os.remove(target)
@@ -115,10 +111,10 @@ class TestMailbox(TestBase):
self.assertMailboxEmpty()
def test_add_that_raises_leaves_mailbox_empty(self):
- # XXX This test will start failing when Message learns to handle
- # non-ASCII string headers, and a different internal failure will
- # need to be found or manufactured.
- with self.assertRaises(ValueError):
+ def raiser(*args, **kw):
+ raise Exception("a fake error")
+ support.patch(self, email.generator.BytesGenerator, 'flatten', raiser)
+ with self.assertRaises(Exception):
self._box.add(email.message_from_string("From: Alphöso"))
self.assertEqual(len(self._box), 0)
self._box.close()
@@ -549,7 +545,7 @@ class TestMailbox(TestBase):
return self._path + '.lock'
-class TestMailboxSuperclass(TestBase):
+class TestMailboxSuperclass(TestBase, unittest.TestCase):
def test_notimplemented(self):
# Test that all Mailbox methods raise NotImplementedException.
@@ -585,7 +581,7 @@ class TestMailboxSuperclass(TestBase):
self.assertRaises(NotImplementedError, lambda: box.close())
-class TestMaildir(TestMailbox):
+class TestMaildir(TestMailbox, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.Maildir(path, factory)
@@ -1047,7 +1043,7 @@ class _TestMboxMMDF(TestMailbox):
self._box.close()
-class TestMbox(_TestMboxMMDF):
+class TestMbox(_TestMboxMMDF, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.mbox(path, factory)
@@ -1070,12 +1066,12 @@ class TestMbox(_TestMboxMMDF):
perms = st.st_mode
self.assertFalse((perms & 0o111)) # Execute bits should all be off.
-class TestMMDF(_TestMboxMMDF):
+class TestMMDF(_TestMboxMMDF, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.MMDF(path, factory)
-class TestMH(TestMailbox):
+class TestMH(TestMailbox, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.MH(path, factory)
@@ -1210,7 +1206,7 @@ class TestMH(TestMailbox):
return os.path.join(self._path, '.mh_sequences.lock')
-class TestBabyl(TestMailbox):
+class TestBabyl(TestMailbox, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.Babyl(path, factory)
@@ -1275,7 +1271,7 @@ class TestFakeMailBox(unittest.TestCase):
self.assertTrue(box.files[i].closed)
-class TestMessage(TestBase):
+class TestMessage(TestBase, unittest.TestCase):
_factory = mailbox.Message # Overridden by subclasses to reuse tests
@@ -1355,7 +1351,7 @@ class TestMessage(TestBase):
pass
-class TestMaildirMessage(TestMessage):
+class TestMaildirMessage(TestMessage, unittest.TestCase):
_factory = mailbox.MaildirMessage
@@ -1429,7 +1425,7 @@ class TestMaildirMessage(TestMessage):
self._check_sample(msg)
-class _TestMboxMMDFMessage(TestMessage):
+class _TestMboxMMDFMessage:
_factory = mailbox._mboxMMDFMessage
@@ -1476,12 +1472,12 @@ class _TestMboxMMDFMessage(TestMessage):
r"\d{2} \d{4}", msg.get_from()) is not None)
-class TestMboxMessage(_TestMboxMMDFMessage):
+class TestMboxMessage(_TestMboxMMDFMessage, TestMessage):
_factory = mailbox.mboxMessage
-class TestMHMessage(TestMessage):
+class TestMHMessage(TestMessage, unittest.TestCase):
_factory = mailbox.MHMessage
@@ -1512,7 +1508,7 @@ class TestMHMessage(TestMessage):
self.assertEqual(msg.get_sequences(), ['foobar', 'replied'])
-class TestBabylMessage(TestMessage):
+class TestBabylMessage(TestMessage, unittest.TestCase):
_factory = mailbox.BabylMessage
@@ -1567,12 +1563,12 @@ class TestBabylMessage(TestMessage):
self.assertEqual(visible[header], msg[header])
-class TestMMDFMessage(_TestMboxMMDFMessage):
+class TestMMDFMessage(_TestMboxMMDFMessage, TestMessage):
_factory = mailbox.MMDFMessage
-class TestMessageConversion(TestBase):
+class TestMessageConversion(TestBase, unittest.TestCase):
def test_plain_to_x(self):
# Convert Message to all formats
@@ -1913,7 +1909,7 @@ class TestProxyFileBase(TestBase):
self.assertTrue(proxy.closed)
-class TestProxyFile(TestProxyFileBase):
+class TestProxyFile(TestProxyFileBase, unittest.TestCase):
def setUp(self):
self._path = support.TESTFN
@@ -1962,7 +1958,7 @@ class TestProxyFile(TestProxyFileBase):
self._test_close(mailbox._ProxyFile(self._file))
-class TestPartialFile(TestProxyFileBase):
+class TestPartialFile(TestProxyFileBase, unittest.TestCase):
def setUp(self):
self._path = support.TESTFN
@@ -2029,6 +2025,10 @@ class MaildirTestCase(unittest.TestCase):
def setUp(self):
# create a new maildir mailbox to work with:
self._dir = support.TESTFN
+ if os.path.isdir(self._dir):
+ shutil.rmtree(self._dir)
+ elif os.path.isfile(self._dir):
+ os.unlink(self._dir)
os.mkdir(self._dir)
os.mkdir(os.path.join(self._dir, "cur"))
os.mkdir(os.path.join(self._dir, "tmp"))
diff --git a/Lib/test/test_marshal.py b/Lib/test/test_marshal.py
index cd100f9be4..96a70ecc2a 100644
--- a/Lib/test/test_marshal.py
+++ b/Lib/test/test_marshal.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
from test import support
+import array
import marshal
import sys
import unittest
@@ -137,6 +138,27 @@ class ContainerTestCase(unittest.TestCase, HelperMixin):
for constructor in (set, frozenset):
self.helper(constructor(self.d.keys()))
+
+class BufferTestCase(unittest.TestCase, HelperMixin):
+
+ def test_bytearray(self):
+ b = bytearray(b"abc")
+ self.helper(b)
+ new = marshal.loads(marshal.dumps(b))
+ self.assertEqual(type(new), bytes)
+
+ def test_memoryview(self):
+ b = memoryview(b"abc")
+ self.helper(b)
+ new = marshal.loads(marshal.dumps(b))
+ self.assertEqual(type(new), bytes)
+
+ def test_array(self):
+ a = array.array('B', b"abc")
+ new = marshal.loads(marshal.dumps(a))
+ self.assertEqual(new, b"abc")
+
+
class BugsTestCase(unittest.TestCase):
def test_bug_5888452(self):
# Simple-minded check for SF 588452: Debug build crashes
@@ -162,7 +184,7 @@ class BugsTestCase(unittest.TestCase):
pass
def test_loads_recursion(self):
- s = 'c' + ('X' * 4*4) + '{' * 2**20
+ s = b'c' + (b'X' * 4*4) + b'{' * 2**20
self.assertRaises(ValueError, marshal.loads, s)
def test_recursion_limit(self):
@@ -235,6 +257,11 @@ class BugsTestCase(unittest.TestCase):
finally:
support.unlink(support.TESTFN)
+ def test_loads_reject_unicode_strings(self):
+ # Issue #14177: marshal.loads() should not accept unicode strings
+ unicode_string = 'T'
+ self.assertRaises(TypeError, marshal.loads, unicode_string)
+
def test_main():
support.run_unittest(IntTestCase,
@@ -243,6 +270,7 @@ def test_main():
CodeTestCase,
ContainerTestCase,
ExceptionTestCase,
+ BufferTestCase,
BugsTestCase)
if __name__ == "__main__":
diff --git a/Lib/test/test_minidom.py b/Lib/test/test_minidom.py
index f3fa1b8c58..4a69b00153 100644
--- a/Lib/test/test_minidom.py
+++ b/Lib/test/test_minidom.py
@@ -350,13 +350,31 @@ class MinidomTest(unittest.TestCase):
def testGetAttrList(self):
pass
- def testGetAttrValues(self): pass
+ def testGetAttrValues(self):
+ pass
- def testGetAttrLength(self): pass
+ def testGetAttrLength(self):
+ pass
- def testGetAttribute(self): pass
+ def testGetAttribute(self):
+ dom = Document()
+ child = dom.appendChild(
+ dom.createElementNS("http://www.python.org", "python:abc"))
+ self.assertEqual(child.getAttribute('missing'), '')
- def testGetAttributeNS(self): pass
+ def testGetAttributeNS(self):
+ dom = Document()
+ child = dom.appendChild(
+ dom.createElementNS("http://www.python.org", "python:abc"))
+ child.setAttributeNS("http://www.w3.org", "xmlns:python",
+ "http://www.python.org")
+ self.assertEqual(child.getAttributeNS("http://www.w3.org", "python"),
+ 'http://www.python.org')
+ self.assertEqual(child.getAttributeNS("http://www.w3.org", "other"),
+ '')
+ child2 = child.appendChild(dom.createElement('abc'))
+ self.assertEqual(child2.getAttributeNS("http://www.python.org", "missing"),
+ '')
def testGetAttributeNode(self): pass
diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py
index 8edb420864..298faf73fd 100644
--- a/Lib/test/test_multiprocessing.py
+++ b/Lib/test/test_multiprocessing.py
@@ -2319,8 +2319,26 @@ class TestStdinBadfiledescriptor(unittest.TestCase):
flike.flush()
assert sio.getvalue() == 'foo'
+
+#
+# Issue 14151: Test invalid family on invalid environment
+#
+
+class TestInvalidFamily(unittest.TestCase):
+
+ @unittest.skipIf(WIN32, "skipped on Windows")
+ def test_invalid_family(self):
+ with self.assertRaises(ValueError):
+ multiprocessing.connection.Listener(r'\\.\test')
+
+ @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
+ def test_invalid_family_win32(self):
+ with self.assertRaises(ValueError):
+ multiprocessing.connection.Listener('/var/test.pipe')
+
+
testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
- TestStdinBadfiledescriptor]
+ TestStdinBadfiledescriptor, TestInvalidFamily]
#
#
diff --git a/Lib/test/test_queue.py b/Lib/test/test_queue.py
index 8287a5ddf6..86ad9c0765 100644
--- a/Lib/test/test_queue.py
+++ b/Lib/test/test_queue.py
@@ -82,7 +82,7 @@ class BlockingTestMixin:
self.fail("trigger thread ended but event never set")
-class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
+class BaseQueueTestMixin(BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
@@ -229,13 +229,13 @@ class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
with self.assertRaises(queue.Full):
q.put_nowait(4)
-class QueueTest(BaseQueueTest):
+class QueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.Queue
-class LifoQueueTest(BaseQueueTest):
+class LifoQueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.LifoQueue
-class PriorityQueueTest(BaseQueueTest):
+class PriorityQueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.PriorityQueue
diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py
index fe8bc34039..940ba39058 100644
--- a/Lib/test/test_re.py
+++ b/Lib/test/test_re.py
@@ -1,4 +1,5 @@
-from test.support import verbose, run_unittest
+from test.support import verbose, run_unittest, gc_collect
+import io
import re
from re import Scanner
import sys
@@ -16,6 +17,17 @@ import unittest
class ReTests(unittest.TestCase):
+ def test_keep_buffer(self):
+ # See bug 14212
+ b = bytearray(b'x')
+ it = re.finditer(b'a', b)
+ with self.assertRaises(BufferError):
+ b.extend(b'x'*400)
+ list(it)
+ del it
+ gc_collect()
+ b.extend(b'x'*400)
+
def test_weakref(self):
s = 'QabbbcR'
x = re.compile('ab+c')
@@ -355,6 +367,32 @@ class ReTests(unittest.TestCase):
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
+ def test_string_boundaries(self):
+ # See http://bugs.python.org/issue10713
+ self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1),
+ "abc")
+ # There's a word boundary at the start of a string.
+ self.assertTrue(re.match(r"\b", "abc"))
+ # A non-empty string includes a non-boundary zero-length match.
+ self.assertTrue(re.search(r"\B", "abc"))
+ # There is no non-boundary match at the start of a string.
+ self.assertFalse(re.match(r"\B", "abc"))
+ # However, an empty string contains no word boundaries, and also no
+ # non-boundaries.
+ self.assertEqual(re.search(r"\B", ""), None)
+ # This one is questionable and different from the perlre behaviour,
+ # but describes current behavior.
+ self.assertEqual(re.search(r"\b", ""), None)
+ # A single word-character string has two boundaries, but no
+ # non-boundary gaps.
+ self.assertEqual(len(re.findall(r"\b", "a")), 2)
+ self.assertEqual(len(re.findall(r"\B", "a")), 0)
+ # If there are no words, there are no boundaries
+ self.assertEqual(len(re.findall(r"\b", " ")), 0)
+ self.assertEqual(len(re.findall(r"\b", " ")), 0)
+ # Can match around the whitespace.
+ self.assertEqual(len(re.findall(r"\B", " ")), 2)
+
def test_bigcharset(self):
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222").group(1), "\u2222")
@@ -780,6 +818,16 @@ class ReTests(unittest.TestCase):
self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
self.assertRaises(TypeError, _sre.compile, {}, 0, [])
+ def test_compile(self):
+ # Test return value when given string and pattern as parameter
+ pattern = re.compile('random pattern')
+ self.assertIsInstance(pattern, re._pattern_type)
+ same_pattern = re.compile(pattern)
+ self.assertIsInstance(same_pattern, re._pattern_type)
+ self.assertIs(same_pattern, pattern)
+ # Test behaviour when not given a string or pattern as parameter
+ self.assertRaises(TypeError, re.compile, 0)
+
def run_re_tests():
from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py
index d77b7dc9ed..cce0d1b6eb 100644
--- a/Lib/test/test_socket.py
+++ b/Lib/test/test_socket.py
@@ -951,6 +951,7 @@ class BasicTCPTest(SocketConnectedTest):
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
+ self.assertTrue(self.cli_conn._closed)
self.assertRaises(socket.error, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
diff --git a/Lib/test/test_socketserver.py b/Lib/test/test_socketserver.py
index b3660c1e6c..ca23301cd1 100644
--- a/Lib/test/test_socketserver.py
+++ b/Lib/test/test_socketserver.py
@@ -8,6 +8,8 @@ import os
import select
import signal
import socket
+import select
+import errno
import tempfile
import unittest
import socketserver
@@ -32,8 +34,11 @@ def signal_alarm(n):
if hasattr(signal, 'alarm'):
signal.alarm(n)
+# Remember real select() to avoid interferences with mocking
+_real_select = select.select
+
def receive(sock, n, timeout=20):
- r, w, x = select.select([sock], [], [], timeout)
+ r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
@@ -226,6 +231,38 @@ class SocketServerTest(unittest.TestCase):
socketserver.DatagramRequestHandler,
self.dgram_examine)
+ @contextlib.contextmanager
+ def mocked_select_module(self):
+ """Mocks the select.select() call to raise EINTR for first call"""
+ old_select = select.select
+
+ class MockSelect:
+ def __init__(self):
+ self.called = 0
+
+ def __call__(self, *args):
+ self.called += 1
+ if self.called == 1:
+ # raise the exception on first call
+ raise select.error(errno.EINTR, os.strerror(errno.EINTR))
+ else:
+ # Return real select value for consecutive calls
+ return old_select(*args)
+
+ select.select = MockSelect()
+ try:
+ yield select.select
+ finally:
+ select.select = old_select
+
+ def test_InterruptServerSelectCall(self):
+ with self.mocked_select_module() as mock_select:
+ pid = self.run_server(socketserver.TCPServer,
+ socketserver.StreamRequestHandler,
+ self.stream_examine)
+ # Make sure select was called again:
+ self.assertGreater(mock_select.called, 1)
+
# Alas, on Linux (at least) recvfrom() doesn't return a meaningful
# client address so this cannot work:
diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py
index 89c08ede7b..98d759b9f1 100644
--- a/Lib/test/test_strptime.py
+++ b/Lib/test/test_strptime.py
@@ -38,9 +38,9 @@ class LocaleTime_Tests(unittest.TestCase):
comparison = testing[self.time_tuple[tuple_position]]
self.assertIn(strftime_output, testing,
"%s: not found in tuple" % error_msg)
- self.assertTrue(comparison == strftime_output,
- "%s: position within tuple incorrect; %s != %s" %
- (error_msg, comparison, strftime_output))
+ self.assertEqual(comparison, strftime_output,
+ "%s: position within tuple incorrect; %s != %s" %
+ (error_msg, comparison, strftime_output))
def test_weekday(self):
# Make sure that full and abbreviated weekday names are correct in
@@ -65,8 +65,8 @@ class LocaleTime_Tests(unittest.TestCase):
"AM/PM representation not in tuple")
if self.time_tuple[3] < 12: position = 0
else: position = 1
- self.assertTrue(strftime_output == self.LT_ins.am_pm[position],
- "AM/PM representation in the wrong position within the tuple")
+ self.assertEqual(self.LT_ins.am_pm[position], strftime_output,
+ "AM/PM representation in the wrong position within the tuple")
def test_timezone(self):
# Make sure timezone is correct
@@ -86,17 +86,14 @@ class LocaleTime_Tests(unittest.TestCase):
# output.
magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
strftime_output = time.strftime("%c", magic_date)
- self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_date_time,
- magic_date),
- "LC_date_time incorrect")
+ self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date),
+ strftime_output, "LC_date_time incorrect")
strftime_output = time.strftime("%x", magic_date)
- self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_date,
- magic_date),
- "LC_date incorrect")
+ self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date),
+ strftime_output, "LC_date incorrect")
strftime_output = time.strftime("%X", magic_date)
- self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_time,
- magic_date),
- "LC_time incorrect")
+ self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date),
+ strftime_output, "LC_time incorrect")
LT = _strptime.LocaleTime()
LT.am_pm = ('', '')
self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle "
@@ -168,8 +165,8 @@ class TimeRETests(unittest.TestCase):
# Fixes bug #661354
test_locale = _strptime.LocaleTime()
test_locale.timezone = (frozenset(), frozenset())
- self.assertTrue(_strptime.TimeRE(test_locale).pattern("%Z") == '',
- "with timezone == ('',''), TimeRE().pattern('%Z') != ''")
+ self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '',
+ "with timezone == ('',''), TimeRE().pattern('%Z') != ''")
def test_matching_with_escapes(self):
# Make sure a format that requires escaping of characters works
@@ -195,7 +192,7 @@ class TimeRETests(unittest.TestCase):
# so as to not allow to subpatterns to end up next to each other and
# "steal" characters from each other.
pattern = self.time_re.pattern('%j %H')
- self.assertTrue(not re.match(pattern, "180"))
+ self.assertFalse(re.match(pattern, "180"))
self.assertTrue(re.match(pattern, "18 0"))
diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py
index fb0b834246..6150e88c54 100644
--- a/Lib/test/test_subprocess.py
+++ b/Lib/test/test_subprocess.py
@@ -989,6 +989,27 @@ class POSIXProcessTestCase(BaseTestCase):
getattr(p, method)(*args)
return p
+ def _kill_dead_process(self, method, *args):
+ # Do not inherit file handles from the parent.
+ # It should fix failures on some platforms.
+ p = subprocess.Popen([sys.executable, "-c", """if 1:
+ import sys, time
+ sys.stdout.write('x\\n')
+ sys.stdout.flush()
+ """],
+ close_fds=True,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ # Wait for the interpreter to be completely initialized before
+ # sending any signal.
+ p.stdout.read(1)
+ # The process should end after this
+ time.sleep(1)
+ # This shouldn't raise even though the child is now dead
+ getattr(p, method)(*args)
+ p.communicate()
+
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
@@ -1007,6 +1028,18 @@ class POSIXProcessTestCase(BaseTestCase):
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
+ def test_send_signal_dead(self):
+ # Sending a signal to a dead process
+ self._kill_dead_process('send_signal', signal.SIGINT)
+
+ def test_kill_dead(self):
+ # Killing a dead process
+ self._kill_dead_process('kill')
+
+ def test_terminate_dead(self):
+ # Terminating a dead process
+ self._kill_dead_process('terminate')
+
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
@@ -1568,6 +1601,31 @@ class Win32ProcessTestCase(BaseTestCase):
returncode = p.wait()
self.assertNotEqual(returncode, 0)
+ def _kill_dead_process(self, method, *args):
+ p = subprocess.Popen([sys.executable, "-c", """if 1:
+ import sys, time
+ sys.stdout.write('x\\n')
+ sys.stdout.flush()
+ sys.exit(42)
+ """],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ self.addCleanup(p.stdout.close)
+ self.addCleanup(p.stderr.close)
+ self.addCleanup(p.stdin.close)
+ # Wait for the interpreter to be completely initialized before
+ # sending any signal.
+ p.stdout.read(1)
+ # The process should end after this
+ time.sleep(1)
+ # This shouldn't raise even though the child is now dead
+ getattr(p, method)(*args)
+ _, stderr = p.communicate()
+ self.assertStderrEqual(stderr, b'')
+ rc = p.wait()
+ self.assertEqual(rc, 42)
+
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
@@ -1577,6 +1635,15 @@ class Win32ProcessTestCase(BaseTestCase):
def test_terminate(self):
self._kill_process('terminate')
+ def test_send_signal_dead(self):
+ self._kill_dead_process('send_signal', signal.SIGTERM)
+
+ def test_kill_dead(self):
+ self._kill_dead_process('kill')
+
+ def test_terminate_dead(self):
+ self._kill_dead_process('terminate')
+
# The module says:
# "NB This only works (and is only relevant) for UNIX."
diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py
index 894a49392e..41fb3acd7b 100644
--- a/Lib/test/test_thread.py
+++ b/Lib/test/test_thread.py
@@ -128,6 +128,29 @@ class ThreadRunningTests(BasicThreadTest):
time.sleep(0.01)
self.assertEqual(thread._count(), orig)
+ def test_save_exception_state_on_error(self):
+ # See issue #14474
+ def task():
+ started.release()
+ raise SyntaxError
+ def mywrite(self, *args):
+ try:
+ raise ValueError
+ except ValueError:
+ pass
+ real_write(self, *args)
+ c = thread._count()
+ started = thread.allocate_lock()
+ with support.captured_output("stderr") as stderr:
+ real_write = stderr.write
+ stderr.write = mywrite
+ started.acquire()
+ thread.start_new_thread(task, ())
+ started.acquire()
+ while thread._count() > c:
+ pass
+ self.assertIn("Traceback", stderr.getvalue())
+
class Barrier:
def __init__(self, num_threads):
diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py
index 32637b5b29..dfc0ddf49c 100644
--- a/Lib/test/test_threading.py
+++ b/Lib/test/test_threading.py
@@ -624,6 +624,7 @@ class ThreadJoinOnShutdown(BaseTestCase):
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
+ @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_6_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py
new file mode 100644
index 0000000000..cfe13acc2d
--- /dev/null
+++ b/Lib/test/test_tools.py
@@ -0,0 +1,108 @@
+"""Tests for scripts in the Tools directory.
+
+This file contains regression tests for some of the scripts found in the
+Tools directory of a Python checkout or tarball, such as reindent.py.
+"""
+
+import os
+import sys
+import imp
+import unittest
+import sysconfig
+import tempfile
+from test import support
+from test.script_helper import assert_python_ok
+
+if not sysconfig.is_python_build():
+ # XXX some installers do contain the tools, should we detect that
+ # and run the tests in that case too?
+ raise unittest.SkipTest('test irrelevant for an installed Python')
+
+srcdir = sysconfig.get_config_var('projectbase')
+basepath = os.path.join(os.getcwd(), srcdir, 'Tools')
+scriptsdir = os.path.join(basepath, 'scripts')
+
+
+class ReindentTests(unittest.TestCase):
+ script = os.path.join(scriptsdir, 'reindent.py')
+
+ def test_noargs(self):
+ assert_python_ok(self.script)
+
+ def test_help(self):
+ rc, out, err = assert_python_ok(self.script, '-h')
+ self.assertEqual(out, b'')
+ self.assertGreater(err, b'')
+
+
+class TestSundryScripts(unittest.TestCase):
+ # At least make sure the rest don't have syntax errors. When tests are
+ # added for a script it should be added to the whitelist below.
+
+ # scripts that have independent tests.
+ whitelist = ['reindent.py']
+ # scripts that can't be imported without running
+ blacklist = ['make_ctype.py']
+ # scripts that use windows-only modules
+ windows_only = ['win_add2path.py']
+ # blacklisted for other reasons
+ other = ['analyze_dxp.py']
+
+ skiplist = blacklist + whitelist + windows_only + other
+
+ def setUp(self):
+ cm = support.DirsOnSysPath(scriptsdir)
+ cm.__enter__()
+ self.addCleanup(cm.__exit__)
+
+ def test_sundry(self):
+ for fn in os.listdir(scriptsdir):
+ if fn.endswith('.py') and fn not in self.skiplist:
+ __import__(fn[:-3])
+
+ @unittest.skipIf(sys.platform != "win32", "Windows-only test")
+ def test_sundry_windows(self):
+ for fn in self.windows_only:
+ __import__(fn[:-3])
+
+ @unittest.skipIf(not support.threading, "test requires _thread module")
+ def test_analyze_dxp_import(self):
+ if hasattr(sys, 'getdxp'):
+ import analyze_dxp
+ else:
+ with self.assertRaises(RuntimeError):
+ import analyze_dxp
+
+
+class PdepsTests(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(self):
+ path = os.path.join(scriptsdir, 'pdeps.py')
+ self.pdeps = imp.load_source('pdeps', path)
+
+ @classmethod
+ def tearDownClass(self):
+ if 'pdeps' in sys.modules:
+ del sys.modules['pdeps']
+
+ def test_process_errors(self):
+ # Issue #14492: m_import.match(line) can be None.
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fn = os.path.join(tmpdir, 'foo')
+ with open(fn, 'w') as stream:
+ stream.write("#!/this/will/fail")
+ self.pdeps.process(fn, {})
+
+ def test_inverse_attribute_error(self):
+ # Issue #14492: this used to fail with an AttributeError.
+ self.pdeps.inverse({'a': []})
+
+
+def test_main():
+ support.run_unittest(*[obj for obj in globals().values()
+ if isinstance(obj, type)])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py
index 259a181b3b..19b06a034d 100644
--- a/Lib/test/test_unicode.py
+++ b/Lib/test/test_unicode.py
@@ -891,12 +891,15 @@ class UnicodeTest(string_tests.CommonTest,
self.assertEqual('{foo._x}'.format_map({'foo': C(20)}), '20')
# test various errors
- self.assertRaises(TypeError, '{'.format_map)
- self.assertRaises(TypeError, '}'.format_map)
- self.assertRaises(TypeError, 'a{'.format_map)
- self.assertRaises(TypeError, 'a}'.format_map)
- self.assertRaises(TypeError, '{a'.format_map)
- self.assertRaises(TypeError, '}a'.format_map)
+ self.assertRaises(TypeError, ''.format_map)
+ self.assertRaises(TypeError, 'a'.format_map)
+
+ self.assertRaises(ValueError, '{'.format_map, {})
+ self.assertRaises(ValueError, '}'.format_map, {})
+ self.assertRaises(ValueError, 'a{'.format_map, {})
+ self.assertRaises(ValueError, 'a}'.format_map, {})
+ self.assertRaises(ValueError, '{a'.format_map, {})
+ self.assertRaises(ValueError, '}a'.format_map, {})
# issue #12579: can't supply positional params to format_map
self.assertRaises(ValueError, '{}'.format_map, {'a' : 2})
diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py
index 8b5bbc3d36..74b9a87852 100644
--- a/Lib/test/test_weakref.py
+++ b/Lib/test/test_weakref.py
@@ -812,11 +812,71 @@ class Object:
def __hash__(self):
return hash(self.arg)
+class RefCycle:
+ def __init__(self):
+ self.cycle = self
+
class MappingTestCase(TestBase):
COUNT = 10
+ def check_len_cycles(self, dict_type, cons):
+ N = 20
+ items = [RefCycle() for i in range(N)]
+ dct = dict_type(cons(o) for o in items)
+ # Keep an iterator alive
+ it = dct.items()
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ del items
+ gc.collect()
+ n1 = len(dct)
+ del it
+ gc.collect()
+ n2 = len(dct)
+ # one item may be kept alive inside the iterator
+ self.assertIn(n1, (0, 1))
+ self.assertEqual(n2, 0)
+
+ def test_weak_keyed_len_cycles(self):
+ self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
+
+ def test_weak_valued_len_cycles(self):
+ self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
+
+ def check_len_race(self, dict_type, cons):
+ # Extended sanity checks for len() in the face of cyclic collection
+ self.addCleanup(gc.set_threshold, *gc.get_threshold())
+ for th in range(1, 100):
+ N = 20
+ gc.collect(0)
+ gc.set_threshold(th, th, th)
+ items = [RefCycle() for i in range(N)]
+ dct = dict_type(cons(o) for o in items)
+ del items
+ # All items will be collected at next garbage collection pass
+ it = dct.items()
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ n1 = len(dct)
+ del it
+ n2 = len(dct)
+ self.assertGreaterEqual(n1, 0)
+ self.assertLessEqual(n1, N)
+ self.assertGreaterEqual(n2, 0)
+ self.assertLessEqual(n2, n1)
+
+ def test_weak_keyed_len_race(self):
+ self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
+
+ def test_weak_valued_len_race(self):
+ self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
+
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
diff --git a/Lib/test/test_weakset.py b/Lib/test/test_weakset.py
index 3cddf40c24..4d3878f8a6 100644
--- a/Lib/test/test_weakset.py
+++ b/Lib/test/test_weakset.py
@@ -17,6 +17,10 @@ import contextlib
class Foo:
pass
+class RefCycle:
+ def __init__(self):
+ self.cycle = self
+
class TestWeakSet(unittest.TestCase):
@@ -24,6 +28,12 @@ class TestWeakSet(unittest.TestCase):
# need to keep references to them
self.items = [ustr(c) for c in ('a', 'b', 'c')]
self.items2 = [ustr(c) for c in ('x', 'y', 'z')]
+ self.ab_items = [ustr(c) for c in 'ab']
+ self.abcde_items = [ustr(c) for c in 'abcde']
+ self.def_items = [ustr(c) for c in 'def']
+ self.ab_weakset = WeakSet(self.ab_items)
+ self.abcde_weakset = WeakSet(self.abcde_items)
+ self.def_weakset = WeakSet(self.def_items)
self.letters = [ustr(c) for c in string.ascii_letters]
self.s = WeakSet(self.items)
self.d = dict.fromkeys(self.items)
@@ -67,6 +77,11 @@ class TestWeakSet(unittest.TestCase):
x = WeakSet(self.items + self.items2)
c = C(self.items2)
self.assertEqual(self.s.union(c), x)
+ del c
+ self.assertEqual(len(u), len(self.items) + len(self.items2))
+ self.items2.pop()
+ gc.collect()
+ self.assertEqual(len(u), len(self.items) + len(self.items2))
def test_or(self):
i = self.s.union(self.items2)
@@ -74,14 +89,19 @@ class TestWeakSet(unittest.TestCase):
self.assertEqual(self.s | frozenset(self.items2), i)
def test_intersection(self):
- i = self.s.intersection(self.items2)
+ s = WeakSet(self.letters)
+ i = s.intersection(self.items2)
for c in self.letters:
- self.assertEqual(c in i, c in self.d and c in self.items2)
- self.assertEqual(self.s, WeakSet(self.items))
+ self.assertEqual(c in i, c in self.items2 and c in self.letters)
+ self.assertEqual(s, WeakSet(self.letters))
self.assertEqual(type(i), WeakSet)
for C in set, frozenset, dict.fromkeys, list, tuple:
x = WeakSet([])
- self.assertEqual(self.s.intersection(C(self.items2)), x)
+ self.assertEqual(i.intersection(C(self.items)), x)
+ self.assertEqual(len(i), len(self.items2))
+ self.items2.pop()
+ gc.collect()
+ self.assertEqual(len(i), len(self.items2))
def test_isdisjoint(self):
self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
@@ -112,6 +132,10 @@ class TestWeakSet(unittest.TestCase):
self.assertEqual(self.s, WeakSet(self.items))
self.assertEqual(type(i), WeakSet)
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
+ self.assertEqual(len(i), len(self.items) + len(self.items2))
+ self.items2.pop()
+ gc.collect()
+ self.assertEqual(len(i), len(self.items) + len(self.items2))
def test_xor(self):
i = self.s.symmetric_difference(self.items2)
@@ -119,22 +143,28 @@ class TestWeakSet(unittest.TestCase):
self.assertEqual(self.s ^ frozenset(self.items2), i)
def test_sub_and_super(self):
- pl, ql, rl = map(lambda s: [ustr(c) for c in s], ['ab', 'abcde', 'def'])
- p, q, r = map(WeakSet, (pl, ql, rl))
- self.assertTrue(p < q)
- self.assertTrue(p <= q)
- self.assertTrue(q <= q)
- self.assertTrue(q > p)
- self.assertTrue(q >= p)
- self.assertFalse(q < r)
- self.assertFalse(q <= r)
- self.assertFalse(q > r)
- self.assertFalse(q >= r)
+ self.assertTrue(self.ab_weakset <= self.abcde_weakset)
+ self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
+ self.assertTrue(self.abcde_weakset >= self.ab_weakset)
+ self.assertFalse(self.abcde_weakset <= self.def_weakset)
+ self.assertFalse(self.abcde_weakset >= self.def_weakset)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
+ def test_lt(self):
+ self.assertTrue(self.ab_weakset < self.abcde_weakset)
+ self.assertFalse(self.abcde_weakset < self.def_weakset)
+ self.assertFalse(self.ab_weakset < self.ab_weakset)
+ self.assertFalse(WeakSet() < WeakSet())
+
+ def test_gt(self):
+ self.assertTrue(self.abcde_weakset > self.ab_weakset)
+ self.assertFalse(self.abcde_weakset > self.def_weakset)
+ self.assertFalse(self.ab_weakset > self.ab_weakset)
+ self.assertFalse(WeakSet() > WeakSet())
+
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
s = WeakSet(Foo() for i in range(1000))
@@ -359,6 +389,49 @@ class TestWeakSet(unittest.TestCase):
s.clear()
self.assertEqual(len(s), 0)
+ def test_len_cycles(self):
+ N = 20
+ items = [RefCycle() for i in range(N)]
+ s = WeakSet(items)
+ del items
+ it = iter(s)
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ gc.collect()
+ n1 = len(s)
+ del it
+ gc.collect()
+ n2 = len(s)
+ # one item may be kept alive inside the iterator
+ self.assertIn(n1, (0, 1))
+ self.assertEqual(n2, 0)
+
+ def test_len_race(self):
+ # Extended sanity checks for len() in the face of cyclic collection
+ self.addCleanup(gc.set_threshold, *gc.get_threshold())
+ for th in range(1, 100):
+ N = 20
+ gc.collect(0)
+ gc.set_threshold(th, th, th)
+ items = [RefCycle() for i in range(N)]
+ s = WeakSet(items)
+ del items
+ # All items will be collected at next garbage collection pass
+ it = iter(s)
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ n1 = len(s)
+ del it
+ n2 = len(s)
+ self.assertGreaterEqual(n1, 0)
+ self.assertLessEqual(n1, N)
+ self.assertGreaterEqual(n2, 0)
+ self.assertLessEqual(n2, n1)
+
def test_main(verbose=None):
support.run_unittest(TestWeakSet)
diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py
index dffa2ca0da..60081e241c 100644
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -66,24 +66,11 @@ class ChecksumTestCase(unittest.TestCase):
# Issue #10276 - check that inputs >=4GB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):
- def setUp(self):
- with open(support.TESTFN, "wb+") as f:
- f.seek(_4G)
- f.write(b"asdf")
- f.flush()
- self.mapping = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
-
- def tearDown(self):
- self.mapping.close()
- support.unlink(support.TESTFN)
-
- @unittest.skipUnless(mmap, "mmap() is not available.")
- @unittest.skipUnless(sys.maxsize > _4G, "Can't run on a 32-bit system.")
- @unittest.skipUnless(support.is_resource_enabled("largefile"),
- "May use lots of disk space.")
- def test_big_buffer(self):
- self.assertEqual(zlib.crc32(self.mapping), 3058686908)
- self.assertEqual(zlib.adler32(self.mapping), 82837919)
+ @bigmemtest(size=_4G + 4, memuse=1, dry_run=False)
+ def test_big_buffer(self, size):
+ data = b"nyan" * (_1G + 1)
+ self.assertEqual(zlib.crc32(data), 1044521549)
+ self.assertEqual(zlib.adler32(data), 2256789997)
class ExceptionTestCase(unittest.TestCase):
diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py
index 928e1de781..5ae20a8678 100644
--- a/Lib/tkinter/ttk.py
+++ b/Lib/tkinter/ttk.py
@@ -1253,7 +1253,7 @@ class Treeview(Widget, tkinter.XView, tkinter.YView):
def exists(self, item):
- """Returns True if the specified item is present in the three,
+ """Returns True if the specified item is present in the tree,
False otherwise."""
return bool(self.tk.call(self._w, "exists", item))
diff --git a/Lib/unittest/loader.py b/Lib/unittest/loader.py
index ab364002e2..541884e416 100644
--- a/Lib/unittest/loader.py
+++ b/Lib/unittest/loader.py
@@ -34,6 +34,11 @@ def _make_failed_test(classname, methodname, exception, suiteClass):
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
+def _jython_aware_splitext(path):
+ if path.lower().endswith('$py.class'):
+ return path[:-9]
+ return os.path.splitext(path)[0]
+
class TestLoader(object):
"""
@@ -221,7 +226,7 @@ class TestLoader(object):
return os.path.dirname(full_path)
def _get_name_from_path(self, path):
- path = os.path.splitext(os.path.normpath(path))[0]
+ path = _jython_aware_splitext(os.path.normpath(path))
_relpath = os.path.relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
@@ -258,11 +263,11 @@ class TestLoader(object):
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
- realpath = os.path.splitext(mod_file)[0]
- fullpath_noext = os.path.splitext(full_path)[0]
+ realpath = _jython_aware_splitext(mod_file)
+ fullpath_noext = _jython_aware_splitext(full_path)
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
- mod_name = os.path.splitext(os.path.basename(full_path))[0]
+ mod_name = _jython_aware_splitext(os.path.basename(full_path))
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py
index 94b713e885..fe2cfcdbbe 100644
--- a/Lib/urllib/request.py
+++ b/Lib/urllib/request.py
@@ -1062,8 +1062,9 @@ class AbstractHTTPHandler(BaseHandler):
if request.data is not None: # POST
data = request.data
if isinstance(data, str):
- raise TypeError("POST data should be bytes"
- " or an iterable of bytes. It cannot be str.")
+ msg = "POST data should be bytes or an iterable of bytes."\
+ "It cannot be str"
+ raise TypeError(msg)
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
diff --git a/Lib/urllib/response.py b/Lib/urllib/response.py
index 8c6dcca5aa..ffaa5fa6fc 100644
--- a/Lib/urllib/response.py
+++ b/Lib/urllib/response.py
@@ -61,11 +61,11 @@ class addclosehook(addbase):
self.hookargs = hookargs
def close(self):
- addbase.close(self)
if self.closehook:
self.closehook(*self.hookargs)
self.closehook = None
self.hookargs = None
+ addbase.close(self)
class addinfo(addbase):
"""class to add an info() method to an open file."""
diff --git a/Lib/weakref.py b/Lib/weakref.py
index 468f8f1029..fcb6b74d1b 100644
--- a/Lib/weakref.py
+++ b/Lib/weakref.py
@@ -78,7 +78,7 @@ class WeakValueDictionary(collections.MutableMapping):
del self.data[key]
def __len__(self):
- return sum(wr() is not None for wr in self.data.values())
+ return len(self.data) - len(self._pending_removals)
def __contains__(self, key):
try:
@@ -290,7 +290,7 @@ class WeakKeyDictionary(collections.MutableMapping):
return self.data[ref(key)]
def __len__(self):
- return len(self.data)
+ return len(self.data) - len(self._pending_removals)
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
diff --git a/Lib/xmlrpc/server.py b/Lib/xmlrpc/server.py
index d7ed3f32f7..fd17026583 100644
--- a/Lib/xmlrpc/server.py
+++ b/Lib/xmlrpc/server.py
@@ -1,4 +1,4 @@
-"""XML-RPC Servers.
+r"""XML-RPC Servers.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
diff --git a/Mac/README b/Mac/README
index 0261127076..6853db44c6 100644
--- a/Mac/README
+++ b/Mac/README
@@ -66,7 +66,7 @@ flag to configure::
$ make
$ make install
-This flag can be used a framework build of python, but also with a classic
+This flag can be used with a framework build of python, but also with a classic
unix build. Either way you will have to build python on Mac OS X 10.4 (or later)
with Xcode 2.1 (or later). You also have to install the 10.4u SDK when
installing Xcode.
@@ -214,8 +214,8 @@ How do I create a binary distribution?
Go to the directory "Mac/OSX/BuildScript". There you'll find a script
"build-installer.py" that does all the work. This will download and build
-a number of 3th-party libaries, configures and builds a framework Python,
-installs it, creates the installer pacakge files and then packs this in a
+a number of 3rd-party libraries, configures and builds a framework Python,
+installs it, creates the installer package files and then packs this in a
DMG image.
The script will build a universal binary, you'll therefore have to run this
@@ -251,8 +251,8 @@ architectures. Temporarily move ``/usr/local`` aside to finish the build.
Uninstalling a framework install, including the binary installer
================================================================
-Uninstalling a framework can be done by manually removing all bits that got installed,
-that's true for both installations from source and installations using the binary installer.
+Uninstalling a framework can be done by manually removing all bits that got installed.
+That's true for both installations from source and installations using the binary installer.
Sadly enough OSX does not have a central uninstaller.
The main bit of a framework install is the framework itself, installed in
diff --git a/Makefile.pre.in b/Makefile.pre.in
index 3008d6d73a..9cc15c963e 100644
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -161,7 +161,7 @@ SRCDIRS= @SRCDIRS@
SUBDIRSTOO= Include Lib Misc
# Files and directories to be distributed
-CONFIGFILES= configure configure.in acconfig.h pyconfig.h.in Makefile.pre.in
+CONFIGFILES= configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in
DISTFILES= README ChangeLog $(CONFIGFILES)
DISTDIRS= $(SUBDIRS) $(SUBDIRSTOO) Ext-dummy
DIST= $(DISTFILES) $(DISTDIRS)
@@ -917,7 +917,8 @@ XMLLIBSUBDIRS= xml xml/dom xml/etree xml/parsers xml/sax
LIBSUBDIRS= tkinter tkinter/test tkinter/test/test_tkinter \
tkinter/test/test_ttk site-packages test \
test/capath test/data \
- test/cjkencodings test/decimaltestdata test/xmltestdata test/subprocessdata \
+ test/cjkencodings test/decimaltestdata test/xmltestdata \
+ test/subprocessdata test/sndhdrdata \
test/tracedmodules test/encoded_modules \
concurrent concurrent/futures encodings \
email email/mime email/test email/test/data \
@@ -1210,7 +1211,7 @@ recheck:
$(SHELL) config.status --recheck
$(SHELL) config.status
-# Rebuild the configure script from configure.in; also rebuild pyconfig.h.in
+# Rebuild the configure script from configure.ac; also rebuild pyconfig.h.in
autoconf:
(cd $(srcdir); autoconf -Wall)
(cd $(srcdir); autoheader -Wall)
diff --git a/Misc/ACKS b/Misc/ACKS
index 36d7e84345..3693c1c575 100644
--- a/Misc/ACKS
+++ b/Misc/ACKS
@@ -173,6 +173,7 @@ Mike Clarkson
Andrew Clegg
Brad Clements
Steve Clift
+Hervé Coatanhay
Nick Coghlan
Josh Cogliati
Dave Cole
@@ -341,6 +342,7 @@ Johannes Gijsbers
Michael Gilfix
Christoph Gohlke
Tim Golden
+Guilherme Gonçalves
Chris Gonnerman
David Goodger
Hans de Graaff
@@ -429,6 +431,7 @@ Jeremy Hylton
Gerhard Häring
Fredrik Håård
Mihai Ibanescu
+Ali Ikinci
Lars Immisch
Bobby Impollonia
Meador Inge
@@ -507,6 +510,7 @@ Damon Kohler
Marko Kohtala
Vlad Korolev
Joseph Koshy
+Jerzy Kozera
Maksim Kozyarchuk
Stefan Krah
Bob Kras
@@ -551,6 +555,7 @@ John Lenton
Christopher Tur Lesniewski-Laas
Mark Levinson
William Lewis
+Akira Li
Xuanji Li
Robert van Liere
Ross Light
@@ -627,6 +632,7 @@ Roman Milner
Andrii V. Mishkovskyi
Dustin J. Mitchell
Dom Mitchell
+Florian Mladitsch
Doug Moen
The Dragon De Monsyne
Skip Montanaro
@@ -665,6 +671,7 @@ Stefan Norberg
Tim Northover
Joe Norton
Neal Norwitz
+Mikhail Novikov
Michal Nowikowski
Steffen Daode Nurpmeso
Nigel O'Brian
diff --git a/Misc/NEWS b/Misc/NEWS
index 5e03fb334b..ebdfa7c831 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -2,6 +2,168 @@
Python News
+++++++++++
+What's New in Python 3.2.4
+==========================
+
+*Release date: XX-XX-XXXX*
+
+Core and Builtins
+-----------------
+
+- Issue #14474: Save and restore exception state in thread.start_new_thread()
+  while writing error message if the thread leaves an unhandled exception.
+
+- Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch
+ by Suman Saha.
+
+- Issue #14378: Fix compiling ast.ImportFrom nodes with a "__future__" string as
+ the module name that was not interned.
+
+- Issue #14331: Use significantly less stack space when importing modules by
+ allocating path buffers on the heap instead of the stack.
+
+- Issue #14334: Prevent a segfault in type.__getattribute__ when it was not
+  passed strings.
+
+- Issue #1469629: Allow cycles through an object's __dict__ slot to be
+ collected. (For example if ``x.__dict__ is x``).
+
+- Issue #14172: Fix reference leak when marshalling a buffer-like object
+ (other than a bytes object).
+
+- Issue #13521: dict.setdefault() now does only one lookup for the given key,
+ making it "atomic" for many purposes. Patch by Filip Gruszczyński.
+
+- Issue #14471: Fix a possible buffer overrun in the winreg module.
+
+Library
+-------
+
+- Issue #7978: socketserver now restarts the select() call when EINTR is
+ returned. This avoids crashing the server loop when a signal is received.
+ Patch by Jerzy Kozera.
+
+- Issue #14496: Fix wrong name in idlelib/tabbedpages.py.
+ Patch by Popa Claudiu.
+
+- Issue #14482: Raise a ValueError, not a NameError, when trying to create
+ a multiprocessing Client or Listener with an AF_UNIX type address under
+ Windows. Patch by Popa Claudiu.
+
+- Issue #14151: Raise a ValueError, not a NameError, when trying to create
+ a multiprocessing Client or Listener with an AF_PIPE type address under
+ non-Windows platforms. Patch by Popa Claudiu.
+
+- Issue #13872: socket.detach() now marks the socket closed (as mirrored
+ in the socket repr()). Patch by Matt Joiner.
+
+- Issue #14406: Fix a race condition when using ``concurrent.futures.wait(
+ return_when=ALL_COMPLETED)``. Patch by Matt Joiner.
+
+- Issue #14409: IDLE now properly executes commands in the Shell window
+ when it cannot read the normal config files on startup and
+ has to use the built-in default key bindings.
+ There was previously a bug in one of the defaults.
+
+- Issue #10340: asyncore - properly handle EINVAL in dispatcher constructor on
+ OSX; avoid to call handle_connect in case of a disconnected socket which
+ was not meant to connect.
+
+- Issue #12757: Fix the skipping of doctests when python is run with -OO so
+ that it works in unittest's verbose mode as well as non-verbose mode.
+
+- Issue #3573: IDLE hangs when passing invalid command line args
+ (directory(ies) instead of file(s)) (Patch by Guilherme Polo)
+
+- Issue #13694: asynchronous connect in asyncore.dispatcher does not set addr
+ attribute.
+
+- Issue #11686: Added missing entries to email package __all__ lists
+ (mostly the new Bytes classes).
+
+- Issue #10484: Fix the CGIHTTPServer's PATH_INFO handling problem.
+
+- Issue #11199: Fix urllib, which hangs on particular ftp urls.
+
+- Issue #14062: Header objects now correctly respect the 'linesep' setting
+ when processed by BytesParser (which smtplib.SMTP.send_message uses).
+
+- Issue #14291: Email now defaults to utf-8 for non-ASCII unicode headers
+ instead of raising an error. This fixes a regression relative to 2.7.
+
+- Issue #5219: Prevent event handler cascade in IDLE.
+
+- Issue #14184: Increase the default stack size for secondary threads on
+ Mac OS X to avoid interpreter crashes when using threads on 10.7.
+
+- Issue #10543: Fix unittest test discovery with Jython bytecode files.
+
+- Issue #14252: Fix subprocess.Popen.terminate() to not raise an error under
+ Windows when the child process has already exited.
+
+- Issue #14195: An issue that caused weakref.WeakSet instances to incorrectly
+ return True for a WeakSet instance 'a' in both 'a < a' and 'a > a' has been
+ fixed.
+
+- Issue #14177: marshal.loads() now raises TypeError when given a unicode
+  string. Patch by Guilherme Gonçalves.
+
+- Issue #14159: Fix the len() of weak containers (WeakSet, WeakKeyDictionary,
+ WeakValueDictionary) to return a better approximation when some objects
+ are dead or dying. Moreover, the implementation is now O(1) rather than
+ O(n).
+
+- Issue #13125: Silence spurious test_lib2to3 output when in non-verbose mode.
+ Patch by Mikhail Novikov.
+
+- Issue #13447: Add a test file to host regression tests for bugs in the
+ scripts found in the Tools directory.
+
+- Issue #8033: sqlite3: Fix 64-bit integer handling in user functions
+ on 32-bit architectures. Initial patch by Philippe Devalkeneer.
+
+Extension Modules
+-----------------
+
+- Issue #14212: The re module didn't retain a reference to buffers it was
+ scanning, resulting in segfaults.
+
+Build
+-----
+
+- Issue #14437: Fix building the _io module under Cygwin.
+
+- Issue #14387: Do not include accu.h from Python.h.
+
+- Issue #14359: Only use O_CLOEXEC in _posixmodule.c if it is defined.
+ Based on patch from Hervé Coatanhay.
+
+Documentation
+-------------
+
+- Issue #8799: Fix and improve the threading.Condition documentation.
+
+
+What's New in Python 3.2.3 release candidate 2?
+===============================================
+
+*Release date: XX-Mar-2012*
+
+Library
+-------
+
+- Issue #6884: Fix long-standing bugs with MANIFEST.in parsing in distutils
+ on Windows.
+
+Extension Modules
+-----------------
+
+- Issue #14234: CVE-2012-0876: Randomize hashes of xml attributes in the hash
+ table internal to the pyexpat module's copy of the expat library to avoid a
+ denial of service due to hash collisions. Patch by David Malcolm with some
+ modifications by the expat project.
+
+
What's New in Python 3.2.3?
===========================
@@ -156,9 +318,6 @@ Core and Builtins
Library
-------
-- Issue #8033: sqlite3: Fix 64-bit integer handling in user functions
- on 32-bit architectures. Initial patch by Philippe Devalkeneer.
-
- HTMLParser is now able to handle slashes in the start tag.
- Issue #14001: CVE-2012-0845: xmlrpc: Fix an endless loop in
diff --git a/Modules/_io/_iomodule.c b/Modules/_io/_iomodule.c
index 61362c73b2..4c90433ae9 100644
--- a/Modules/_io/_iomodule.c
+++ b/Modules/_io/_iomodule.c
@@ -58,7 +58,7 @@ PyDoc_STRVAR(module_doc,
"\n"
"At the top of the I/O hierarchy is the abstract base class IOBase. It\n"
"defines the basic interface to a stream. Note, however, that there is no\n"
-"seperation between reading and writing to streams; implementations are\n"
+"separation between reading and writing to streams; implementations are\n"
"allowed to throw an IOError if they do not support a given operation.\n"
"\n"
"Extending IOBase is RawIOBase which deals simply with the reading and\n"
diff --git a/Modules/_io/_iomodule.h b/Modules/_io/_iomodule.h
index 925e4f2cc7..c198b43e78 100644
--- a/Modules/_io/_iomodule.h
+++ b/Modules/_io/_iomodule.h
@@ -67,7 +67,7 @@ typedef struct {
PyObject *filename; /* Not used, but part of the IOError object */
Py_ssize_t written;
} PyBlockingIOErrorObject;
-PyAPI_DATA(PyObject *) PyExc_BlockingIOError;
+extern PyObject *PyExc_BlockingIOError;
/*
* Offset type for positioning.
diff --git a/Modules/_io/textio.c b/Modules/_io/textio.c
index 9c5f4416d1..baf0a971a7 100644
--- a/Modules/_io/textio.c
+++ b/Modules/_io/textio.c
@@ -627,7 +627,7 @@ PyDoc_STRVAR(textiowrapper_doc,
"enabled. With this enabled, on input, the lines endings '\\n', '\\r',\n"
"or '\\r\\n' are translated to '\\n' before being returned to the\n"
"caller. Conversely, on output, '\\n' is translated to the system\n"
- "default line seperator, os.linesep. If newline is any other of its\n"
+ "default line separator, os.linesep. If newline is any other of its\n"
"legal values, that newline becomes the newline when the file is read\n"
"and it is returned untranslated. On output, '\\n' is converted to the\n"
"newline.\n"
diff --git a/Modules/_posixsubprocess.c b/Modules/_posixsubprocess.c
index d520c8c769..81274e12c8 100644
--- a/Modules/_posixsubprocess.c
+++ b/Modules/_posixsubprocess.c
@@ -202,7 +202,18 @@ _close_open_fd_range_safe(int start_fd, int end_fd, PyObject* py_fds_to_keep)
int fd_dir_fd;
if (start_fd >= end_fd)
return;
- fd_dir_fd = open(FD_DIR, O_RDONLY | O_CLOEXEC, 0);
+#ifdef O_CLOEXEC
+ fd_dir_fd = open(FD_DIR, O_RDONLY | O_CLOEXEC, 0);
+#else
+ fd_dir_fd = open(FD_DIR, O_RDONLY, 0);
+#ifdef FD_CLOEXEC
+ {
+ int old = fcntl(fd_dir_fd, F_GETFD);
+ if (old != -1)
+ fcntl(fd_dir_fd, F_SETFD, old | FD_CLOEXEC);
+ }
+#endif
+#endif
if (fd_dir_fd == -1) {
/* No way to get a list of open fds. */
_close_fds_by_brute_force(start_fd, end_fd, py_fds_to_keep);
diff --git a/Modules/_sre.c b/Modules/_sre.c
index a363de2beb..472b5a3797 100644
--- a/Modules/_sre.c
+++ b/Modules/_sre.c
@@ -1664,7 +1664,7 @@ state_reset(SRE_STATE* state)
}
static void*
-getstring(PyObject* string, Py_ssize_t* p_length, int* p_charsize)
+getstring(PyObject* string, Py_ssize_t* p_length, int* p_charsize, Py_buffer *view)
{
/* given a python object, return a data pointer, a length (in
characters), and a character size. return NULL if the object
@@ -1674,7 +1674,6 @@ getstring(PyObject* string, Py_ssize_t* p_length, int* p_charsize)
Py_ssize_t size, bytes;
int charsize;
void* ptr;
- Py_buffer view;
/* Unicode objects do not support the buffer API. So, get the data
directly instead. */
@@ -1686,26 +1685,21 @@ getstring(PyObject* string, Py_ssize_t* p_length, int* p_charsize)
}
/* get pointer to string buffer */
- view.len = -1;
+ view->len = -1;
buffer = Py_TYPE(string)->tp_as_buffer;
if (!buffer || !buffer->bf_getbuffer ||
- (*buffer->bf_getbuffer)(string, &view, PyBUF_SIMPLE) < 0) {
+ (*buffer->bf_getbuffer)(string, view, PyBUF_SIMPLE) < 0) {
PyErr_SetString(PyExc_TypeError, "expected string or buffer");
return NULL;
}
/* determine buffer size */
- bytes = view.len;
- ptr = view.buf;
-
- /* Release the buffer immediately --- possibly dangerous
- but doing something else would require some re-factoring
- */
- PyBuffer_Release(&view);
+ bytes = view->len;
+ ptr = view->buf;
if (bytes < 0) {
PyErr_SetString(PyExc_TypeError, "buffer has negative size");
- return NULL;
+ goto err;
}
/* determine character size */
@@ -1719,7 +1713,7 @@ getstring(PyObject* string, Py_ssize_t* p_length, int* p_charsize)
#endif
else {
PyErr_SetString(PyExc_TypeError, "buffer size mismatch");
- return NULL;
+ goto err;
}
*p_length = size;
@@ -1728,8 +1722,13 @@ getstring(PyObject* string, Py_ssize_t* p_length, int* p_charsize)
if (ptr == NULL) {
PyErr_SetString(PyExc_ValueError,
"Buffer is NULL");
+ goto err;
}
return ptr;
+ err:
+ PyBuffer_Release(view);
+ view->buf = NULL;
+ return NULL;
}
LOCAL(PyObject*)
@@ -1747,20 +1746,21 @@ state_init(SRE_STATE* state, PatternObject* pattern, PyObject* string,
state->lastmark = -1;
state->lastindex = -1;
- ptr = getstring(string, &length, &charsize);
+ state->buffer.buf = NULL;
+ ptr = getstring(string, &length, &charsize, &state->buffer);
if (!ptr)
- return NULL;
+ goto err;
- if (charsize == 1 && pattern->charsize > 1) {
- PyErr_SetString(PyExc_TypeError,
+ if (charsize == 1 && pattern->charsize > 1) {
+ PyErr_SetString(PyExc_TypeError,
"can't use a string pattern on a bytes-like object");
- return NULL;
- }
- if (charsize > 1 && pattern->charsize == 1) {
- PyErr_SetString(PyExc_TypeError,
+ goto err;
+ }
+ if (charsize > 1 && pattern->charsize == 1) {
+ PyErr_SetString(PyExc_TypeError,
"can't use a bytes pattern on a string-like object");
- return NULL;
- }
+ goto err;
+ }
/* adjust boundaries */
if (start < 0)
@@ -1797,11 +1797,17 @@ state_init(SRE_STATE* state, PatternObject* pattern, PyObject* string,
state->lower = sre_lower;
return string;
+ err:
+ if (state->buffer.buf)
+ PyBuffer_Release(&state->buffer);
+ return NULL;
}
LOCAL(void)
state_fini(SRE_STATE* state)
{
+ if (state->buffer.buf)
+ PyBuffer_Release(&state->buffer);
Py_XDECREF(state->string);
data_stack_dealloc(state);
}
@@ -1863,6 +1869,8 @@ pattern_dealloc(PatternObject* self)
{
if (self->weakreflist != NULL)
PyObject_ClearWeakRefs((PyObject *) self);
+ if (self->view.buf)
+ PyBuffer_Release(&self->view);
Py_XDECREF(self->pattern);
Py_XDECREF(self->groupindex);
Py_XDECREF(self->indexgroup);
@@ -2297,6 +2305,7 @@ pattern_subx(PatternObject* self, PyObject* ptemplate, PyObject* string,
Py_ssize_t i, b, e;
int bint;
int filter_is_callable;
+ Py_buffer view;
if (PyCallable_Check(ptemplate)) {
/* sub/subn takes either a function or a template */
@@ -2306,7 +2315,8 @@ pattern_subx(PatternObject* self, PyObject* ptemplate, PyObject* string,
} else {
/* if not callable, check if it's a literal string */
int literal;
- ptr = getstring(ptemplate, &n, &bint);
+ view.buf = NULL;
+ ptr = getstring(ptemplate, &n, &bint, &view);
b = bint;
if (ptr) {
if (b == 1) {
@@ -2320,6 +2330,8 @@ pattern_subx(PatternObject* self, PyObject* ptemplate, PyObject* string,
PyErr_Clear();
literal = 0;
}
+ if (view.buf)
+ PyBuffer_Release(&view);
if (literal) {
filter = ptemplate;
Py_INCREF(filter);
@@ -2661,6 +2673,7 @@ _compile(PyObject* self_, PyObject* args)
Py_ssize_t groups = 0;
PyObject* groupindex = NULL;
PyObject* indexgroup = NULL;
+
if (!PyArg_ParseTuple(args, "OiO!|nOO", &pattern, &flags,
&PyList_Type, &code, &groups,
&groupindex, &indexgroup))
@@ -2675,6 +2688,7 @@ _compile(PyObject* self_, PyObject* args)
self->pattern = NULL;
self->groupindex = NULL;
self->indexgroup = NULL;
+ self->view.buf = NULL;
self->codesize = n;
@@ -2694,15 +2708,15 @@ _compile(PyObject* self_, PyObject* args)
return NULL;
}
- if (pattern == Py_None)
- self->charsize = -1;
- else {
- Py_ssize_t p_length;
- if (!getstring(pattern, &p_length, &self->charsize)) {
- Py_DECREF(self);
- return NULL;
- }
- }
+ if (pattern == Py_None)
+ self->charsize = -1;
+ else {
+ Py_ssize_t p_length;
+ if (!getstring(pattern, &p_length, &self->charsize, &self->view)) {
+ Py_DECREF(self);
+ return NULL;
+ }
+ }
Py_INCREF(pattern);
self->pattern = pattern;
diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c
index ea038defb1..5f76a7b6c2 100644
--- a/Modules/_threadmodule.c
+++ b/Modules/_threadmodule.c
@@ -994,14 +994,17 @@ t_bootstrap(void *boot_raw)
PyErr_Clear();
else {
PyObject *file;
+ PyObject *exc, *value, *tb;
PySys_WriteStderr(
"Unhandled exception in thread started by ");
+ PyErr_Fetch(&exc, &value, &tb);
file = PySys_GetObject("stderr");
if (file != NULL && file != Py_None)
PyFile_WriteObject(boot->func, file, 0);
else
PyObject_Print(boot->func, stderr, 0);
PySys_WriteStderr("\n");
+ PyErr_Restore(exc, value, tb);
PyErr_PrintEx(0);
}
}
diff --git a/Modules/mathmodule.c b/Modules/mathmodule.c
index 29c32a30d8..2c4cc7331b 100644
--- a/Modules/mathmodule.c
+++ b/Modules/mathmodule.c
@@ -694,13 +694,13 @@ math_1_to_whatever(PyObject *arg, double (*func) (double),
return NULL;
}
if (Py_IS_INFINITY(r) && Py_IS_FINITE(x)) {
- if (can_overflow)
- PyErr_SetString(PyExc_OverflowError,
- "math range error"); /* overflow */
- else
- PyErr_SetString(PyExc_ValueError,
- "math domain error"); /* singularity */
- return NULL;
+ if (can_overflow)
+ PyErr_SetString(PyExc_OverflowError,
+ "math range error"); /* overflow */
+ else
+ PyErr_SetString(PyExc_ValueError,
+ "math domain error"); /* singularity */
+ return NULL;
}
if (Py_IS_FINITE(r) && errno && is_error(r))
/* this branch unnecessary on most platforms */
diff --git a/Modules/python.c b/Modules/python.c
index 935908af5d..cf9383f444 100644
--- a/Modules/python.c
+++ b/Modules/python.c
@@ -22,9 +22,9 @@ extern wchar_t* _Py_DecodeUTF8_surrogateescape(const char *s, Py_ssize_t size);
int
main(int argc, char **argv)
{
- wchar_t **argv_copy = (wchar_t **)PyMem_Malloc(sizeof(wchar_t*)*argc);
+ wchar_t **argv_copy = (wchar_t **)PyMem_Malloc(sizeof(wchar_t*)*(argc+1));
/* We need a second copies, as Python might modify the first one. */
- wchar_t **argv_copy2 = (wchar_t **)PyMem_Malloc(sizeof(wchar_t*)*argc);
+ wchar_t **argv_copy2 = (wchar_t **)PyMem_Malloc(sizeof(wchar_t*)*(argc+1));
int i, res;
char *oldloc;
/* 754 requires that FP exceptions run in "no stop" mode by default,
@@ -58,6 +58,8 @@ main(int argc, char **argv)
}
argv_copy2[i] = argv_copy[i];
}
+ argv_copy2[argc] = argv_copy[argc] = NULL;
+
setlocale(LC_ALL, oldloc);
free(oldloc);
res = Py_Main(argc, argv_copy);
diff --git a/Modules/sre.h b/Modules/sre.h
index 518c11db30..aec9b541dd 100644
--- a/Modules/sre.h
+++ b/Modules/sre.h
@@ -31,6 +31,7 @@ typedef struct {
int flags; /* flags used when compiling pattern source */
PyObject *weakreflist; /* List of weak references */
int charsize; /* pattern charsize (or -1) */
+ Py_buffer view;
/* pattern code */
Py_ssize_t codesize;
SRE_CODE code[1];
@@ -80,6 +81,7 @@ typedef struct {
char* data_stack;
size_t data_stack_size;
size_t data_stack_base;
+ Py_buffer buffer;
/* current repeat context */
SRE_REPEAT *repeat;
/* hooks */
diff --git a/Modules/timemodule.c b/Modules/timemodule.c
index 3ab6e9b845..626db3e251 100644
--- a/Modules/timemodule.c
+++ b/Modules/timemodule.c
@@ -504,7 +504,7 @@ time_strftime(PyObject *self, PyObject *args)
fmt = PyBytes_AS_STRING(format);
#endif
-#if defined(MS_WINDOWS)
+#if defined(MS_WINDOWS) && !defined(HAVE_WCSFTIME)
/* check that the format string contains only valid directives */
for(outbuf = strchr(fmt, '%');
outbuf != NULL;
@@ -516,7 +516,8 @@ time_strftime(PyObject *self, PyObject *args)
!strchr("aAbBcdHIjmMpSUwWxXyYzZ%", outbuf[1]))
{
PyErr_SetString(PyExc_ValueError, "Invalid format string");
- return 0;
+ Py_DECREF(format);
+ return NULL;
}
}
#endif
diff --git a/Objects/accu.c b/Objects/accu.c
index 88e8f08f59..5bd2ee41fe 100644
--- a/Objects/accu.c
+++ b/Objects/accu.c
@@ -1,6 +1,7 @@
/* Accumulator struct implementation */
#include "Python.h"
+#include "accu.h"
static PyObject *
join_list_unicode(PyObject *lst)
diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c
index 4202ff28e4..55b4df638a 100644
--- a/Objects/bytearrayobject.c
+++ b/Objects/bytearrayobject.c
@@ -2234,8 +2234,10 @@ bytearray_extend(PyByteArrayObject *self, PyObject *arg)
}
bytearray_obj = PyByteArray_FromStringAndSize(NULL, buf_size);
- if (bytearray_obj == NULL)
+ if (bytearray_obj == NULL) {
+ Py_DECREF(it);
return NULL;
+ }
buf = PyByteArray_AS_STRING(bytearray_obj);
while ((item = PyIter_Next(it)) != NULL) {
@@ -2268,8 +2270,10 @@ bytearray_extend(PyByteArrayObject *self, PyObject *arg)
return NULL;
}
- if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1)
+ if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) {
+ Py_DECREF(bytearray_obj);
return NULL;
+ }
Py_DECREF(bytearray_obj);
Py_RETURN_NONE;
diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c
index 62ddf21532..796e400a82 100644
--- a/Objects/bytesobject.c
+++ b/Objects/bytesobject.c
@@ -875,7 +875,9 @@ bytes_hash(PyBytesObject *a)
register unsigned char *p;
register Py_hash_t x;
+#ifdef Py_DEBUG
assert(_Py_HashSecret_Initialized);
+#endif
if (a->ob_shash != -1)
return a->ob_shash;
len = Py_SIZE(a);
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index 768351e224..27de10dc8d 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -510,27 +510,16 @@ _PyDict_MaybeUntrack(PyObject *op)
_PyObject_GC_UNTRACK(op);
}
-
/*
-Internal routine to insert a new item into the table.
-Used both by the internal resize routine and by the public insert routine.
-Eats a reference to key and one to value.
-Returns -1 if an error occurred, or 0 on success.
+Internal routine to insert a new item into the table when you have entry object.
+Used by insertdict.
*/
static int
-insertdict(register PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject *value)
+insertdict_by_entry(register PyDictObject *mp, PyObject *key, Py_hash_t hash,
+ PyDictEntry *ep, PyObject *value)
{
PyObject *old_value;
- register PyDictEntry *ep;
- typedef PyDictEntry *(*lookupfunc)(PyDictObject *, PyObject *, Py_hash_t);
- assert(mp->ma_lookup != NULL);
- ep = mp->ma_lookup(mp, key, hash);
- if (ep == NULL) {
- Py_DECREF(key);
- Py_DECREF(value);
- return -1;
- }
MAINTAIN_TRACKING(mp, key, value);
if (ep->me_value != NULL) {
old_value = ep->me_value;
@@ -553,6 +542,28 @@ insertdict(register PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject *v
return 0;
}
+
+/*
+Internal routine to insert a new item into the table.
+Used both by the internal resize routine and by the public insert routine.
+Eats a reference to key and one to value.
+Returns -1 if an error occurred, or 0 on success.
+*/
+static int
+insertdict(register PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject *value)
+{
+ register PyDictEntry *ep;
+
+ assert(mp->ma_lookup != NULL);
+ ep = mp->ma_lookup(mp, key, hash);
+ if (ep == NULL) {
+ Py_DECREF(key);
+ Py_DECREF(value);
+ return -1;
+ }
+ return insertdict_by_entry(mp, key, hash, ep, value);
+}
+
/*
Internal routine used by dictresize() to insert an item which is
known to be absent from the dict. This routine also assumes that
@@ -776,39 +787,26 @@ PyDict_GetItemWithError(PyObject *op, PyObject *key)
return ep->me_value;
}
-/* CAUTION: PyDict_SetItem() must guarantee that it won't resize the
- * dictionary if it's merely replacing the value for an existing key.
- * This means that it's safe to loop over a dictionary with PyDict_Next()
- * and occasionally replace a value -- but you can't insert new keys or
- * remove them.
- */
-int
-PyDict_SetItem(register PyObject *op, PyObject *key, PyObject *value)
+static int
+dict_set_item_by_hash_or_entry(register PyObject *op, PyObject *key,
+ Py_hash_t hash, PyDictEntry *ep, PyObject *value)
{
register PyDictObject *mp;
- register Py_hash_t hash;
register Py_ssize_t n_used;
- if (!PyDict_Check(op)) {
- PyErr_BadInternalCall();
- return -1;
- }
- assert(key);
- assert(value);
mp = (PyDictObject *)op;
- if (!PyUnicode_CheckExact(key) ||
- (hash = ((PyUnicodeObject *) key)->hash) == -1)
- {
- hash = PyObject_Hash(key);
- if (hash == -1)
- return -1;
- }
assert(mp->ma_fill <= mp->ma_mask); /* at least one empty slot */
n_used = mp->ma_used;
Py_INCREF(value);
Py_INCREF(key);
- if (insertdict(mp, key, hash, value) != 0)
- return -1;
+ if (ep == NULL) {
+ if (insertdict(mp, key, hash, value) != 0)
+ return -1;
+ }
+ else {
+ if (insertdict_by_entry(mp, key, hash, ep, value) != 0)
+ return -1;
+ }
/* If we added a key, we can safely resize. Otherwise just return!
* If fill >= 2/3 size, adjust size. Normally, this doubles or
* quaduples the size, but it's also possible for the dict to shrink
@@ -828,6 +826,36 @@ PyDict_SetItem(register PyObject *op, PyObject *key, PyObject *value)
return dictresize(mp, (mp->ma_used > 50000 ? 2 : 4) * mp->ma_used);
}
+/* CAUTION: PyDict_SetItem() must guarantee that it won't resize the
+ * dictionary if it's merely replacing the value for an existing key.
+ * This means that it's safe to loop over a dictionary with PyDict_Next()
+ * and occasionally replace a value -- but you can't insert new keys or
+ * remove them.
+ */
+int
+PyDict_SetItem(register PyObject *op, PyObject *key, PyObject *value)
+{
+ register Py_hash_t hash;
+
+ if (!PyDict_Check(op)) {
+ PyErr_BadInternalCall();
+ return -1;
+ }
+ assert(key);
+ assert(value);
+ if (PyUnicode_CheckExact(key)) {
+ hash = ((PyUnicodeObject *) key)->hash;
+ if (hash == -1)
+ hash = PyObject_Hash(key);
+ }
+ else {
+ hash = PyObject_Hash(key);
+ if (hash == -1)
+ return -1;
+ }
+ return dict_set_item_by_hash_or_entry(op, key, hash, NULL, value);
+}
+
int
PyDict_DelItem(PyObject *op, PyObject *key)
{
@@ -1797,9 +1825,9 @@ dict_setdefault(register PyDictObject *mp, PyObject *args)
return NULL;
val = ep->me_value;
if (val == NULL) {
- val = failobj;
- if (PyDict_SetItem((PyObject*)mp, key, failobj))
- val = NULL;
+ if (dict_set_item_by_hash_or_entry((PyObject*)mp, key, hash, ep,
+ failobj) == 0)
+ val = failobj;
}
Py_XINCREF(val);
return val;
diff --git a/Objects/listobject.c b/Objects/listobject.c
index 00de597e56..b9ef0d0287 100644
--- a/Objects/listobject.c
+++ b/Objects/listobject.c
@@ -1,6 +1,7 @@
/* List object implementation */
#include "Python.h"
+#include "accu.h"
#ifdef STDC_HEADERS
#include <stddef.h>
diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c
index f6dbc315d9..e99eda06f1 100644
--- a/Objects/tupleobject.c
+++ b/Objects/tupleobject.c
@@ -2,6 +2,7 @@
/* Tuple object implementation */
#include "Python.h"
+#include "accu.h"
/* Speed optimization to avoid frequent malloc/free of small tuples */
#ifndef PyTuple_MAXSAVESIZE
diff --git a/Objects/typeobject.c b/Objects/typeobject.c
index c3822abb0e..8cfa8894b0 100644
--- a/Objects/typeobject.c
+++ b/Objects/typeobject.c
@@ -458,26 +458,23 @@ type_set_bases(PyTypeObject *type, PyObject *value, void *context)
for (i = 0; i < PyTuple_GET_SIZE(value); i++) {
ob = PyTuple_GET_ITEM(value, i);
if (!PyType_Check(ob)) {
- PyErr_Format(
- PyExc_TypeError,
- "%s.__bases__ must be tuple of old- or new-style classes, not '%s'",
- type->tp_name, Py_TYPE(ob)->tp_name);
- return -1;
+ PyErr_Format(PyExc_TypeError,
+ "%s.__bases__ must be tuple of old- or "
+ "new-style classes, not '%s'",
+ type->tp_name, Py_TYPE(ob)->tp_name);
+ return -1;
}
- if (PyType_Check(ob)) {
- if (PyType_IsSubtype((PyTypeObject*)ob, type)) {
- PyErr_SetString(PyExc_TypeError,
- "a __bases__ item causes an inheritance cycle");
- return -1;
- }
+ if (PyType_IsSubtype((PyTypeObject*)ob, type)) {
+ PyErr_SetString(PyExc_TypeError,
+ "a __bases__ item causes an inheritance cycle");
+ return -1;
}
}
new_base = best_base(value);
- if (!new_base) {
+ if (!new_base)
return -1;
- }
if (!compatible_for_assignment(type->tp_base, new_base, "__bases__"))
return -1;
@@ -830,8 +827,13 @@ subtype_clear(PyObject *self)
assert(base);
}
- /* There's no need to clear the instance dict (if any);
- the collector will call its tp_clear handler. */
+ /* Clear the instance dict (if any), to break cycles involving only
+ __dict__ slots (as in the case 'self.__dict__ is self'). */
+ if (type->tp_dictoffset != base->tp_dictoffset) {
+ PyObject **dictptr = _PyObject_GetDictPtr(self);
+ if (dictptr && *dictptr)
+ Py_CLEAR(*dictptr);
+ }
if (baseclear)
return baseclear(self);
@@ -2457,6 +2459,13 @@ type_getattro(PyTypeObject *type, PyObject *name)
PyObject *meta_attribute, *attribute;
descrgetfunc meta_get;
+ if (!PyUnicode_Check(name)) {
+ PyErr_Format(PyExc_TypeError,
+ "attribute name must be string, not '%.200s'",
+ name->ob_type->tp_name);
+ return NULL;
+ }
+
/* Initialize this type (we'll assume the metatype is initialized) */
if (type->tp_dict == NULL) {
if (PyType_Ready(type) < 0)
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c
index b70666106d..cd17789f53 100644
--- a/Objects/unicodeobject.c
+++ b/Objects/unicodeobject.c
@@ -7673,7 +7673,9 @@ unicode_hash(PyUnicodeObject *self)
Py_UNICODE *p;
Py_hash_t x;
+#ifdef Py_DEBUG
assert(_Py_HashSecret_Initialized);
+#endif
if (self->hash != -1)
return self->hash;
len = Py_SIZE(self);
@@ -9208,10 +9210,6 @@ unicode_getnewargs(PyUnicodeObject *v)
}
static PyMethodDef unicode_methods[] = {
-
- /* Order is according to common usage: often used methods should
- appear first, since lookup is done sequentially. */
-
{"encode", (PyCFunction) unicode_encode, METH_VARARGS | METH_KEYWORDS, encode__doc__},
{"replace", (PyCFunction) unicode_replace, METH_VARARGS, replace__doc__},
{"split", (PyCFunction) unicode_split, METH_VARARGS, split__doc__},
diff --git a/PC/_subprocess.c b/PC/_subprocess.c
index 2338f3085b..f9a79a7300 100644
--- a/PC/_subprocess.c
+++ b/PC/_subprocess.c
@@ -684,6 +684,7 @@ PyInit__subprocess()
defint(d, "WAIT_OBJECT_0", WAIT_OBJECT_0);
defint(d, "CREATE_NEW_CONSOLE", CREATE_NEW_CONSOLE);
defint(d, "CREATE_NEW_PROCESS_GROUP", CREATE_NEW_PROCESS_GROUP);
+ defint(d, "STILL_ACTIVE", STILL_ACTIVE);
return m;
}
diff --git a/PC/pyconfig.h b/PC/pyconfig.h
index 138d8fa7b2..ba80800857 100644
--- a/PC/pyconfig.h
+++ b/PC/pyconfig.h
@@ -340,7 +340,7 @@ Py_NO_ENABLE_SHARED to find out. Also support MS_NO_COREDLL for b/w compat */
# define SIZEOF_FPOS_T 8
# define SIZEOF_HKEY 8
# define SIZEOF_SIZE_T 8
-/* configure.in defines HAVE_LARGEFILE_SUPPORT iff HAVE_LONG_LONG,
+/* configure.ac defines HAVE_LARGEFILE_SUPPORT iff HAVE_LONG_LONG,
sizeof(off_t) > sizeof(long), and sizeof(PY_LONG_LONG) >= sizeof(off_t).
On Win64 the second condition is not true, but if fpos_t replaces off_t
then this is true. The uses of HAVE_LARGEFILE_SUPPORT imply that Win64
diff --git a/PC/winreg.c b/PC/winreg.c
index 1bc47b958b..240ca69c39 100644
--- a/PC/winreg.c
+++ b/PC/winreg.c
@@ -1110,7 +1110,7 @@ PyEnumKey(PyObject *self, PyObject *args)
* nul. RegEnumKeyEx requires a 257 character buffer to
* retrieve such a key name. */
wchar_t tmpbuf[257];
- DWORD len = sizeof(tmpbuf); /* includes NULL terminator */
+ DWORD len = sizeof(tmpbuf)/sizeof(wchar_t); /* includes NULL terminator */
if (!PyArg_ParseTuple(args, "Oi:EnumKey", &obKey, &index))
return NULL;
diff --git a/Python/ast.c b/Python/ast.c
index 6269c649d0..6faf5b21a6 100644
--- a/Python/ast.c
+++ b/Python/ast.c
@@ -645,7 +645,7 @@ seq_for_testlist(struct compiling *c, const node *n)
}
static arg_ty
-compiler_arg(struct compiling *c, const node *n)
+ast_for_arg(struct compiling *c, const node *n)
{
identifier name;
expr_ty annotation = NULL;
@@ -666,12 +666,6 @@ compiler_arg(struct compiling *c, const node *n)
}
return arg(name, annotation, c->c_arena);
-#if 0
- result = Tuple(args, Store, LINENO(n), n->n_col_offset, c->c_arena);
- if (!set_context(c, result, Store, n))
- return NULL;
- return result;
-#endif
}
/* returns -1 if failed to handle keyword only arguments
@@ -859,7 +853,7 @@ ast_for_arguments(struct compiling *c, const node *n)
"non-default argument follows default argument");
return NULL;
}
- arg = compiler_arg(c, ch);
+ arg = ast_for_arg(c, ch);
if (!arg)
return NULL;
asdl_seq_SET(posargs, k++, arg);
diff --git a/Python/future.c b/Python/future.c
index d6b653f315..d24ae416ff 100644
--- a/Python/future.c
+++ b/Python/future.c
@@ -60,13 +60,6 @@ future_parse(PyFutureFeatures *ff, mod_ty mod, const char *filename)
{
int i, found_docstring = 0, done = 0, prev_line = 0;
- static PyObject *future;
- if (!future) {
- future = PyUnicode_InternFromString("__future__");
- if (!future)
- return 0;
- }
-
if (!(mod->kind == Module_kind || mod->kind == Interactive_kind))
return 1;
@@ -93,7 +86,9 @@ future_parse(PyFutureFeatures *ff, mod_ty mod, const char *filename)
*/
if (s->kind == ImportFrom_kind) {
- if (s->v.ImportFrom.module == future) {
+ identifier modname = s->v.ImportFrom.module;
+ if (modname &&
+ !PyUnicode_CompareWithASCIIString(modname, "__future__")) {
if (done) {
PyErr_SetString(PyExc_SyntaxError,
ERR_LATE_FUTURE);
diff --git a/Python/import.c b/Python/import.c
index f443ab8511..1d3a4859da 100644
--- a/Python/import.c
+++ b/Python/import.c
@@ -1291,7 +1291,7 @@ load_source_module(char *name, char *pathname, FILE *fp)
{
struct stat st;
FILE *fpc;
- char buf[MAXPATHLEN+1];
+ char *buf;
char *cpathname;
PyCodeObject *co;
PyObject *m;
@@ -1310,6 +1310,10 @@ load_source_module(char *name, char *pathname, FILE *fp)
*/
st.st_mtime &= 0xFFFFFFFF;
}
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ return PyErr_NoMemory();
+ }
cpathname = make_compiled_pathname(
pathname, buf, (size_t)MAXPATHLEN + 1, !Py_OptimizeFlag);
if (cpathname != NULL &&
@@ -1317,9 +1321,9 @@ load_source_module(char *name, char *pathname, FILE *fp)
co = read_compiled_module(cpathname, fpc);
fclose(fpc);
if (co == NULL)
- return NULL;
+ goto error_exit;
if (update_compiled_module(co, pathname) < 0)
- return NULL;
+ goto error_exit;
if (Py_VerboseFlag)
PySys_WriteStderr("import %s # precompiled from %s\n",
name, cpathname);
@@ -1328,7 +1332,7 @@ load_source_module(char *name, char *pathname, FILE *fp)
else {
co = parse_source_module(pathname, fp);
if (co == NULL)
- return NULL;
+ goto error_exit;
if (Py_VerboseFlag)
PySys_WriteStderr("import %s # from %s\n",
name, pathname);
@@ -1342,7 +1346,12 @@ load_source_module(char *name, char *pathname, FILE *fp)
name, (PyObject *)co, pathname, cpathname);
Py_DECREF(co);
+ PyMem_FREE(buf);
return m;
+
+error_exit:
+ PyMem_FREE(buf);
+ return NULL;
}
/* Get source file -> unicode or None
@@ -1351,7 +1360,7 @@ load_source_module(char *name, char *pathname, FILE *fp)
static PyObject *
get_sourcefile(char *file)
{
- char py[MAXPATHLEN + 1];
+ char *py = NULL;
Py_ssize_t len;
PyObject *u;
struct stat statbuf;
@@ -1366,6 +1375,10 @@ get_sourcefile(char *file)
return PyUnicode_DecodeFSDefault(file);
}
+ py = PyMem_MALLOC(MAXPATHLEN+1);
+ if (py == NULL) {
+ return PyErr_NoMemory();
+ }
/* Start by trying to turn PEP 3147 path into source path. If that
* fails, just chop off the trailing character, i.e. legacy pyc path
* to py.
@@ -1382,6 +1395,7 @@ get_sourcefile(char *file)
else {
u = PyUnicode_DecodeFSDefault(file);
}
+ PyMem_FREE(py);
return u;
}
@@ -1401,7 +1415,7 @@ load_package(char *name, char *pathname)
PyObject *file = NULL;
PyObject *path = NULL;
int err;
- char buf[MAXPATHLEN+1];
+ char *buf = NULL;
FILE *fp = NULL;
struct filedescr *fdp;
@@ -1423,8 +1437,13 @@ load_package(char *name, char *pathname)
err = PyDict_SetItemString(d, "__path__", path);
if (err != 0)
goto error;
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ PyErr_NoMemory();
+ goto error;
+ }
buf[0] = '\0';
- fdp = find_module(name, "__init__", path, buf, sizeof(buf), &fp, NULL);
+ fdp = find_module(name, "__init__", path, buf, MAXPATHLEN+1, &fp, NULL);
if (fdp == NULL) {
if (PyErr_ExceptionMatches(PyExc_ImportError)) {
PyErr_Clear();
@@ -1442,6 +1461,8 @@ load_package(char *name, char *pathname)
error:
m = NULL;
cleanup:
+ if (buf)
+ PyMem_FREE(buf);
Py_XDECREF(path);
Py_XDECREF(file);
return m;
@@ -1571,7 +1592,7 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
static struct filedescr fd_frozen = {"", "", PY_FROZEN};
static struct filedescr fd_builtin = {"", "", C_BUILTIN};
static struct filedescr fd_package = {"", "", PKG_DIRECTORY};
- char name[MAXPATHLEN+1];
+ char *name;
#if defined(PYOS_OS2)
size_t saved_len;
size_t saved_namelen;
@@ -1585,6 +1606,11 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
"module name is too long");
return NULL;
}
+ name = PyMem_MALLOC(MAXPATHLEN+1);
+ if (name == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
strcpy(name, subname);
/* sys.meta_path import hook */
@@ -1596,7 +1622,7 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
PyErr_SetString(PyExc_RuntimeError,
"sys.meta_path must be a list of "
"import hooks");
- return NULL;
+ goto error_exit;
}
Py_INCREF(meta_path); /* zap guard */
npath = PyList_Size(meta_path);
@@ -1609,12 +1635,13 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
path : Py_None);
if (loader == NULL) {
Py_DECREF(meta_path);
- return NULL; /* true error */
+ goto error_exit; /* true error */
}
if (loader != Py_None) {
/* a loader was found */
*p_loader = loader;
Py_DECREF(meta_path);
+ PyMem_FREE(name);
return &importhookdescr;
}
Py_DECREF(loader);
@@ -1624,18 +1651,21 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
if (find_frozen(fullname) != NULL) {
strcpy(buf, fullname);
+ PyMem_FREE(name);
return &fd_frozen;
}
if (path == NULL) {
if (is_builtin(name)) {
strcpy(buf, name);
+ PyMem_FREE(name);
return &fd_builtin;
}
#ifdef MS_COREDLL
fp = PyWin_FindRegisteredModule(name, &fdp, buf, buflen);
if (fp != NULL) {
*p_fp = fp;
+ PyMem_FREE(name);
return fdp;
}
#endif
@@ -1645,7 +1675,7 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
if (path == NULL || !PyList_Check(path)) {
PyErr_SetString(PyExc_RuntimeError,
"sys.path must be a list of directory names");
- return NULL;
+ goto error_exit;
}
path_hooks = PySys_GetObject("path_hooks");
@@ -1653,14 +1683,14 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
PyErr_SetString(PyExc_RuntimeError,
"sys.path_hooks must be a list of "
"import hooks");
- return NULL;
+ goto error_exit;
}
path_importer_cache = PySys_GetObject("path_importer_cache");
if (path_importer_cache == NULL ||
!PyDict_Check(path_importer_cache)) {
PyErr_SetString(PyExc_RuntimeError,
"sys.path_importer_cache must be a dict");
- return NULL;
+ goto error_exit;
}
npath = PyList_Size(path);
@@ -1671,11 +1701,11 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
const char *base;
Py_ssize_t size;
if (!v)
- return NULL;
+ goto error_exit;
if (PyUnicode_Check(v)) {
v = PyUnicode_EncodeFSDefault(v);
if (v == NULL)
- return NULL;
+ goto error_exit;
}
else if (!PyBytes_Check(v))
continue;
@@ -1703,7 +1733,7 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
importer = get_path_importer(path_importer_cache,
path_hooks, origv);
if (importer == NULL) {
- return NULL;
+ goto error_exit;
}
/* Note: importer is a borrowed reference */
if (importer != Py_None) {
@@ -1712,10 +1742,11 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
"find_module",
"s", fullname);
if (loader == NULL)
- return NULL; /* error */
+ goto error_exit; /* error */
if (loader != Py_None) {
/* a loader was found */
*p_loader = loader;
+ PyMem_FREE(name);
return &importhookdescr;
}
Py_DECREF(loader);
@@ -1740,19 +1771,20 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
S_ISDIR(statbuf.st_mode) && /* it's a directory */
case_ok(buf, len, namelen, name)) { /* case matches */
if (find_init_module(buf)) { /* and has __init__.py */
+ PyMem_FREE(name);
return &fd_package;
}
else {
int err;
PyObject *unicode = PyUnicode_DecodeFSDefault(buf);
if (unicode == NULL)
- return NULL;
+ goto error_exit;
err = PyErr_WarnFormat(PyExc_ImportWarning, 1,
"Not importing directory '%U': missing __init__.py",
unicode);
Py_DECREF(unicode);
if (err)
- return NULL;
+ goto error_exit;
}
}
#endif
@@ -1833,10 +1865,15 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
if (fp == NULL) {
PyErr_Format(PyExc_ImportError,
"No module named %.200s", name);
- return NULL;
+ goto error_exit;
}
*p_fp = fp;
+ PyMem_FREE(name);
return fdp;
+
+error_exit:
+ PyMem_FREE(name);
+ return NULL;
}
/* case_ok(char* buf, Py_ssize_t len, Py_ssize_t namelen, char* name)
@@ -2416,7 +2453,7 @@ static PyObject *
import_module_level(char *name, PyObject *globals, PyObject *locals,
PyObject *fromlist, int level)
{
- char buf[MAXPATHLEN+1];
+ char *buf;
Py_ssize_t buflen = 0;
PyObject *parent, *head, *next, *tail;
@@ -2430,14 +2467,18 @@ import_module_level(char *name, PyObject *globals, PyObject *locals,
return NULL;
}
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ return PyErr_NoMemory();
+ }
parent = get_parent(globals, buf, &buflen, level);
if (parent == NULL)
- return NULL;
+ goto error_exit;
head = load_next(parent, level < 0 ? Py_None : parent, &name, buf,
&buflen);
if (head == NULL)
- return NULL;
+ goto error_exit;
tail = head;
Py_INCREF(tail);
@@ -2446,7 +2487,7 @@ import_module_level(char *name, PyObject *globals, PyObject *locals,
Py_DECREF(tail);
if (next == NULL) {
Py_DECREF(head);
- return NULL;
+ goto error_exit;
}
tail = next;
}
@@ -2458,7 +2499,7 @@ import_module_level(char *name, PyObject *globals, PyObject *locals,
Py_DECREF(head);
PyErr_SetString(PyExc_ValueError,
"Empty module name");
- return NULL;
+ goto error_exit;
}
if (fromlist != NULL) {
@@ -2468,16 +2509,22 @@ import_module_level(char *name, PyObject *globals, PyObject *locals,
if (fromlist == NULL) {
Py_DECREF(tail);
+ PyMem_FREE(buf);
return head;
}
Py_DECREF(head);
if (!ensure_fromlist(tail, fromlist, buf, buflen, 0)) {
Py_DECREF(tail);
- return NULL;
+ goto error_exit;
}
+ PyMem_FREE(buf);
return tail;
+
+error_exit:
+ PyMem_FREE(buf);
+ return NULL;
}
PyObject *
@@ -2880,7 +2927,7 @@ import_submodule(PyObject *mod, char *subname, char *fullname)
}
else {
PyObject *path, *loader = NULL;
- char buf[MAXPATHLEN+1];
+ char *buf;
struct filedescr *fdp;
FILE *fp = NULL;
@@ -2895,11 +2942,16 @@ import_submodule(PyObject *mod, char *subname, char *fullname)
}
}
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ return PyErr_NoMemory();
+ }
buf[0] = '\0';
fdp = find_module(fullname, subname, path, buf, MAXPATHLEN+1,
&fp, &loader);
Py_XDECREF(path);
if (fdp == NULL) {
+ PyMem_FREE(buf);
if (!PyErr_ExceptionMatches(PyExc_ImportError))
return NULL;
PyErr_Clear();
@@ -2914,6 +2966,7 @@ import_submodule(PyObject *mod, char *subname, char *fullname)
Py_XDECREF(m);
m = NULL;
}
+ PyMem_FREE(buf);
}
return m;
@@ -2931,7 +2984,7 @@ PyImport_ReloadModule(PyObject *m)
PyObject *modules = PyImport_GetModuleDict();
PyObject *path = NULL, *loader = NULL, *existing_m = NULL;
char *name, *subname;
- char buf[MAXPATHLEN+1];
+ char *buf;
struct filedescr *fdp;
FILE *fp = NULL;
PyObject *newm;
@@ -2991,6 +3044,11 @@ PyImport_ReloadModule(PyObject *m)
if (path == NULL)
PyErr_Clear();
}
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ Py_XDECREF(path);
+ return PyErr_NoMemory();
+ }
buf[0] = '\0';
fdp = find_module(name, subname, path, buf, MAXPATHLEN+1, &fp, &loader);
Py_XDECREF(path);
@@ -2998,6 +3056,7 @@ PyImport_ReloadModule(PyObject *m)
if (fdp == NULL) {
Py_XDECREF(loader);
imp_modules_reloading_clear();
+ PyMem_FREE(buf);
return NULL;
}
@@ -3015,6 +3074,7 @@ PyImport_ReloadModule(PyObject *m)
PyDict_SetItemString(modules, name, m);
}
imp_modules_reloading_clear();
+ PyMem_FREE(buf);
return newm;
}
@@ -3168,26 +3228,32 @@ call_find_module(char *name, PyObject *path)
PyObject *fob, *ret;
PyObject *pathobj;
struct filedescr *fdp;
- char pathname[MAXPATHLEN+1];
+ char *pathname;
FILE *fp = NULL;
int fd = -1;
char *found_encoding = NULL;
char *encoding = NULL;
+ pathname = PyMem_MALLOC(MAXPATHLEN+1);
+ if (pathname == NULL) {
+ return PyErr_NoMemory();
+ }
pathname[0] = '\0';
if (path == Py_None)
path = NULL;
fdp = find_module(NULL, name, path, pathname, MAXPATHLEN+1, &fp, NULL);
if (fdp == NULL)
- return NULL;
+ goto error_exit;
if (fp != NULL) {
fd = fileno(fp);
if (fd != -1)
fd = dup(fd);
fclose(fp);
fp = NULL;
- if (fd == -1)
- return PyErr_SetFromErrno(PyExc_OSError);
+ if (fd == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ goto error_exit;
+ }
}
if (fd != -1) {
if (strchr(fdp->mode, 'b') == NULL) {
@@ -3197,7 +3263,7 @@ call_find_module(char *name, PyObject *path)
lseek(fd, 0, 0); /* Reset position */
if (found_encoding == NULL && PyErr_Occurred()) {
close(fd);
- return NULL;
+ goto error_exit;
}
encoding = (found_encoding != NULL) ? found_encoding :
(char*)PyUnicode_GetDefaultEncoding();
@@ -3207,7 +3273,7 @@ call_find_module(char *name, PyObject *path)
if (fob == NULL) {
close(fd);
PyMem_FREE(found_encoding);
- return NULL;
+ goto error_exit;
}
}
else {
@@ -3218,8 +3284,12 @@ call_find_module(char *name, PyObject *path)
ret = Py_BuildValue("NN(ssi)",
fob, pathobj, fdp->suffix, fdp->mode, fdp->type);
PyMem_FREE(found_encoding);
-
+ PyMem_FREE(pathname);
return ret;
+
+error_exit:
+ PyMem_FREE(pathname);
+ return NULL;
}
static PyObject *
@@ -3509,7 +3579,7 @@ imp_cache_from_source(PyObject *self, PyObject *args, PyObject *kws)
{
static char *kwlist[] = {"path", "debug_override", NULL};
- char buf[MAXPATHLEN+1];
+ char *buf;
PyObject *pathbytes;
char *cpathname;
PyObject *debug_override = NULL;
@@ -3526,6 +3596,10 @@ imp_cache_from_source(PyObject *self, PyObject *args, PyObject *kws)
return NULL;
}
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ return PyErr_NoMemory();
+ }
cpathname = make_compiled_pathname(
PyBytes_AS_STRING(pathbytes),
buf, MAXPATHLEN+1, debug);
@@ -3533,9 +3607,14 @@ imp_cache_from_source(PyObject *self, PyObject *args, PyObject *kws)
if (cpathname == NULL) {
PyErr_Format(PyExc_SystemError, "path buffer too short");
+ PyMem_FREE(buf);
return NULL;
}
- return PyUnicode_DecodeFSDefault(buf);
+ {
+ PyObject *ret = PyUnicode_DecodeFSDefault(buf);
+ PyMem_FREE(buf);
+ return ret;
+ }
}
PyDoc_STRVAR(doc_cache_from_source,
@@ -3556,7 +3635,7 @@ imp_source_from_cache(PyObject *self, PyObject *args, PyObject *kws)
PyObject *pathname_obj;
char *pathname;
- char buf[MAXPATHLEN+1];
+ char *buf;
if (!PyArg_ParseTupleAndKeywords(
args, kws, "O&", kwlist,
@@ -3564,14 +3643,23 @@ imp_source_from_cache(PyObject *self, PyObject *args, PyObject *kws)
return NULL;
pathname = PyBytes_AS_STRING(pathname_obj);
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ return PyErr_NoMemory();
+ }
if (make_source_pathname(pathname, buf) == NULL) {
PyErr_Format(PyExc_ValueError, "Not a PEP 3147 pyc path: %s",
pathname);
Py_DECREF(pathname_obj);
+ PyMem_FREE(buf);
return NULL;
}
Py_DECREF(pathname_obj);
- return PyUnicode_FromString(buf);
+ {
+ PyObject *ret = PyUnicode_FromString(buf);
+ PyMem_FREE(buf);
+ return ret;
+ }
}
PyDoc_STRVAR(doc_source_from_cache,
diff --git a/Python/marshal.c b/Python/marshal.c
index 094f732382..3e2fbeb499 100644
--- a/Python/marshal.c
+++ b/Python/marshal.c
@@ -411,11 +411,12 @@ w_object(PyObject *v, WFILE *p)
else if (PyObject_CheckBuffer(v)) {
/* Write unknown buffer-style objects as a string */
char *s;
- PyBufferProcs *pb = v->ob_type->tp_as_buffer;
Py_buffer view;
- if ((*pb->bf_getbuffer)(v, &view, PyBUF_SIMPLE) != 0) {
+ if (PyObject_GetBuffer(v, &view, PyBUF_SIMPLE) != 0) {
w_byte(TYPE_UNKNOWN, p);
+ p->depth--;
p->error = WFERR_UNMARSHALLABLE;
+ return;
}
w_byte(TYPE_STRING, p);
n = view.len;
@@ -427,8 +428,7 @@ w_object(PyObject *v, WFILE *p)
}
w_long((long)n, p);
w_string(s, (int)n, p);
- if (pb->bf_releasebuffer != NULL)
- (*pb->bf_releasebuffer)(v, &view);
+ PyBuffer_Release(&view);
}
else {
w_byte(TYPE_UNKNOWN, p);
@@ -1383,7 +1383,7 @@ marshal_loads(PyObject *self, PyObject *args)
char *s;
Py_ssize_t n;
PyObject* result;
- if (!PyArg_ParseTuple(args, "s*:loads", &p))
+ if (!PyArg_ParseTuple(args, "y*:loads", &p))
return NULL;
s = p.buf;
n = p.len;
@@ -1400,10 +1400,10 @@ marshal_loads(PyObject *self, PyObject *args)
}
PyDoc_STRVAR(loads_doc,
-"loads(string)\n\
+"loads(bytes)\n\
\n\
-Convert the string to a value. If no valid value is found, raise\n\
-EOFError, ValueError or TypeError. Extra characters in the string are\n\
+Convert the bytes object to a value. If no valid value is found, raise\n\
+EOFError, ValueError or TypeError. Extra characters in the input are\n\
ignored.");
static PyMethodDef marshal_methods[] = {
diff --git a/Python/pythonrun.c b/Python/pythonrun.c
index 584a19b426..5a96bae059 100644
--- a/Python/pythonrun.c
+++ b/Python/pythonrun.c
@@ -1335,56 +1335,67 @@ parse_syntax_error(PyObject *err, PyObject **message, const char **filename,
return PyArg_ParseTuple(err, "O(ziiz)", message, filename,
lineno, offset, text);
- /* new style errors. `err' is an instance */
+ *message = NULL;
- if (! (v = PyObject_GetAttrString(err, "msg")))
+ /* new style errors. `err' is an instance */
+ *message = PyObject_GetAttrString(err, "msg");
+ if (!*message)
goto finally;
- *message = v;
- if (!(v = PyObject_GetAttrString(err, "filename")))
+ v = PyObject_GetAttrString(err, "filename");
+ if (!v)
goto finally;
- if (v == Py_None)
+ if (v == Py_None) {
+ Py_DECREF(v);
*filename = NULL;
- else if (! (*filename = _PyUnicode_AsString(v)))
- goto finally;
+ }
+ else {
+ *filename = _PyUnicode_AsString(v);
+ Py_DECREF(v);
+ if (!*filename)
+ goto finally;
+ }
- Py_DECREF(v);
- if (!(v = PyObject_GetAttrString(err, "lineno")))
+ v = PyObject_GetAttrString(err, "lineno");
+ if (!v)
goto finally;
hold = PyLong_AsLong(v);
Py_DECREF(v);
- v = NULL;
if (hold < 0 && PyErr_Occurred())
goto finally;
*lineno = (int)hold;
- if (!(v = PyObject_GetAttrString(err, "offset")))
+ v = PyObject_GetAttrString(err, "offset");
+ if (!v)
goto finally;
if (v == Py_None) {
*offset = -1;
Py_DECREF(v);
- v = NULL;
} else {
hold = PyLong_AsLong(v);
Py_DECREF(v);
- v = NULL;
if (hold < 0 && PyErr_Occurred())
goto finally;
*offset = (int)hold;
}
- if (!(v = PyObject_GetAttrString(err, "text")))
+ v = PyObject_GetAttrString(err, "text");
+ if (!v)
goto finally;
- if (v == Py_None)
+ if (v == Py_None) {
+ Py_DECREF(v);
*text = NULL;
- else if (!PyUnicode_Check(v) ||
- !(*text = _PyUnicode_AsString(v)))
- goto finally;
- Py_DECREF(v);
+ }
+ else {
+ *text = _PyUnicode_AsString(v);
+ Py_DECREF(v);
+ if (!*text)
+ goto finally;
+ }
return 1;
finally:
- Py_XDECREF(v);
+ Py_XDECREF(*message);
return 0;
}
diff --git a/Python/thread_pthread.h b/Python/thread_pthread.h
index 3efccf660a..3cde03567c 100644
--- a/Python/thread_pthread.h
+++ b/Python/thread_pthread.h
@@ -19,14 +19,18 @@
#define THREAD_STACK_SIZE 0 /* use default stack size */
#endif
-#if (defined(__APPLE__) || defined(__FreeBSD__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
- /* The default stack size for new threads on OSX is small enough that
- * we'll get hard crashes instead of 'maximum recursion depth exceeded'
- * exceptions.
- *
- * The default stack size below is the minimal stack size where a
- * simple recursive function doesn't cause a hard crash.
- */
+/* The default stack size for new threads on OSX and BSD is small enough that
+ * we'll get hard crashes instead of 'maximum recursion depth exceeded'
+ * exceptions.
+ *
+ * The default stack sizes below are the empirically determined minimal stack
+ * sizes where a simple recursive function doesn't cause a hard crash.
+ */
+#if defined(__APPLE__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
+#undef THREAD_STACK_SIZE
+#define THREAD_STACK_SIZE 0x500000
+#endif
+#if defined(__FreeBSD__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef THREAD_STACK_SIZE
#define THREAD_STACK_SIZE 0x400000
#endif
diff --git a/Tools/msi/msi.py b/Tools/msi/msi.py
index 19cd7fb1dd..508816dd86 100644
--- a/Tools/msi/msi.py
+++ b/Tools/msi/msi.py
@@ -1021,6 +1021,7 @@ def add_files(db):
lib.add_file("check_soundcard.vbs")
lib.add_file("empty.vbs")
lib.add_file("Sine-1000Hz-300ms.aif")
+ lib.add_file("mime.types")
lib.glob("*.uue")
lib.glob("*.pem")
lib.glob("*.pck")
diff --git a/Tools/scripts/abitype.py b/Tools/scripts/abitype.py
index 4d96c8b70e..ab0ba42c36 100755
--- a/Tools/scripts/abitype.py
+++ b/Tools/scripts/abitype.py
@@ -3,34 +3,6 @@
# Usage: abitype.py < old_code > new_code
import re, sys
-############ Simplistic C scanner ##################################
-tokenizer = re.compile(
- r"(?P<preproc>#.*\n)"
- r"|(?P<comment>/\*.*?\*/)"
- r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
- r"|(?P<ws>[ \t\n]+)"
- r"|(?P<other>.)",
- re.MULTILINE)
-
-tokens = []
-source = sys.stdin.read()
-pos = 0
-while pos != len(source):
- m = tokenizer.match(source, pos)
- tokens.append([m.lastgroup, m.group()])
- pos += len(tokens[-1][1])
- if tokens[-1][0] == 'preproc':
- # continuation lines are considered
- # only in preprocess statements
- while tokens[-1][1].endswith('\\\n'):
- nl = source.find('\n', pos)
- if nl == -1:
- line = source[pos:]
- else:
- line = source[pos:nl+1]
- tokens[-1][1] += line
- pos += len(line)
-
###### Replacement of PyTypeObject static instances ##############
# classify each token, giving it a one-letter code:
@@ -79,7 +51,7 @@ def get_fields(start, real_end):
while tokens[pos][0] in ('ws', 'comment'):
pos += 1
if tokens[pos][1] != 'PyVarObject_HEAD_INIT':
- raise Exception, '%s has no PyVarObject_HEAD_INIT' % name
+ raise Exception('%s has no PyVarObject_HEAD_INIT' % name)
while tokens[pos][1] != ')':
pos += 1
pos += 1
@@ -183,18 +155,48 @@ def make_slots(name, fields):
return '\n'.join(res)
-# Main loop: replace all static PyTypeObjects until
-# there are none left.
-while 1:
- c = classify()
- m = re.search('(SW)?TWIW?=W?{.*?};', c)
- if not m:
- break
- start = m.start()
- end = m.end()
- name, fields = get_fields(start, m)
- tokens[start:end] = [('',make_slots(name, fields))]
+if __name__ == '__main__':
+
+ ############ Simplistic C scanner ##################################
+ tokenizer = re.compile(
+ r"(?P<preproc>#.*\n)"
+ r"|(?P<comment>/\*.*?\*/)"
+ r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
+ r"|(?P<ws>[ \t\n]+)"
+ r"|(?P<other>.)",
+ re.MULTILINE)
+
+ tokens = []
+ source = sys.stdin.read()
+ pos = 0
+ while pos != len(source):
+ m = tokenizer.match(source, pos)
+ tokens.append([m.lastgroup, m.group()])
+ pos += len(tokens[-1][1])
+ if tokens[-1][0] == 'preproc':
+ # continuation lines are considered
+ # only in preprocess statements
+ while tokens[-1][1].endswith('\\\n'):
+ nl = source.find('\n', pos)
+ if nl == -1:
+ line = source[pos:]
+ else:
+ line = source[pos:nl+1]
+ tokens[-1][1] += line
+ pos += len(line)
+
+ # Main loop: replace all static PyTypeObjects until
+ # there are none left.
+ while 1:
+ c = classify()
+ m = re.search('(SW)?TWIW?=W?{.*?};', c)
+ if not m:
+ break
+ start = m.start()
+ end = m.end()
+ name, fields = get_fields(start, m)
+ tokens[start:end] = [('',make_slots(name, fields))]
-# Output result to stdout
-for t, v in tokens:
- sys.stdout.write(v)
+ # Output result to stdout
+ for t, v in tokens:
+ sys.stdout.write(v)
diff --git a/Tools/scripts/find_recursionlimit.py b/Tools/scripts/find_recursionlimit.py
index 443f052c4e..7a8660356a 100755
--- a/Tools/scripts/find_recursionlimit.py
+++ b/Tools/scripts/find_recursionlimit.py
@@ -106,14 +106,16 @@ def check_limit(n, test_func_name):
else:
print("Yikes!")
-limit = 1000
-while 1:
- check_limit(limit, "test_recurse")
- check_limit(limit, "test_add")
- check_limit(limit, "test_repr")
- check_limit(limit, "test_init")
- check_limit(limit, "test_getattr")
- check_limit(limit, "test_getitem")
- check_limit(limit, "test_cpickle")
- print("Limit of %d is fine" % limit)
- limit = limit + 100
+if __name__ == '__main__':
+
+ limit = 1000
+ while 1:
+ check_limit(limit, "test_recurse")
+ check_limit(limit, "test_add")
+ check_limit(limit, "test_repr")
+ check_limit(limit, "test_init")
+ check_limit(limit, "test_getattr")
+ check_limit(limit, "test_getitem")
+ check_limit(limit, "test_cpickle")
+ print("Limit of %d is fine" % limit)
+ limit = limit + 100
diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
index 77607ce137..a494a480f0 100755
--- a/Tools/scripts/findnocoding.py
+++ b/Tools/scripts/findnocoding.py
@@ -76,29 +76,31 @@ usage = """Usage: %s [-cd] paths...
-c: recognize Python source files trying to compile them
-d: debug output""" % sys.argv[0]
-try:
- opts, args = getopt.getopt(sys.argv[1:], 'cd')
-except getopt.error as msg:
- print(msg, file=sys.stderr)
- print(usage, file=sys.stderr)
- sys.exit(1)
-
-is_python = pysource.looks_like_python
-debug = False
-
-for o, a in opts:
- if o == '-c':
- is_python = pysource.can_be_compiled
- elif o == '-d':
- debug = True
-
-if not args:
- print(usage, file=sys.stderr)
- sys.exit(1)
-
-for fullpath in pysource.walk_python_files(args, is_python):
- if debug:
- print("Testing for coding: %s" % fullpath)
- result = needs_declaration(fullpath)
- if result:
- print(fullpath)
+if __name__ == '__main__':
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'cd')
+ except getopt.error as msg:
+ print(msg, file=sys.stderr)
+ print(usage, file=sys.stderr)
+ sys.exit(1)
+
+ is_python = pysource.looks_like_python
+ debug = False
+
+ for o, a in opts:
+ if o == '-c':
+ is_python = pysource.can_be_compiled
+ elif o == '-d':
+ debug = True
+
+ if not args:
+ print(usage, file=sys.stderr)
+ sys.exit(1)
+
+ for fullpath in pysource.walk_python_files(args, is_python):
+ if debug:
+ print("Testing for coding: %s" % fullpath)
+ result = needs_declaration(fullpath)
+ if result:
+ print(fullpath)
diff --git a/Tools/scripts/fixcid.py b/Tools/scripts/fixcid.py
index 2d4cd1ab00..87e2a0929f 100755
--- a/Tools/scripts/fixcid.py
+++ b/Tools/scripts/fixcid.py
@@ -292,7 +292,7 @@ def addsubst(substfile):
if not words: continue
if len(words) == 3 and words[0] == 'struct':
words[:2] = [words[0] + ' ' + words[1]]
- elif len(words) <> 2:
+ elif len(words) != 2:
err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line))
continue
if Reverse:
diff --git a/Tools/scripts/md5sum.py b/Tools/scripts/md5sum.py
index 743da72aa8..521960c17d 100755
--- a/Tools/scripts/md5sum.py
+++ b/Tools/scripts/md5sum.py
@@ -20,7 +20,7 @@ file ... : files to sum; '-' or no files means stdin
import sys
import os
import getopt
-import md5
+from hashlib import md5
def sum(*files):
sts = 0
diff --git a/Tools/scripts/parseentities.py b/Tools/scripts/parseentities.py
index 5b0f1c6741..a042d1c24c 100755
--- a/Tools/scripts/parseentities.py
+++ b/Tools/scripts/parseentities.py
@@ -13,7 +13,6 @@
"""
import re,sys
-import TextTools
entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
@@ -45,7 +44,7 @@ def writefile(f,defs):
charcode = repr(charcode)
else:
charcode = repr(charcode)
- comment = TextTools.collapse(comment)
+ comment = ' '.join(comment.split())
f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment))
f.write('\n}\n')
diff --git a/Tools/scripts/pdeps.py b/Tools/scripts/pdeps.py
index 938f31c164..f8218ac524 100755
--- a/Tools/scripts/pdeps.py
+++ b/Tools/scripts/pdeps.py
@@ -76,10 +76,9 @@ def process(filename, table):
nextline = fp.readline()
if not nextline: break
line = line[:-1] + nextline
- if m_import.match(line) >= 0:
- (a, b), (a1, b1) = m_import.regs[:2]
- elif m_from.match(line) >= 0:
- (a, b), (a1, b1) = m_from.regs[:2]
+ m_found = m_import.match(line) or m_from.match(line)
+ if m_found:
+ (a, b), (a1, b1) = m_found.regs[:2]
else: continue
words = line[a1:b1].split(',')
# print '#', line, words
@@ -87,6 +86,7 @@ def process(filename, table):
word = word.strip()
if word not in list:
list.append(word)
+ fp.close()
# Compute closure (this is in fact totally general)
@@ -123,7 +123,7 @@ def closure(table):
def inverse(table):
inv = {}
for key in table.keys():
- if not inv.has_key(key):
+ if key not in inv:
inv[key] = []
for item in table[key]:
store(inv, item, key)
diff --git a/configure b/configure
index 5f00c1ecfa..9e7cfc06b2 100755
--- a/configure
+++ b/configure
@@ -1,5 +1,5 @@
#! /bin/sh
-# From configure.in Revision.
+# From configure.ac Revision.
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.68 for python 3.2.
#
diff --git a/configure.in b/configure.ac
index 138d562011..138d562011 100644
--- a/configure.in
+++ b/configure.ac
diff --git a/pyconfig.h.in b/pyconfig.h.in
index 8e8f3191e7..ca8ccb695d 100644
--- a/pyconfig.h.in
+++ b/pyconfig.h.in
@@ -1,4 +1,4 @@
-/* pyconfig.h.in. Generated from configure.in by autoheader. */
+/* pyconfig.h.in. Generated from configure.ac by autoheader. */
#ifndef Py_PYCONFIG_H
diff --git a/setup.py b/setup.py
index 1318bc272c..d69875cffb 100644
--- a/setup.py
+++ b/setup.py
@@ -197,7 +197,7 @@ class PyBuildExt(build_ext):
# Python header files
headers = [sysconfig.get_config_h_filename()]
- headers += glob(os.path.join(sysconfig.get_path('platinclude'), "*.h"))
+ headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))
for ext in self.extensions[:]:
ext.sources = [ find_module_file(filename, moddirlist)