Diffstat (limited to 'Lib/multiprocessing')
-rw-r--r--  Lib/multiprocessing/__init__.py           | 251
-rw-r--r--  Lib/multiprocessing/connection.py         | 154
-rw-r--r--  Lib/multiprocessing/context.py            | 348
-rw-r--r--  Lib/multiprocessing/dummy/__init__.py     |  33
-rw-r--r--  Lib/multiprocessing/dummy/connection.py   |  27
-rw-r--r--  Lib/multiprocessing/forking.py            | 474
-rw-r--r--  Lib/multiprocessing/forkserver.py         | 267
-rw-r--r--  Lib/multiprocessing/heap.py               |  54
-rw-r--r--  Lib/multiprocessing/managers.py           |  62
-rw-r--r--  Lib/multiprocessing/pool.py               | 143
-rw-r--r--  Lib/multiprocessing/popen_fork.py         |  83
-rw-r--r--  Lib/multiprocessing/popen_forkserver.py   |  69
-rw-r--r--  Lib/multiprocessing/popen_spawn_posix.py  |  69
-rw-r--r--  Lib/multiprocessing/popen_spawn_win32.py  |  99
-rw-r--r--  Lib/multiprocessing/process.py            |  83
-rw-r--r--  Lib/multiprocessing/queues.py             | 106
-rw-r--r--  Lib/multiprocessing/reduction.py          | 362
-rw-r--r--  Lib/multiprocessing/resource_sharer.py    | 158
-rw-r--r--  Lib/multiprocessing/semaphore_tracker.py  | 143
-rw-r--r--  Lib/multiprocessing/sharedctypes.py       |  40
-rw-r--r--  Lib/multiprocessing/spawn.py              | 287
-rw-r--r--  Lib/multiprocessing/synchronize.py        | 111
-rw-r--r--  Lib/multiprocessing/util.py               |  54
23 files changed, 2181 insertions, 1296 deletions
diff --git a/Lib/multiprocessing/__init__.py b/Lib/multiprocessing/__init__.py
index 1f3e67c9b8..86df638370 100644
--- a/Lib/multiprocessing/__init__.py
+++ b/Lib/multiprocessing/__init__.py
@@ -8,260 +8,31 @@
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
-# Try calling `multiprocessing.doc.main()` to read the html
-# documentation in a webbrowser.
-#
-#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
-__version__ = '0.70a1'
-
-__all__ = [
- 'Process', 'current_process', 'active_children', 'freeze_support',
- 'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
- 'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
- 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
- 'Event', 'Barrier', 'Queue', 'SimpleQueue', 'JoinableQueue', 'Pool',
- 'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
- ]
-
-__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
-
-#
-# Imports
-#
-
-import os
import sys
-
-from multiprocessing.process import Process, current_process, active_children
-from multiprocessing.util import SUBDEBUG, SUBWARNING
+from . import context
#
-# Exceptions
+# Copy stuff from default context
#
-class ProcessError(Exception):
- pass
-
-class BufferTooShort(ProcessError):
- pass
-
-class TimeoutError(ProcessError):
- pass
-
-class AuthenticationError(ProcessError):
- pass
-
-import _multiprocessing
-
-#
-# Definitions not depending on native semaphores
-#
-
-def Manager():
- '''
- Returns a manager associated with a running server process
-
- The managers methods such as `Lock()`, `Condition()` and `Queue()`
- can be used to create shared objects.
- '''
- from multiprocessing.managers import SyncManager
- m = SyncManager()
- m.start()
- return m
-
-def Pipe(duplex=True):
- '''
- Returns two connection object connected by a pipe
- '''
- from multiprocessing.connection import Pipe
- return Pipe(duplex)
-
-def cpu_count():
- '''
- Returns the number of CPUs in the system
- '''
- if sys.platform == 'win32':
- try:
- num = int(os.environ['NUMBER_OF_PROCESSORS'])
- except (ValueError, KeyError):
- num = 0
- elif 'bsd' in sys.platform or sys.platform == 'darwin':
- comm = '/sbin/sysctl -n hw.ncpu'
- if sys.platform == 'darwin':
- comm = '/usr' + comm
- try:
- with os.popen(comm) as p:
- num = int(p.read())
- except ValueError:
- num = 0
- else:
- try:
- num = os.sysconf('SC_NPROCESSORS_ONLN')
- except (ValueError, OSError, AttributeError):
- num = 0
-
- if num >= 1:
- return num
- else:
- raise NotImplementedError('cannot determine number of cpus')
-
-def freeze_support():
- '''
- Check whether this is a fake forked process in a frozen executable.
- If so then run code specified by commandline and exit.
- '''
- if sys.platform == 'win32' and getattr(sys, 'frozen', False):
- from multiprocessing.forking import freeze_support
- freeze_support()
-
-def get_logger():
- '''
- Return package logger -- if it does not already exist then it is created
- '''
- from multiprocessing.util import get_logger
- return get_logger()
-
-def log_to_stderr(level=None):
- '''
- Turn on logging and add a handler which prints to stderr
- '''
- from multiprocessing.util import log_to_stderr
- return log_to_stderr(level)
-
-def allow_connection_pickling():
- '''
- Install support for sending connections and sockets between processes
- '''
- # This is undocumented. In previous versions of multiprocessing
- # its only effect was to make socket objects inheritable on Windows.
- import multiprocessing.connection
+globals().update((name, getattr(context._default_context, name))
+ for name in context._default_context.__all__)
+__all__ = context._default_context.__all__
#
-# Definitions depending on native semaphores
+# XXX These should not really be documented or public.
#
-def Lock():
- '''
- Returns a non-recursive lock object
- '''
- from multiprocessing.synchronize import Lock
- return Lock()
-
-def RLock():
- '''
- Returns a recursive lock object
- '''
- from multiprocessing.synchronize import RLock
- return RLock()
-
-def Condition(lock=None):
- '''
- Returns a condition object
- '''
- from multiprocessing.synchronize import Condition
- return Condition(lock)
-
-def Semaphore(value=1):
- '''
- Returns a semaphore object
- '''
- from multiprocessing.synchronize import Semaphore
- return Semaphore(value)
-
-def BoundedSemaphore(value=1):
- '''
- Returns a bounded semaphore object
- '''
- from multiprocessing.synchronize import BoundedSemaphore
- return BoundedSemaphore(value)
-
-def Event():
- '''
- Returns an event object
- '''
- from multiprocessing.synchronize import Event
- return Event()
-
-def Barrier(parties, action=None, timeout=None):
- '''
- Returns a barrier object
- '''
- from multiprocessing.synchronize import Barrier
- return Barrier(parties, action, timeout)
-
-def Queue(maxsize=0):
- '''
- Returns a queue object
- '''
- from multiprocessing.queues import Queue
- return Queue(maxsize)
+SUBDEBUG = 5
+SUBWARNING = 25
-def JoinableQueue(maxsize=0):
- '''
- Returns a queue object
- '''
- from multiprocessing.queues import JoinableQueue
- return JoinableQueue(maxsize)
-
-def SimpleQueue():
- '''
- Returns a queue object
- '''
- from multiprocessing.queues import SimpleQueue
- return SimpleQueue()
-
-def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
- '''
- Returns a process pool object
- '''
- from multiprocessing.pool import Pool
- return Pool(processes, initializer, initargs, maxtasksperchild)
-
-def RawValue(typecode_or_type, *args):
- '''
- Returns a shared object
- '''
- from multiprocessing.sharedctypes import RawValue
- return RawValue(typecode_or_type, *args)
-
-def RawArray(typecode_or_type, size_or_initializer):
- '''
- Returns a shared array
- '''
- from multiprocessing.sharedctypes import RawArray
- return RawArray(typecode_or_type, size_or_initializer)
-
-def Value(typecode_or_type, *args, lock=True):
- '''
- Returns a synchronized shared object
- '''
- from multiprocessing.sharedctypes import Value
- return Value(typecode_or_type, *args, lock=lock)
-
-def Array(typecode_or_type, size_or_initializer, *, lock=True):
- '''
- Returns a synchronized shared array
- '''
- from multiprocessing.sharedctypes import Array
- return Array(typecode_or_type, size_or_initializer, lock=lock)
-
-#
#
+# Alias for main module -- will be reset by bootstrapping child processes
#
-if sys.platform == 'win32':
-
- def set_executable(executable):
- '''
- Sets the path to a python.exe or pythonw.exe binary used to run
- child processes on Windows instead of sys.executable.
- Useful for people embedding Python.
- '''
- from multiprocessing.forking import set_executable
- set_executable(executable)
-
- __all__ += ['set_executable']
+if '__main__' in sys.modules:
+ sys.modules['__mp_main__'] = sys.modules['__main__']
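
With this rewrite, __init__.py defines almost nothing itself: every public name (Process, Queue, Lock, get_context, set_start_method, ...) is copied off the default context object built in context.py below. A minimal sketch of how the resulting API is used (assuming the 'spawn' method is available, as it is on all platforms):

    import multiprocessing as mp

    def work(q):
        q.put(mp.current_process().name)

    if __name__ == '__main__':
        # get_context() returns a concrete context whose start method is
        # fixed, independently of the module-level default.
        ctx = mp.get_context('spawn')
        q = ctx.Queue()
        p = ctx.Process(target=work, args=(q,))
        p.start()
        print(q.get())
        p.join()
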
diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py
index 22589d0422..87117d91cb 100644
--- a/Lib/multiprocessing/connection.py
+++ b/Lib/multiprocessing/connection.py
@@ -12,22 +12,23 @@ __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]
import io
import os
import sys
-import pickle
-import select
import socket
import struct
-import errno
import time
import tempfile
import itertools
import _multiprocessing
-from multiprocessing import current_process, AuthenticationError, BufferTooShort
-from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug
-from multiprocessing.forking import ForkingPickler
+
+from . import reduction
+from . import util
+
+from . import AuthenticationError, BufferTooShort
+from .reduction import ForkingPickler
+
try:
import _winapi
- from _winapi import WAIT_OBJECT_0, WAIT_TIMEOUT, INFINITE
+ from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
except ImportError:
if sys.platform == 'win32':
raise
@@ -72,10 +73,10 @@ def arbitrary_address(family):
if family == 'AF_INET':
return ('localhost', 0)
elif family == 'AF_UNIX':
- return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
+ return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
- (os.getpid(), next(_mmap_counter)))
+ (os.getpid(), next(_mmap_counter)), dir="")
else:
raise ValueError('unrecognized family')
@@ -132,22 +133,22 @@ class _ConnectionBase:
def _check_closed(self):
if self._handle is None:
- raise IOError("handle is closed")
+ raise OSError("handle is closed")
def _check_readable(self):
if not self._readable:
- raise IOError("connection is write-only")
+ raise OSError("connection is write-only")
def _check_writable(self):
if not self._writable:
- raise IOError("connection is read-only")
+ raise OSError("connection is read-only")
def _bad_message_length(self):
if self._writable:
self._readable = False
else:
self.close()
- raise IOError("bad message length")
+ raise OSError("bad message length")
@property
def closed(self):
@@ -202,9 +203,7 @@ class _ConnectionBase:
"""Send a (picklable) object"""
self._check_closed()
self._check_writable()
- buf = io.BytesIO()
- ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
- self._send_bytes(buf.getbuffer())
+ self._send_bytes(ForkingPickler.dumps(obj))
def recv_bytes(self, maxlength=None):
"""
@@ -221,7 +220,7 @@ class _ConnectionBase:
def recv_bytes_into(self, buf, offset=0):
"""
- Receive bytes data into a writeable buffer-like object.
+ Receive bytes data into a writeable bytes-like object.
Return the number of bytes read.
"""
self._check_closed()
@@ -249,7 +248,7 @@ class _ConnectionBase:
self._check_closed()
self._check_readable()
buf = self._recv_bytes()
- return pickle.loads(buf.getbuffer())
+ return ForkingPickler.loads(buf.getbuffer())
def poll(self, timeout=0.0):
"""Whether there is any input available to be read"""
@@ -317,7 +316,7 @@ if _winapi:
return f
elif err == _winapi.ERROR_MORE_DATA:
return self._get_more_data(ov, maxsize)
- except IOError as e:
+ except OSError as e:
if e.winerror == _winapi.ERROR_BROKEN_PIPE:
raise EOFError
else:
@@ -389,7 +388,7 @@ class Connection(_ConnectionBase):
if remaining == size:
raise EOFError
else:
- raise IOError("got end of file during message")
+ raise OSError("got end of file during message")
buf.write(chunk)
remaining -= n
return buf
@@ -459,7 +458,7 @@ class Listener(object):
Returns a `Connection` object.
'''
if self._listener is None:
- raise IOError('listener is closed')
+ raise OSError('listener is closed')
c = self._listener.accept()
if self._authkey:
deliver_challenge(c, self._authkey)
@@ -545,7 +544,9 @@ else:
_winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
- 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
+ 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,
+ # default security descriptor: the handle cannot be inherited
+ _winapi.NULL
)
h2 = _winapi.CreateFile(
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
@@ -590,7 +591,7 @@ class SocketListener(object):
self._last_accepted = None
if family == 'AF_UNIX':
- self._unlink = Finalize(
+ self._unlink = util.Finalize(
self, os.unlink, args=(address,), exitpriority=0
)
else:
@@ -638,8 +639,8 @@ if sys.platform == 'win32':
self._handle_queue = [self._new_handle(first=True)]
self._last_accepted = None
- sub_debug('listener created with address=%r', self._address)
- self.close = Finalize(
+ util.sub_debug('listener created with address=%r', self._address)
+ self.close = util.Finalize(
self, PipeListener._finalize_pipe_listener,
args=(self._handle_queue, self._address), exitpriority=0
)
@@ -681,7 +682,7 @@ if sys.platform == 'win32':
@staticmethod
def _finalize_pipe_listener(queue, address):
- sub_debug('closing listener with address=%r', address)
+ util.sub_debug('closing listener with address=%r', address)
for handle in queue:
_winapi.CloseHandle(handle)
@@ -698,7 +699,7 @@ if sys.platform == 'win32':
0, _winapi.NULL, _winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
)
- except WindowsError as e:
+ except OSError as e:
if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
_winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
raise
@@ -727,7 +728,7 @@ def deliver_challenge(connection, authkey):
assert isinstance(authkey, bytes)
message = os.urandom(MESSAGE_LENGTH)
connection.send_bytes(CHALLENGE + message)
- digest = hmac.new(authkey, message).digest()
+ digest = hmac.new(authkey, message, 'md5').digest()
response = connection.recv_bytes(256) # reject large message
if response == digest:
connection.send_bytes(WELCOME)
@@ -741,7 +742,7 @@ def answer_challenge(connection, authkey):
message = connection.recv_bytes(256) # reject large message
assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
message = message[len(CHALLENGE):]
- digest = hmac.new(authkey, message).digest()
+ digest = hmac.new(authkey, message, 'md5').digest()
connection.send_bytes(digest)
response = connection.recv_bytes(256) # reject large message
if response != WELCOME:
@@ -884,28 +885,15 @@ if sys.platform == 'win32':
else:
- if hasattr(select, 'poll'):
- def _poll(fds, timeout):
- if timeout is not None:
- timeout = int(timeout * 1000) # timeout is in milliseconds
- fd_map = {}
- pollster = select.poll()
- for fd in fds:
- pollster.register(fd, select.POLLIN)
- if hasattr(fd, 'fileno'):
- fd_map[fd.fileno()] = fd
- else:
- fd_map[fd] = fd
- ls = []
- for fd, event in pollster.poll(timeout):
- if event & select.POLLNVAL:
- raise ValueError('invalid file descriptor %i' % fd)
- ls.append(fd_map[fd])
- return ls
- else:
- def _poll(fds, timeout):
- return select.select(fds, [], [], timeout)[0]
+ import selectors
+ # poll/select have the advantage of not requiring any extra file
+ # descriptor, unlike epoll/kqueue (also, they require a single
+ # syscall).
+ if hasattr(selectors, 'PollSelector'):
+ _WaitSelector = selectors.PollSelector
+ else:
+ _WaitSelector = selectors.SelectSelector
def wait(object_list, timeout=None):
'''
@@ -913,34 +901,54 @@ else:
Returns list of those objects in object_list which are ready/readable.
'''
- if timeout is not None:
- if timeout <= 0:
- return _poll(object_list, 0)
- else:
- deadline = time.time() + timeout
- while True:
- try:
- return _poll(object_list, timeout)
- except OSError as e:
- if e.errno != errno.EINTR:
- raise
+ with _WaitSelector() as selector:
+ for obj in object_list:
+ selector.register(obj, selectors.EVENT_READ)
+
if timeout is not None:
- timeout = deadline - time.time()
+ deadline = time.time() + timeout
+
+ while True:
+ ready = selector.select(timeout)
+ if ready:
+ return [key.fileobj for (key, events) in ready]
+ else:
+ if timeout is not None:
+ timeout = deadline - time.time()
+ if timeout < 0:
+ return ready
#
# Make connection and socket objects sharable if possible
#
if sys.platform == 'win32':
- from . import reduction
- ForkingPickler.register(socket.socket, reduction.reduce_socket)
- ForkingPickler.register(Connection, reduction.reduce_connection)
- ForkingPickler.register(PipeConnection, reduction.reduce_pipe_connection)
+ def reduce_connection(conn):
+ handle = conn.fileno()
+ with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
+ from . import resource_sharer
+ ds = resource_sharer.DupSocket(s)
+ return rebuild_connection, (ds, conn.readable, conn.writable)
+ def rebuild_connection(ds, readable, writable):
+ sock = ds.detach()
+ return Connection(sock.detach(), readable, writable)
+ reduction.register(Connection, reduce_connection)
+
+ def reduce_pipe_connection(conn):
+ access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
+ (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
+ dh = reduction.DupHandle(conn.fileno(), access)
+ return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
+ def rebuild_pipe_connection(dh, readable, writable):
+ handle = dh.detach()
+ return PipeConnection(handle, readable, writable)
+ reduction.register(PipeConnection, reduce_pipe_connection)
+
else:
- try:
- from . import reduction
- except ImportError:
- pass
- else:
- ForkingPickler.register(socket.socket, reduction.reduce_socket)
- ForkingPickler.register(Connection, reduction.reduce_connection)
+ def reduce_connection(conn):
+ df = reduction.DupFd(conn.fileno())
+ return rebuild_connection, (df, conn.readable, conn.writable)
+ def rebuild_connection(df, readable, writable):
+ fd = df.detach()
+ return Connection(fd, readable, writable)
+ reduction.register(Connection, reduce_connection)
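
The selectors-based wait() above preserves the old behaviour: it returns the subset of objects that are ready for reading, looping with a recomputed timeout if the selector wakes up empty-handed before the deadline. A small usage sketch using only names from this module:

    from multiprocessing import Pipe
    from multiprocessing.connection import wait

    r1, w1 = Pipe(duplex=False)
    r2, w2 = Pipe(duplex=False)
    w2.send('hello')

    # Only r2 has pending data, so wait() returns just that connection.
    ready = wait([r1, r2], timeout=1.0)
    assert ready == [r2]
    print(ready[0].recv())
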
diff --git a/Lib/multiprocessing/context.py b/Lib/multiprocessing/context.py
new file mode 100644
index 0000000000..63849f9d16
--- /dev/null
+++ b/Lib/multiprocessing/context.py
@@ -0,0 +1,348 @@
+import os
+import sys
+import threading
+
+from . import process
+
+__all__ = [] # things are copied from here to __init__.py
+
+#
+# Exceptions
+#
+
+class ProcessError(Exception):
+ pass
+
+class BufferTooShort(ProcessError):
+ pass
+
+class TimeoutError(ProcessError):
+ pass
+
+class AuthenticationError(ProcessError):
+ pass
+
+#
+# Base type for contexts
+#
+
+class BaseContext(object):
+
+ ProcessError = ProcessError
+ BufferTooShort = BufferTooShort
+ TimeoutError = TimeoutError
+ AuthenticationError = AuthenticationError
+
+ current_process = staticmethod(process.current_process)
+ active_children = staticmethod(process.active_children)
+
+ def cpu_count(self):
+ '''Returns the number of CPUs in the system'''
+ num = os.cpu_count()
+ if num is None:
+ raise NotImplementedError('cannot determine number of cpus')
+ else:
+ return num
+
+ def Manager(self):
+ '''Returns a manager associated with a running server process
+
+ The manager's methods such as `Lock()`, `Condition()` and `Queue()`
+ can be used to create shared objects.
+ '''
+ from .managers import SyncManager
+ m = SyncManager(ctx=self.get_context())
+ m.start()
+ return m
+
+ def Pipe(self, duplex=True):
+ '''Returns two connection objects connected by a pipe'''
+ from .connection import Pipe
+ return Pipe(duplex)
+
+ def Lock(self):
+ '''Returns a non-recursive lock object'''
+ from .synchronize import Lock
+ return Lock(ctx=self.get_context())
+
+ def RLock(self):
+ '''Returns a recursive lock object'''
+ from .synchronize import RLock
+ return RLock(ctx=self.get_context())
+
+ def Condition(self, lock=None):
+ '''Returns a condition object'''
+ from .synchronize import Condition
+ return Condition(lock, ctx=self.get_context())
+
+ def Semaphore(self, value=1):
+ '''Returns a semaphore object'''
+ from .synchronize import Semaphore
+ return Semaphore(value, ctx=self.get_context())
+
+ def BoundedSemaphore(self, value=1):
+ '''Returns a bounded semaphore object'''
+ from .synchronize import BoundedSemaphore
+ return BoundedSemaphore(value, ctx=self.get_context())
+
+ def Event(self):
+ '''Returns an event object'''
+ from .synchronize import Event
+ return Event(ctx=self.get_context())
+
+ def Barrier(self, parties, action=None, timeout=None):
+ '''Returns a barrier object'''
+ from .synchronize import Barrier
+ return Barrier(parties, action, timeout, ctx=self.get_context())
+
+ def Queue(self, maxsize=0):
+ '''Returns a queue object'''
+ from .queues import Queue
+ return Queue(maxsize, ctx=self.get_context())
+
+ def JoinableQueue(self, maxsize=0):
+ '''Returns a queue object'''
+ from .queues import JoinableQueue
+ return JoinableQueue(maxsize, ctx=self.get_context())
+
+ def SimpleQueue(self):
+ '''Returns a queue object'''
+ from .queues import SimpleQueue
+ return SimpleQueue(ctx=self.get_context())
+
+ def Pool(self, processes=None, initializer=None, initargs=(),
+ maxtasksperchild=None):
+ '''Returns a process pool object'''
+ from .pool import Pool
+ return Pool(processes, initializer, initargs, maxtasksperchild,
+ context=self.get_context())
+
+ def RawValue(self, typecode_or_type, *args):
+ '''Returns a shared object'''
+ from .sharedctypes import RawValue
+ return RawValue(typecode_or_type, *args)
+
+ def RawArray(self, typecode_or_type, size_or_initializer):
+ '''Returns a shared array'''
+ from .sharedctypes import RawArray
+ return RawArray(typecode_or_type, size_or_initializer)
+
+ def Value(self, typecode_or_type, *args, lock=True):
+ '''Returns a synchronized shared object'''
+ from .sharedctypes import Value
+ return Value(typecode_or_type, *args, lock=lock,
+ ctx=self.get_context())
+
+ def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
+ '''Returns a synchronized shared array'''
+ from .sharedctypes import Array
+ return Array(typecode_or_type, size_or_initializer, lock=lock,
+ ctx=self.get_context())
+
+ def freeze_support(self):
+ '''Check whether this is a fake forked process in a frozen executable.
+ If so then run code specified by commandline and exit.
+ '''
+ if sys.platform == 'win32' and getattr(sys, 'frozen', False):
+ from .spawn import freeze_support
+ freeze_support()
+
+ def get_logger(self):
+ '''Return package logger -- if it does not already exist then
+ it is created.
+ '''
+ from .util import get_logger
+ return get_logger()
+
+ def log_to_stderr(self, level=None):
+ '''Turn on logging and add a handler which prints to stderr'''
+ from .util import log_to_stderr
+ return log_to_stderr(level)
+
+ def allow_connection_pickling(self):
+ '''Install support for sending connections and sockets
+ between processes
+ '''
+ # This is undocumented. In previous versions of multiprocessing
+ # its only effect was to make socket objects inheritable on Windows.
+ from . import connection
+
+ def set_executable(self, executable):
+ '''Sets the path to a python.exe or pythonw.exe binary used to run
+ child processes instead of sys.executable when using the 'spawn'
+ start method. Useful for people embedding Python.
+ '''
+ from .spawn import set_executable
+ set_executable(executable)
+
+ def set_forkserver_preload(self, module_names):
+ '''Set list of module names to try to load in forkserver process.
+ This is really just a hint.
+ '''
+ from .forkserver import set_forkserver_preload
+ set_forkserver_preload(module_names)
+
+ def get_context(self, method=None):
+ if method is None:
+ return self
+ try:
+ ctx = _concrete_contexts[method]
+ except KeyError:
+ raise ValueError('cannot find context for %r' % method)
+ ctx._check_available()
+ return ctx
+
+ def get_start_method(self, allow_none=False):
+ return self._name
+
+ def set_start_method(self, method=None):
+ raise ValueError('cannot set start method of concrete context')
+
+ def _check_available(self):
+ pass
+
+#
+# Type of default context -- underlying context can be set at most once
+#
+
+class Process(process.BaseProcess):
+ _start_method = None
+ @staticmethod
+ def _Popen(process_obj):
+ return _default_context.get_context().Process._Popen(process_obj)
+
+class DefaultContext(BaseContext):
+ Process = Process
+
+ def __init__(self, context):
+ self._default_context = context
+ self._actual_context = None
+
+ def get_context(self, method=None):
+ if method is None:
+ if self._actual_context is None:
+ self._actual_context = self._default_context
+ return self._actual_context
+ else:
+ return super().get_context(method)
+
+ def set_start_method(self, method, force=False):
+ if self._actual_context is not None and not force:
+ raise RuntimeError('context has already been set')
+ if method is None and force:
+ self._actual_context = None
+ return
+ self._actual_context = self.get_context(method)
+
+ def get_start_method(self, allow_none=False):
+ if self._actual_context is None:
+ if allow_none:
+ return None
+ self._actual_context = self._default_context
+ return self._actual_context._name
+
+ def get_all_start_methods(self):
+ if sys.platform == 'win32':
+ return ['spawn']
+ else:
+ from . import reduction
+ if reduction.HAVE_SEND_HANDLE:
+ return ['fork', 'spawn', 'forkserver']
+ else:
+ return ['fork', 'spawn']
+
+DefaultContext.__all__ = list(x for x in dir(DefaultContext) if x[0] != '_')
+
+#
+# Context types for fixed start method
+#
+
+if sys.platform != 'win32':
+
+ class ForkProcess(process.BaseProcess):
+ _start_method = 'fork'
+ @staticmethod
+ def _Popen(process_obj):
+ from .popen_fork import Popen
+ return Popen(process_obj)
+
+ class SpawnProcess(process.BaseProcess):
+ _start_method = 'spawn'
+ @staticmethod
+ def _Popen(process_obj):
+ from .popen_spawn_posix import Popen
+ return Popen(process_obj)
+
+ class ForkServerProcess(process.BaseProcess):
+ _start_method = 'forkserver'
+ @staticmethod
+ def _Popen(process_obj):
+ from .popen_forkserver import Popen
+ return Popen(process_obj)
+
+ class ForkContext(BaseContext):
+ _name = 'fork'
+ Process = ForkProcess
+
+ class SpawnContext(BaseContext):
+ _name = 'spawn'
+ Process = SpawnProcess
+
+ class ForkServerContext(BaseContext):
+ _name = 'forkserver'
+ Process = ForkServerProcess
+ def _check_available(self):
+ from . import reduction
+ if not reduction.HAVE_SEND_HANDLE:
+ raise ValueError('forkserver start method not available')
+
+ _concrete_contexts = {
+ 'fork': ForkContext(),
+ 'spawn': SpawnContext(),
+ 'forkserver': ForkServerContext(),
+ }
+ _default_context = DefaultContext(_concrete_contexts['fork'])
+
+else:
+
+ class SpawnProcess(process.BaseProcess):
+ _start_method = 'spawn'
+ @staticmethod
+ def _Popen(process_obj):
+ from .popen_spawn_win32 import Popen
+ return Popen(process_obj)
+
+ class SpawnContext(BaseContext):
+ _name = 'spawn'
+ Process = SpawnProcess
+
+ _concrete_contexts = {
+ 'spawn': SpawnContext(),
+ }
+ _default_context = DefaultContext(_concrete_contexts['spawn'])
+
+#
+# Force the start method
+#
+
+def _force_start_method(method):
+ _default_context._actual_context = _concrete_contexts[method]
+
+#
+# Check that the current thread is spawning a child process
+#
+
+_tls = threading.local()
+
+def get_spawning_popen():
+ return getattr(_tls, 'spawning_popen', None)
+
+def set_spawning_popen(popen):
+ _tls.spawning_popen = popen
+
+def assert_spawning(obj):
+ if get_spawning_popen() is None:
+ raise RuntimeError(
+ '%s objects should only be shared between processes'
+ ' through inheritance' % type(obj).__name__
+ )
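
Taken together, context.py gives each start method its own context class while keeping one mutable default. set_start_method() may be called at most once on the default context; get_context() hands out an independent concrete context instead. A short sketch:

    import multiprocessing as mp

    def hello():
        print('child started')

    if __name__ == '__main__':
        print(mp.get_all_start_methods())  # e.g. ['fork', 'spawn', 'forkserver']
        mp.set_start_method('spawn')       # a second call raises RuntimeError
        p = mp.Process(target=hello)
        p.start()
        p.join()
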
diff --git a/Lib/multiprocessing/dummy/__init__.py b/Lib/multiprocessing/dummy/__init__.py
index e31fc61572..135db7f77f 100644
--- a/Lib/multiprocessing/dummy/__init__.py
+++ b/Lib/multiprocessing/dummy/__init__.py
@@ -4,32 +4,7 @@
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# 3. Neither the name of author nor the names of any contributors may be
-# used to endorse or promote products derived from this software
-# without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
+# Licensed to PSF under a Contributor Agreement.
#
__all__ = [
@@ -47,7 +22,7 @@ import sys
import weakref
import array
-from multiprocessing.dummy.connection import Pipe
+from .connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
@@ -129,7 +104,7 @@ class Value(object):
self._value = value
value = property(_get, _set)
def __repr__(self):
- return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
+ return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
@@ -138,7 +113,7 @@ def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
- from multiprocessing.pool import ThreadPool
+ from ..pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
diff --git a/Lib/multiprocessing/dummy/connection.py b/Lib/multiprocessing/dummy/connection.py
index 874ec8e432..694ef96215 100644
--- a/Lib/multiprocessing/dummy/connection.py
+++ b/Lib/multiprocessing/dummy/connection.py
@@ -4,32 +4,7 @@
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# 3. Neither the name of author nor the names of any contributors may be
-# used to endorse or promote products derived from this software
-# without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
+# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
diff --git a/Lib/multiprocessing/forking.py b/Lib/multiprocessing/forking.py
deleted file mode 100644
index 449978aaac..0000000000
--- a/Lib/multiprocessing/forking.py
+++ /dev/null
@@ -1,474 +0,0 @@
-#
-# Module for starting a process object using os.fork() or CreateProcess()
-#
-# multiprocessing/forking.py
-#
-# Copyright (c) 2006-2008, R Oudkerk
-# Licensed to PSF under a Contributor Agreement.
-#
-
-import os
-import sys
-import signal
-import errno
-
-from multiprocessing import util, process
-
-__all__ = ['Popen', 'assert_spawning', 'duplicate', 'close', 'ForkingPickler']
-
-#
-# Check that the current thread is spawning a child process
-#
-
-def assert_spawning(self):
- if not Popen.thread_is_spawning():
- raise RuntimeError(
- '%s objects should only be shared between processes'
- ' through inheritance' % type(self).__name__
- )
-
-#
-# Try making some callable types picklable
-#
-
-from pickle import Pickler
-from copyreg import dispatch_table
-
-class ForkingPickler(Pickler):
- _extra_reducers = {}
- def __init__(self, *args):
- Pickler.__init__(self, *args)
- self.dispatch_table = dispatch_table.copy()
- self.dispatch_table.update(self._extra_reducers)
- @classmethod
- def register(cls, type, reduce):
- cls._extra_reducers[type] = reduce
-
-def _reduce_method(m):
- if m.__self__ is None:
- return getattr, (m.__class__, m.__func__.__name__)
- else:
- return getattr, (m.__self__, m.__func__.__name__)
-class _C:
- def f(self):
- pass
-ForkingPickler.register(type(_C().f), _reduce_method)
-
-
-def _reduce_method_descriptor(m):
- return getattr, (m.__objclass__, m.__name__)
-ForkingPickler.register(type(list.append), _reduce_method_descriptor)
-ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
-
-try:
- from functools import partial
-except ImportError:
- pass
-else:
- def _reduce_partial(p):
- return _rebuild_partial, (p.func, p.args, p.keywords or {})
- def _rebuild_partial(func, args, keywords):
- return partial(func, *args, **keywords)
- ForkingPickler.register(partial, _reduce_partial)
-
-#
-# Unix
-#
-
-if sys.platform != 'win32':
- duplicate = os.dup
- close = os.close
-
- #
- # We define a Popen class similar to the one from subprocess, but
- # whose constructor takes a process object as its argument.
- #
-
- class Popen(object):
-
- def __init__(self, process_obj):
- sys.stdout.flush()
- sys.stderr.flush()
- self.returncode = None
-
- r, w = os.pipe()
- self.sentinel = r
-
- self.pid = os.fork()
- if self.pid == 0:
- os.close(r)
- if 'random' in sys.modules:
- import random
- random.seed()
- code = process_obj._bootstrap()
- os._exit(code)
-
- # `w` will be closed when the child exits, at which point `r`
- # will become ready for reading (using e.g. select()).
- os.close(w)
- util.Finalize(self, os.close, (r,))
-
- def poll(self, flag=os.WNOHANG):
- if self.returncode is None:
- while True:
- try:
- pid, sts = os.waitpid(self.pid, flag)
- except os.error as e:
- if e.errno == errno.EINTR:
- continue
- # Child process not yet created. See #1731717
- # e.errno == errno.ECHILD == 10
- return None
- else:
- break
- if pid == self.pid:
- if os.WIFSIGNALED(sts):
- self.returncode = -os.WTERMSIG(sts)
- else:
- assert os.WIFEXITED(sts)
- self.returncode = os.WEXITSTATUS(sts)
- return self.returncode
-
- def wait(self, timeout=None):
- if self.returncode is None:
- if timeout is not None:
- from multiprocessing.connection import wait
- if not wait([self.sentinel], timeout):
- return None
- # This shouldn't block if wait() returned successfully.
- return self.poll(os.WNOHANG if timeout == 0.0 else 0)
- return self.returncode
-
- def terminate(self):
- if self.returncode is None:
- try:
- os.kill(self.pid, signal.SIGTERM)
- except OSError:
- if self.wait(timeout=0.1) is None:
- raise
-
- @staticmethod
- def thread_is_spawning():
- return False
-
-#
-# Windows
-#
-
-else:
- import _thread
- import msvcrt
- import _winapi
-
- from pickle import load, HIGHEST_PROTOCOL
-
- def dump(obj, file, protocol=None):
- ForkingPickler(file, protocol).dump(obj)
-
- #
- #
- #
-
- TERMINATE = 0x10000
- WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
- WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
-
- close = _winapi.CloseHandle
-
- #
- # _python_exe is the assumed path to the python executable.
- # People embedding Python want to modify it.
- #
-
- if WINSERVICE:
- _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
- else:
- _python_exe = sys.executable
-
- def set_executable(exe):
- global _python_exe
- _python_exe = exe
-
- #
- #
- #
-
- def duplicate(handle, target_process=None, inheritable=False):
- if target_process is None:
- target_process = _winapi.GetCurrentProcess()
- return _winapi.DuplicateHandle(
- _winapi.GetCurrentProcess(), handle, target_process,
- 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS
- )
-
- #
- # We define a Popen class similar to the one from subprocess, but
- # whose constructor takes a process object as its argument.
- #
-
- class Popen(object):
- '''
- Start a subprocess to run the code of a process object
- '''
- _tls = _thread._local()
-
- def __init__(self, process_obj):
- cmd = ' '.join('"%s"' % x for x in get_command_line())
- prep_data = get_preparation_data(process_obj._name)
-
- # create pipe for communication with child
- rfd, wfd = os.pipe()
-
- # get handle for read end of the pipe and make it inheritable
- rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
- os.close(rfd)
-
- with open(wfd, 'wb', closefd=True) as to_child:
- # start process
- try:
- hp, ht, pid, tid = _winapi.CreateProcess(
- _python_exe, cmd + (' %s' % rhandle),
- None, None, 1, 0, None, None, None
- )
- _winapi.CloseHandle(ht)
- finally:
- close(rhandle)
-
- # set attributes of self
- self.pid = pid
- self.returncode = None
- self._handle = hp
- self.sentinel = int(hp)
- util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))
-
- # send information to child
- Popen._tls.process_handle = int(hp)
- try:
- dump(prep_data, to_child, HIGHEST_PROTOCOL)
- dump(process_obj, to_child, HIGHEST_PROTOCOL)
- finally:
- del Popen._tls.process_handle
-
- @staticmethod
- def thread_is_spawning():
- return getattr(Popen._tls, 'process_handle', None) is not None
-
- @staticmethod
- def duplicate_for_child(handle):
- return duplicate(handle, Popen._tls.process_handle)
-
- def wait(self, timeout=None):
- if self.returncode is None:
- if timeout is None:
- msecs = _winapi.INFINITE
- else:
- msecs = max(0, int(timeout * 1000 + 0.5))
-
- res = _winapi.WaitForSingleObject(int(self._handle), msecs)
- if res == _winapi.WAIT_OBJECT_0:
- code = _winapi.GetExitCodeProcess(self._handle)
- if code == TERMINATE:
- code = -signal.SIGTERM
- self.returncode = code
-
- return self.returncode
-
- def poll(self):
- return self.wait(timeout=0)
-
- def terminate(self):
- if self.returncode is None:
- try:
- _winapi.TerminateProcess(int(self._handle), TERMINATE)
- except OSError:
- if self.wait(timeout=1.0) is None:
- raise
-
- #
- #
- #
-
- def is_forking(argv):
- '''
- Return whether commandline indicates we are forking
- '''
- if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
- assert len(argv) == 3
- return True
- else:
- return False
-
-
- def freeze_support():
- '''
- Run code for process object if this in not the main process
- '''
- if is_forking(sys.argv):
- main()
- sys.exit()
-
-
- def get_command_line():
- '''
- Returns prefix of command line used for spawning a child process
- '''
- if getattr(process.current_process(), '_inheriting', False):
- raise RuntimeError('''
- Attempt to start a new process before the current process
- has finished its bootstrapping phase.
-
- This probably means that you are on Windows and you have
- forgotten to use the proper idiom in the main module:
-
- if __name__ == '__main__':
- freeze_support()
- ...
-
- The "freeze_support()" line can be omitted if the program
- is not going to be frozen to produce a Windows executable.''')
-
- if getattr(sys, 'frozen', False):
- return [sys.executable, '--multiprocessing-fork']
- else:
- prog = 'from multiprocessing.forking import main; main()'
- opts = util._args_from_interpreter_flags()
- return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']
-
-
- def main():
- '''
- Run code specified by data received over pipe
- '''
- assert is_forking(sys.argv)
-
- handle = int(sys.argv[-1])
- fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
- from_parent = os.fdopen(fd, 'rb')
-
- process.current_process()._inheriting = True
- preparation_data = load(from_parent)
- prepare(preparation_data)
- self = load(from_parent)
- process.current_process()._inheriting = False
-
- from_parent.close()
-
- exitcode = self._bootstrap()
- sys.exit(exitcode)
-
-
- def get_preparation_data(name):
- '''
- Return info about parent needed by child to unpickle process object
- '''
- from .util import _logger, _log_to_stderr
-
- d = dict(
- name=name,
- sys_path=sys.path,
- sys_argv=sys.argv,
- log_to_stderr=_log_to_stderr,
- orig_dir=process.ORIGINAL_DIR,
- authkey=process.current_process().authkey,
- )
-
- if _logger is not None:
- d['log_level'] = _logger.getEffectiveLevel()
-
- if not WINEXE and not WINSERVICE:
- main_path = getattr(sys.modules['__main__'], '__file__', None)
- if not main_path and sys.argv[0] not in ('', '-c'):
- main_path = sys.argv[0]
- if main_path is not None:
- if not os.path.isabs(main_path) and \
- process.ORIGINAL_DIR is not None:
- main_path = os.path.join(process.ORIGINAL_DIR, main_path)
- d['main_path'] = os.path.normpath(main_path)
-
- return d
-
-#
-# Prepare current process
-#
-
-old_main_modules = []
-
-def prepare(data):
- '''
- Try to get current process ready to unpickle process object
- '''
- old_main_modules.append(sys.modules['__main__'])
-
- if 'name' in data:
- process.current_process().name = data['name']
-
- if 'authkey' in data:
- process.current_process()._authkey = data['authkey']
-
- if 'log_to_stderr' in data and data['log_to_stderr']:
- util.log_to_stderr()
-
- if 'log_level' in data:
- util.get_logger().setLevel(data['log_level'])
-
- if 'sys_path' in data:
- sys.path = data['sys_path']
-
- if 'sys_argv' in data:
- sys.argv = data['sys_argv']
-
- if 'dir' in data:
- os.chdir(data['dir'])
-
- if 'orig_dir' in data:
- process.ORIGINAL_DIR = data['orig_dir']
-
- if 'main_path' in data:
- # XXX (ncoghlan): The following code makes several bogus
- # assumptions regarding the relationship between __file__
- # and a module's real name. See PEP 302 and issue #10845
- main_path = data['main_path']
- main_name = os.path.splitext(os.path.basename(main_path))[0]
- if main_name == '__init__':
- main_name = os.path.basename(os.path.dirname(main_path))
-
- if main_name == '__main__':
- main_module = sys.modules['__main__']
- main_module.__file__ = main_path
- elif main_name != 'ipython':
- # Main modules not actually called __main__.py may
- # contain additional code that should still be executed
- import imp
-
- if main_path is None:
- dirs = None
- elif os.path.basename(main_path).startswith('__init__.py'):
- dirs = [os.path.dirname(os.path.dirname(main_path))]
- else:
- dirs = [os.path.dirname(main_path)]
-
- assert main_name not in sys.modules, main_name
- file, path_name, etc = imp.find_module(main_name, dirs)
- try:
- # We would like to do "imp.load_module('__main__', ...)"
- # here. However, that would cause 'if __name__ ==
- # "__main__"' clauses to be executed.
- main_module = imp.load_module(
- '__parents_main__', file, path_name, etc
- )
- finally:
- if file:
- file.close()
-
- sys.modules['__main__'] = main_module
- main_module.__name__ = '__main__'
-
- # Try to make the potentially picklable objects in
- # sys.modules['__main__'] realize they are in the main
- # module -- somewhat ugly.
- for obj in list(main_module.__dict__.values()):
- try:
- if obj.__module__ == '__parents_main__':
- obj.__module__ = '__main__'
- except Exception:
- pass
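
forking.py is deleted, but its ForkingPickler lives on in the new reduction module (connection.py above now imports it from there), and registering a custom reducer goes through reduction.register(), exactly as managers.py does below for array.array. A sketch with a hypothetical Point class (an ordinary class like this would pickle fine anyway; the hook matters for types such as sockets and connections):

    from multiprocessing import reduction

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def rebuild_point(x, y):
        return Point(x, y)

    def reduce_point(p):
        # Used by ForkingPickler whenever a Point crosses a process boundary.
        return rebuild_point, (p.x, p.y)

    reduction.register(Point, reduce_point)
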
diff --git a/Lib/multiprocessing/forkserver.py b/Lib/multiprocessing/forkserver.py
new file mode 100644
index 0000000000..387517ebdc
--- /dev/null
+++ b/Lib/multiprocessing/forkserver.py
@@ -0,0 +1,267 @@
+import errno
+import os
+import selectors
+import signal
+import socket
+import struct
+import sys
+import threading
+
+from . import connection
+from . import process
+from . import reduction
+from . import semaphore_tracker
+from . import spawn
+from . import util
+
+__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
+ 'set_forkserver_preload']
+
+#
+#
+#
+
+MAXFDS_TO_SEND = 256
+UNSIGNED_STRUCT = struct.Struct('Q') # large enough for pid_t
+
+#
+# Forkserver class
+#
+
+class ForkServer(object):
+
+ def __init__(self):
+ self._forkserver_address = None
+ self._forkserver_alive_fd = None
+ self._inherited_fds = None
+ self._lock = threading.Lock()
+ self._preload_modules = ['__main__']
+
+ def set_forkserver_preload(self, module_names):
+ '''Set list of module names to try to load in forkserver process.'''
+ if not all(type(mod) is str for mod in module_names):
+ raise TypeError('module_names must be a list of strings')
+ self._preload_modules = module_names
+
+ def get_inherited_fds(self):
+ '''Return list of fds inherited from parent process.
+
+ This returns None if the current process was not started by fork
+ server.
+ '''
+ return self._inherited_fds
+
+ def connect_to_new_process(self, fds):
+ '''Request forkserver to create a child process.
+
+ Returns a pair of fds (status_r, data_w). The calling process can read
+ the child process's pid and (eventually) its returncode from status_r.
+ The calling process should write to data_w the pickled preparation and
+ process data.
+ '''
+ self.ensure_running()
+ if len(fds) + 4 >= MAXFDS_TO_SEND:
+ raise ValueError('too many fds')
+ with socket.socket(socket.AF_UNIX) as client:
+ client.connect(self._forkserver_address)
+ parent_r, child_w = os.pipe()
+ child_r, parent_w = os.pipe()
+ allfds = [child_r, child_w, self._forkserver_alive_fd,
+ semaphore_tracker.getfd()]
+ allfds += fds
+ try:
+ reduction.sendfds(client, allfds)
+ return parent_r, parent_w
+ except:
+ os.close(parent_r)
+ os.close(parent_w)
+ raise
+ finally:
+ os.close(child_r)
+ os.close(child_w)
+
+ def ensure_running(self):
+ '''Make sure that a fork server is running.
+
+ This can be called from any process. Note that usually a child
+ process will just reuse the forkserver started by its parent, so
+ ensure_running() will do nothing.
+ '''
+ with self._lock:
+ semaphore_tracker.ensure_running()
+ if self._forkserver_alive_fd is not None:
+ return
+
+ cmd = ('from multiprocessing.forkserver import main; ' +
+ 'main(%d, %d, %r, **%r)')
+
+ if self._preload_modules:
+ desired_keys = {'main_path', 'sys_path'}
+ data = spawn.get_preparation_data('ignore')
+ data = dict((x,y) for (x,y) in data.items()
+ if x in desired_keys)
+ else:
+ data = {}
+
+ with socket.socket(socket.AF_UNIX) as listener:
+ address = connection.arbitrary_address('AF_UNIX')
+ listener.bind(address)
+ os.chmod(address, 0o600)
+ listener.listen(100)
+
+ # all client processes own the write end of the "alive" pipe;
+ # when they all terminate the read end becomes ready.
+ alive_r, alive_w = os.pipe()
+ try:
+ fds_to_pass = [listener.fileno(), alive_r]
+ cmd %= (listener.fileno(), alive_r, self._preload_modules,
+ data)
+ exe = spawn.get_executable()
+ args = [exe] + util._args_from_interpreter_flags()
+ args += ['-c', cmd]
+ pid = util.spawnv_passfds(exe, args, fds_to_pass)
+ except:
+ os.close(alive_w)
+ raise
+ finally:
+ os.close(alive_r)
+ self._forkserver_address = address
+ self._forkserver_alive_fd = alive_w
+
+#
+#
+#
+
+def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
+ '''Run forkserver.'''
+ if preload:
+ if '__main__' in preload and main_path is not None:
+ process.current_process()._inheriting = True
+ try:
+ spawn.import_main_path(main_path)
+ finally:
+ del process.current_process()._inheriting
+ for modname in preload:
+ try:
+ __import__(modname)
+ except ImportError:
+ pass
+
+ # close sys.stdin
+ if sys.stdin is not None:
+ try:
+ sys.stdin.close()
+ sys.stdin = open(os.devnull)
+ except (OSError, ValueError):
+ pass
+
+ # ignoring SIGCHLD means no need to reap zombie processes
+ handler = signal.signal(signal.SIGCHLD, signal.SIG_IGN)
+ with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
+ selectors.DefaultSelector() as selector:
+ _forkserver._forkserver_address = listener.getsockname()
+
+ selector.register(listener, selectors.EVENT_READ)
+ selector.register(alive_r, selectors.EVENT_READ)
+
+ while True:
+ try:
+ while True:
+ rfds = [key.fileobj for (key, events) in selector.select()]
+ if rfds:
+ break
+
+ if alive_r in rfds:
+ # EOF because no more client processes left
+ assert os.read(alive_r, 1) == b''
+ raise SystemExit
+
+ assert listener in rfds
+ with listener.accept()[0] as s:
+ code = 1
+ if os.fork() == 0:
+ try:
+ _serve_one(s, listener, alive_r, handler)
+ except Exception:
+ sys.excepthook(*sys.exc_info())
+ sys.stderr.flush()
+ finally:
+ os._exit(code)
+
+ except InterruptedError:
+ pass
+ except OSError as e:
+ if e.errno != errno.ECONNABORTED:
+ raise
+
+def _serve_one(s, listener, alive_r, handler):
+ # close unnecessary stuff and reset SIGCHLD handler
+ listener.close()
+ os.close(alive_r)
+ signal.signal(signal.SIGCHLD, handler)
+
+ # receive fds from parent process
+ fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
+ s.close()
+ assert len(fds) <= MAXFDS_TO_SEND
+ (child_r, child_w, _forkserver._forkserver_alive_fd,
+ stfd, *_forkserver._inherited_fds) = fds
+ semaphore_tracker._semaphore_tracker._fd = stfd
+
+ # send pid to client processes
+ write_unsigned(child_w, os.getpid())
+
+ # reseed random number generator
+ if 'random' in sys.modules:
+ import random
+ random.seed()
+
+ # run process object received over pipe
+ code = spawn._main(child_r)
+
+ # write the exit code to the pipe
+ write_unsigned(child_w, code)
+
+#
+# Read and write unsigned numbers
+#
+
+def read_unsigned(fd):
+ data = b''
+ length = UNSIGNED_STRUCT.size
+ while len(data) < length:
+ while True:
+ try:
+ s = os.read(fd, length - len(data))
+ except InterruptedError:
+ pass
+ else:
+ break
+ if not s:
+ raise EOFError('unexpected EOF')
+ data += s
+ return UNSIGNED_STRUCT.unpack(data)[0]
+
+def write_unsigned(fd, n):
+ msg = UNSIGNED_STRUCT.pack(n)
+ while msg:
+ while True:
+ try:
+ nbytes = os.write(fd, msg)
+ except InterruptedError:
+ pass
+ else:
+ break
+ if nbytes == 0:
+ raise RuntimeError('should not get here')
+ msg = msg[nbytes:]
+
+#
+#
+#
+
+_forkserver = ForkServer()
+ensure_running = _forkserver.ensure_running
+get_inherited_fds = _forkserver.get_inherited_fds
+connect_to_new_process = _forkserver.connect_to_new_process
+set_forkserver_preload = _forkserver.set_forkserver_preload
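
User code never talks to this module directly; the fork server is driven through the 'forkserver' start method, with set_forkserver_preload() (the BaseContext method added above) as a hint for modules to import once in the server process. A sketch of how it is switched on (POSIX only, since it relies on fd passing):

    import multiprocessing as mp

    def work(x):
        return x * x

    if __name__ == '__main__':
        mp.set_start_method('forkserver')
        mp.set_forkserver_preload(['__main__'])
        with mp.Pool(4) as pool:
            print(pool.map(work, range(10)))
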
diff --git a/Lib/multiprocessing/heap.py b/Lib/multiprocessing/heap.py
index e63fdb8755..344a45f89d 100644
--- a/Lib/multiprocessing/heap.py
+++ b/Lib/multiprocessing/heap.py
@@ -11,12 +11,12 @@ import bisect
import mmap
import os
import sys
+import tempfile
import threading
-import itertools
-import _multiprocessing
-from multiprocessing.util import Finalize, info
-from multiprocessing.forking import assert_spawning
+from . import context
+from . import reduction
+from . import util
__all__ = ['BufferWrapper']
@@ -30,17 +30,25 @@ if sys.platform == 'win32':
class Arena(object):
- _counter = itertools.count()
+ _rand = tempfile._RandomNameSequence()
def __init__(self, size):
self.size = size
- self.name = 'pym-%d-%d' % (os.getpid(), next(Arena._counter))
- self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
- assert _winapi.GetLastError() == 0, 'tagname already in use'
+ for i in range(100):
+ name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
+ buf = mmap.mmap(-1, size, tagname=name)
+ if _winapi.GetLastError() == 0:
+ break
+ # We have reopened a preexisting mmap.
+ buf.close()
+ else:
+ raise FileExistsError('Cannot find name for new mmap')
+ self.name = name
+ self.buffer = buf
self._state = (self.size, self.name)
def __getstate__(self):
- assert_spawning(self)
+ context.assert_spawning(self)
return self._state
def __setstate__(self, state):
@@ -52,10 +60,28 @@ else:
class Arena(object):
- def __init__(self, size):
- self.buffer = mmap.mmap(-1, size)
+ def __init__(self, size, fd=-1):
self.size = size
- self.name = None
+ self.fd = fd
+ if fd == -1:
+ self.fd, name = tempfile.mkstemp(
+ prefix='pym-%d-'%os.getpid(), dir=util.get_temp_dir())
+ os.unlink(name)
+ util.Finalize(self, os.close, (self.fd,))
+ with open(self.fd, 'wb', closefd=False) as f:
+ f.write(b'\0'*size)
+ self.buffer = mmap.mmap(self.fd, self.size)
+
+ def reduce_arena(a):
+ if a.fd == -1:
+ raise ValueError('Arena is unpicklable because '
+ 'forking was enabled when it was created')
+ return rebuild_arena, (a.size, reduction.DupFd(a.fd))
+
+ def rebuild_arena(size, dupfd):
+ return Arena(size, dupfd.detach())
+
+ reduction.register(Arena, reduce_arena)
#
# Class allowing allocation of chunks of memory from arenas
@@ -90,7 +116,7 @@ class Heap(object):
if i == len(self._lengths):
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
self._size *= 2
- info('allocating a new mmap of length %d', length)
+ util.info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
@@ -216,7 +242,7 @@ class BufferWrapper(object):
assert 0 <= size < sys.maxsize
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
- Finalize(self, BufferWrapper._heap.free, args=(block,))
+ util.Finalize(self, BufferWrapper._heap.free, args=(block,))
def create_memoryview(self):
(arena, start, stop), size = self._state
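
The POSIX Arena now keeps a real (unlinked) file behind its mmap, so reduce_arena can ship the descriptor to another process via reduction.DupFd instead of relying on fork inheritance. The underlying idea in isolation, as a minimal sketch:

    import mmap, os, tempfile

    size = 4096
    fd, name = tempfile.mkstemp()
    os.unlink(name)              # no name on disk, but the fd stays passable
    os.write(fd, b'\0' * size)

    buf = mmap.mmap(fd, size)
    buf[:5] = b'hello'

    # A second mapping of the same fd -- standing in for the one the child
    # process would create after receiving the descriptor -- sees the data.
    buf2 = mmap.mmap(fd, size)
    assert buf2[:5] == b'hello'
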
diff --git a/Lib/multiprocessing/managers.py b/Lib/multiprocessing/managers.py
index 96056b04ea..66d46fcc2a 100644
--- a/Lib/multiprocessing/managers.py
+++ b/Lib/multiprocessing/managers.py
@@ -19,11 +19,16 @@ import threading
import array
import queue
-from traceback import format_exc
-from multiprocessing import Process, current_process, active_children, Pool, util, connection
-from multiprocessing.process import AuthenticationString
-from multiprocessing.forking import Popen, ForkingPickler
from time import time as _time
+from traceback import format_exc
+
+from . import connection
+from . import context
+from . import pool
+from . import process
+from . import reduction
+from . import util
+from . import get_context
#
# Register some things for pickling
@@ -31,16 +36,14 @@ from time import time as _time
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
-ForkingPickler.register(array.array, reduce_array)
+reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
- ForkingPickler.register(view_type, rebuild_as_list)
- import copyreg
- copyreg.pickle(view_type, rebuild_as_list)
+ reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
@@ -130,7 +133,7 @@ class Server(object):
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
- self.authkey = AuthenticationString(authkey)
+ self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
@@ -146,7 +149,7 @@ class Server(object):
Run the server forever
'''
self.stop_event = threading.Event()
- current_process()._manager_server = self
+ process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
@@ -167,7 +170,7 @@ class Server(object):
while True:
try:
c = self.listener.accept()
- except (OSError, IOError):
+ except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
@@ -436,15 +439,17 @@ class BaseManager(object):
_registry = {}
_Server = Server
- def __init__(self, address=None, authkey=None, serializer='pickle'):
+ def __init__(self, address=None, authkey=None, serializer='pickle',
+ ctx=None):
if authkey is None:
- authkey = current_process().authkey
+ authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
- self._authkey = AuthenticationString(authkey)
+ self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
+ self._ctx = ctx or get_context()
def get_server(self):
'''
@@ -476,7 +481,7 @@ class BaseManager(object):
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
- self._process = Process(
+ self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
@@ -691,11 +696,11 @@ class BaseProxy(object):
self._Client = listener_client[serializer][1]
if authkey is not None:
- self._authkey = AuthenticationString(authkey)
+ self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
- self._authkey = current_process().authkey
+ self._authkey = process.current_process().authkey
if incref:
self._incref()
@@ -704,7 +709,7 @@ class BaseProxy(object):
def _connect(self):
util.debug('making connection to manager')
- name = current_process().name
+ name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
@@ -798,7 +803,7 @@ class BaseProxy(object):
def __reduce__(self):
kwds = {}
- if Popen.thread_is_spawning():
+ if context.get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
@@ -835,14 +840,14 @@ def RebuildProxy(func, token, serializer, kwds):
If possible the shared object is returned, or otherwise a proxy for it.
'''
- server = getattr(current_process(), '_manager_server', None)
+ server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
- not getattr(current_process(), '_inheriting', False)
+ not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
@@ -889,7 +894,7 @@ def AutoProxy(token, serializer, manager=None, authkey=None,
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
- authkey = current_process().authkey
+ authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
@@ -1072,17 +1077,22 @@ ArrayProxy = MakeProxyType('ArrayProxy', (
))
-PoolProxy = MakeProxyType('PoolProxy', (
+BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
- 'map', 'map_async', 'starmap', 'starmap_async', 'terminate'
+ 'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
-PoolProxy._method_to_typeid_ = {
+BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
+class PoolProxy(BasePoolProxy):
+ def __enter__(self):
+ return self
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.terminate()
#
# Definition of SyncManager
@@ -1109,7 +1119,7 @@ SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
-SyncManager.register('Pool', Pool, PoolProxy)
+SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
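
The new `ctx` parameter above ties a manager's server process to an explicit start method. A minimal sketch of how that is used, assuming the context API introduced by this change (the names here are illustrative):

    import multiprocessing
    from multiprocessing.managers import SyncManager

    if __name__ == '__main__':
        ctx = multiprocessing.get_context('spawn')
        manager = SyncManager(ctx=ctx)   # server process started via 'spawn'
        manager.start()
        try:
            d = manager.dict()
            d['answer'] = 42
            print(d['answer'])           # -> 42
        finally:
            manager.shutdown()
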
diff --git a/Lib/multiprocessing/pool.py b/Lib/multiprocessing/pool.py
index 0f2dab48eb..8832a5ceb2 100644
--- a/Lib/multiprocessing/pool.py
+++ b/Lib/multiprocessing/pool.py
@@ -7,7 +7,7 @@
# Licensed to PSF under a Contributor Agreement.
#
-__all__ = ['Pool']
+__all__ = ['Pool', 'ThreadPool']
#
# Imports
@@ -17,10 +17,14 @@ import threading
import queue
import itertools
import collections
+import os
import time
+import traceback
-from multiprocessing import Process, cpu_count, TimeoutError
-from multiprocessing.util import Finalize, debug
+# If threading is available then ThreadPool should be provided. Therefore
+# we avoid top-level imports which are liable to fail on some systems.
+from . import util
+from . import get_context, TimeoutError
#
# Constants representing the state of a pool
@@ -43,6 +47,29 @@ def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
#
+# Hack to embed stringification of remote traceback in local traceback
+#
+
+class RemoteTraceback(Exception):
+ def __init__(self, tb):
+ self.tb = tb
+ def __str__(self):
+ return self.tb
+
+class ExceptionWithTraceback:
+ def __init__(self, exc, tb):
+ tb = traceback.format_exception(type(exc), exc, tb)
+ tb = ''.join(tb)
+ self.exc = exc
+ self.tb = '\n"""\n%s"""' % tb
+ def __reduce__(self):
+ return rebuild_exc, (self.exc, self.tb)
+
+def rebuild_exc(exc, tb):
+ exc.__cause__ = RemoteTraceback(tb)
+ return exc
+
+#
# Code run by worker processes
#
@@ -63,7 +90,8 @@ class MaybeEncodingError(Exception):
return "<MaybeEncodingError: %s>" % str(self)
-def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
+def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
+ wrap_exception=False):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
@@ -78,28 +106,30 @@ def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
- except (EOFError, IOError):
- debug('worker got EOFError or IOError -- exiting')
+ except (EOFError, OSError):
+ util.debug('worker got EOFError or OSError -- exiting')
break
if task is None:
- debug('worker got sentinel -- exiting')
+ util.debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception as e:
+ if wrap_exception:
+ e = ExceptionWithTraceback(e, e.__traceback__)
result = (False, e)
try:
put((job, i, result))
except Exception as e:
wrapped = MaybeEncodingError(e, result[1])
- debug("Possible encoding error while sending result: %s" % (
+ util.debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
completed += 1
- debug('worker exiting after %d tasks' % completed)
+ util.debug('worker exiting after %d tasks' % completed)
#
# Class representing a process pool
@@ -109,10 +139,14 @@ class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
- Process = Process
+ _wrap_exception = True
+
+ def Process(self, *args, **kwds):
+ return self._ctx.Process(*args, **kwds)
def __init__(self, processes=None, initializer=None, initargs=(),
- maxtasksperchild=None):
+ maxtasksperchild=None, context=None):
+ self._ctx = context or get_context()
self._setup_queues()
self._taskqueue = queue.Queue()
self._cache = {}
@@ -122,10 +156,7 @@ class Pool(object):
self._initargs = initargs
if processes is None:
- try:
- processes = cpu_count()
- except NotImplementedError:
- processes = 1
+ processes = os.cpu_count() or 1
if processes < 1:
raise ValueError("Number of processes must be at least 1")
@@ -162,7 +193,7 @@ class Pool(object):
self._result_handler._state = RUN
self._result_handler.start()
- self._terminate = Finalize(
+ self._terminate = util.Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._worker_handler, self._task_handler,
@@ -179,7 +210,7 @@ class Pool(object):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
- debug('cleaning up worker %d' % i)
+ util.debug('cleaning up worker %d' % i)
worker.join()
cleaned = True
del self._pool[i]
@@ -193,13 +224,14 @@ class Pool(object):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
- self._initargs, self._maxtasksperchild)
+ self._initargs, self._maxtasksperchild,
+ self._wrap_exception)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
- debug('added worker')
+ util.debug('added worker')
def _maintain_pool(self):
"""Clean up any exited workers and start replacements for them.
@@ -208,9 +240,8 @@ class Pool(object):
self._repopulate_pool()
def _setup_queues(self):
- from .queues import SimpleQueue
- self._inqueue = SimpleQueue()
- self._outqueue = SimpleQueue()
+ self._inqueue = self._ctx.SimpleQueue()
+ self._outqueue = self._ctx.SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
@@ -336,7 +367,7 @@ class Pool(object):
time.sleep(0.1)
# send sentinel to stop workers
pool._taskqueue.put(None)
- debug('worker handler exiting')
+ util.debug('worker handler exiting')
@staticmethod
def _handle_tasks(taskqueue, put, outqueue, pool, cache):
@@ -346,7 +377,7 @@ class Pool(object):
i = -1
for i, task in enumerate(taskseq):
if thread._state:
- debug('task handler found thread._state != RUN')
+ util.debug('task handler found thread._state != RUN')
break
try:
put(task)
@@ -358,27 +389,27 @@ class Pool(object):
pass
else:
if set_length:
- debug('doing set_length()')
+ util.debug('doing set_length()')
set_length(i+1)
continue
break
else:
- debug('task handler got sentinel')
+ util.debug('task handler got sentinel')
try:
# tell result handler to finish when cache is empty
- debug('task handler sending sentinel to result handler')
+ util.debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
- debug('task handler sending sentinel to workers')
+ util.debug('task handler sending sentinel to workers')
for p in pool:
put(None)
- except IOError:
- debug('task handler got IOError when sending sentinels')
+ except OSError:
+ util.debug('task handler got OSError when sending sentinels')
- debug('task handler exiting')
+ util.debug('task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache):
@@ -387,17 +418,17 @@ class Pool(object):
while 1:
try:
task = get()
- except (IOError, EOFError):
- debug('result handler got EOFError/IOError -- exiting')
+ except (OSError, EOFError):
+ util.debug('result handler got EOFError/OSError -- exiting')
return
if thread._state:
assert thread._state == TERMINATE
- debug('result handler found thread._state=TERMINATE')
+ util.debug('result handler found thread._state=TERMINATE')
break
if task is None:
- debug('result handler got sentinel')
+ util.debug('result handler got sentinel')
break
job, i, obj = task
@@ -409,12 +440,12 @@ class Pool(object):
while cache and thread._state != TERMINATE:
try:
task = get()
- except (IOError, EOFError):
- debug('result handler got EOFError/IOError -- exiting')
+ except (OSError, EOFError):
+ util.debug('result handler got EOFError/OSError -- exiting')
return
if task is None:
- debug('result handler ignoring extra sentinel')
+ util.debug('result handler ignoring extra sentinel')
continue
job, i, obj = task
try:
@@ -423,7 +454,7 @@ class Pool(object):
pass
if hasattr(outqueue, '_reader'):
- debug('ensuring that outqueue is not full')
+ util.debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
@@ -432,10 +463,10 @@ class Pool(object):
if not outqueue._reader.poll():
break
get()
- except (IOError, EOFError):
+ except (OSError, EOFError):
pass
- debug('result handler exiting: len(cache)=%s, thread._state=%s',
+ util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), thread._state)
@staticmethod
@@ -453,19 +484,19 @@ class Pool(object):
)
def close(self):
- debug('closing pool')
+ util.debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._worker_handler._state = CLOSE
def terminate(self):
- debug('terminating pool')
+ util.debug('terminating pool')
self._state = TERMINATE
self._worker_handler._state = TERMINATE
self._terminate()
def join(self):
- debug('joining pool')
+ util.debug('joining pool')
assert self._state in (CLOSE, TERMINATE)
self._worker_handler.join()
self._task_handler.join()
@@ -476,7 +507,7 @@ class Pool(object):
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# task_handler may be blocked trying to put items on inqueue
- debug('removing tasks from inqueue until task handler finished')
+ util.debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
@@ -486,12 +517,12 @@ class Pool(object):
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler, result_handler, cache):
# this is guaranteed to only be called once
- debug('finalizing pool')
+ util.debug('finalizing pool')
worker_handler._state = TERMINATE
task_handler._state = TERMINATE
- debug('helping task handler/workers to finish')
+ util.debug('helping task handler/workers to finish')
cls._help_stuff_finish(inqueue, task_handler, len(pool))
assert result_handler.is_alive() or len(cache) == 0
@@ -501,31 +532,31 @@ class Pool(object):
# We must wait for the worker handler to exit before terminating
# workers because we don't want workers to be restarted behind our back.
- debug('joining worker handler')
+ util.debug('joining worker handler')
if threading.current_thread() is not worker_handler:
worker_handler.join()
# Terminate workers which haven't already finished.
if pool and hasattr(pool[0], 'terminate'):
- debug('terminating workers')
+ util.debug('terminating workers')
for p in pool:
if p.exitcode is None:
p.terminate()
- debug('joining task handler')
+ util.debug('joining task handler')
if threading.current_thread() is not task_handler:
task_handler.join()
- debug('joining result handler')
+ util.debug('joining result handler')
if threading.current_thread() is not result_handler:
result_handler.join()
if pool and hasattr(pool[0], 'terminate'):
- debug('joining pool workers')
+ util.debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
- debug('cleaning up worker %d' % p.pid)
+ util.debug('cleaning up worker %d' % p.pid)
p.join()
def __enter__(self):
@@ -710,8 +741,12 @@ class IMapUnorderedIterator(IMapIterator):
#
class ThreadPool(Pool):
+ _wrap_exception = False
- from .dummy import Process
+ @staticmethod
+ def Process(*args, **kwds):
+ from .dummy import Process
+ return Process(*args, **kwds)
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
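
A quick sketch of the wrapped-exception behaviour added to pool.py above: when a worker raises, the exception re-raised in the parent carries the stringified remote traceback as its `__cause__` (assuming the default start method works on this platform):

    import multiprocessing

    def fail(x):
        return 1 // x                    # ZeroDivisionError in the worker

    if __name__ == '__main__':
        with multiprocessing.Pool(2) as pool:
            try:
                pool.apply(fail, (0,))
            except ZeroDivisionError as exc:
                print(exc.__cause__)     # RemoteTraceback with worker stack
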
diff --git a/Lib/multiprocessing/popen_fork.py b/Lib/multiprocessing/popen_fork.py
new file mode 100644
index 0000000000..367e72e2b1
--- /dev/null
+++ b/Lib/multiprocessing/popen_fork.py
@@ -0,0 +1,83 @@
+import os
+import sys
+import signal
+import errno
+
+from . import util
+
+__all__ = ['Popen']
+
+#
+# Start child process using fork
+#
+
+class Popen(object):
+ method = 'fork'
+
+ def __init__(self, process_obj):
+ sys.stdout.flush()
+ sys.stderr.flush()
+ self.returncode = None
+ self._launch(process_obj)
+
+ def duplicate_for_child(self, fd):
+ return fd
+
+ def poll(self, flag=os.WNOHANG):
+ if self.returncode is None:
+ while True:
+ try:
+ pid, sts = os.waitpid(self.pid, flag)
+ except OSError as e:
+ if e.errno == errno.EINTR:
+ continue
+ # Child process not yet created. See #1731717
+ # e.errno == errno.ECHILD == 10
+ return None
+ else:
+ break
+ if pid == self.pid:
+ if os.WIFSIGNALED(sts):
+ self.returncode = -os.WTERMSIG(sts)
+ else:
+ assert os.WIFEXITED(sts)
+ self.returncode = os.WEXITSTATUS(sts)
+ return self.returncode
+
+ def wait(self, timeout=None):
+ if self.returncode is None:
+ if timeout is not None:
+ from multiprocessing.connection import wait
+ if not wait([self.sentinel], timeout):
+ return None
+ # This shouldn't block if wait() returned successfully.
+ return self.poll(os.WNOHANG if timeout == 0.0 else 0)
+ return self.returncode
+
+ def terminate(self):
+ if self.returncode is None:
+ try:
+ os.kill(self.pid, signal.SIGTERM)
+ except ProcessLookupError:
+ pass
+ except OSError:
+ if self.wait(timeout=0.1) is None:
+ raise
+
+ def _launch(self, process_obj):
+ code = 1
+ parent_r, child_w = os.pipe()
+ self.pid = os.fork()
+ if self.pid == 0:
+ try:
+ os.close(parent_r)
+ if 'random' in sys.modules:
+ import random
+ random.seed()
+ code = process_obj._bootstrap()
+ finally:
+ os._exit(code)
+ else:
+ os.close(child_w)
+ util.Finalize(self, os.close, (parent_r,))
+ self.sentinel = parent_r
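
The fork Popen above exposes the read end of a pipe as `sentinel`; it becomes readable when the child exits, which is what `multiprocessing.connection.wait()` polls. A sketch, assuming a Unix platform where the 'fork' method exists:

    import multiprocessing
    from multiprocessing.connection import wait

    def work():
        pass

    if __name__ == '__main__':
        ctx = multiprocessing.get_context('fork')
        p = ctx.Process(target=work)
        p.start()
        wait([p.sentinel])               # returns once the child has exited
        p.join()
        print(p.exitcode)                # -> 0
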
diff --git a/Lib/multiprocessing/popen_forkserver.py b/Lib/multiprocessing/popen_forkserver.py
new file mode 100644
index 0000000000..e792194f44
--- /dev/null
+++ b/Lib/multiprocessing/popen_forkserver.py
@@ -0,0 +1,69 @@
+import io
+import os
+
+from . import reduction
+if not reduction.HAVE_SEND_HANDLE:
+ raise ImportError('No support for sending fds between processes')
+from . import context
+from . import forkserver
+from . import popen_fork
+from . import spawn
+from . import util
+
+
+__all__ = ['Popen']
+
+#
+# Wrapper for an fd used while launching a process
+#
+
+class _DupFd(object):
+ def __init__(self, ind):
+ self.ind = ind
+ def detach(self):
+ return forkserver.get_inherited_fds()[self.ind]
+
+#
+# Start child process using a server process
+#
+
+class Popen(popen_fork.Popen):
+ method = 'forkserver'
+ DupFd = _DupFd
+
+ def __init__(self, process_obj):
+ self._fds = []
+ super().__init__(process_obj)
+
+ def duplicate_for_child(self, fd):
+ self._fds.append(fd)
+ return len(self._fds) - 1
+
+ def _launch(self, process_obj):
+ prep_data = spawn.get_preparation_data(process_obj._name)
+ buf = io.BytesIO()
+ context.set_spawning_popen(self)
+ try:
+ reduction.dump(prep_data, buf)
+ reduction.dump(process_obj, buf)
+ finally:
+ context.set_spawning_popen(None)
+
+ self.sentinel, w = forkserver.connect_to_new_process(self._fds)
+ util.Finalize(self, os.close, (self.sentinel,))
+ with open(w, 'wb', closefd=True) as f:
+ f.write(buf.getbuffer())
+ self.pid = forkserver.read_unsigned(self.sentinel)
+
+ def poll(self, flag=os.WNOHANG):
+ if self.returncode is None:
+ from multiprocessing.connection import wait
+ timeout = 0 if flag == os.WNOHANG else None
+ if not wait([self.sentinel], timeout):
+ return None
+ try:
+ self.returncode = forkserver.read_unsigned(self.sentinel)
+ except (OSError, EOFError):
+ # The process ended abnormally perhaps because of a signal
+ self.returncode = 255
+ return self.returncode
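
Using the forkserver method added here looks the same as any other context; the first Process triggers the server. A sketch, assuming fd passing is available (i.e. HAVE_SEND_HANDLE above is true):

    import multiprocessing

    def work(q):
        q.put('hello from a forkserver child')

    if __name__ == '__main__':
        ctx = multiprocessing.get_context('forkserver')
        q = ctx.Queue()
        p = ctx.Process(target=work, args=(q,))
        p.start()
        print(q.get())
        p.join()
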
diff --git a/Lib/multiprocessing/popen_spawn_posix.py b/Lib/multiprocessing/popen_spawn_posix.py
new file mode 100644
index 0000000000..6b0a8d635f
--- /dev/null
+++ b/Lib/multiprocessing/popen_spawn_posix.py
@@ -0,0 +1,69 @@
+import io
+import os
+
+from . import context
+from . import popen_fork
+from . import reduction
+from . import spawn
+from . import util
+
+__all__ = ['Popen']
+
+
+#
+# Wrapper for an fd used while launching a process
+#
+
+class _DupFd(object):
+ def __init__(self, fd):
+ self.fd = fd
+ def detach(self):
+ return self.fd
+
+#
+# Start child process using a fresh interpreter
+#
+
+class Popen(popen_fork.Popen):
+ method = 'spawn'
+ DupFd = _DupFd
+
+ def __init__(self, process_obj):
+ self._fds = []
+ super().__init__(process_obj)
+
+ def duplicate_for_child(self, fd):
+ self._fds.append(fd)
+ return fd
+
+ def _launch(self, process_obj):
+ from . import semaphore_tracker
+ tracker_fd = semaphore_tracker.getfd()
+ self._fds.append(tracker_fd)
+ prep_data = spawn.get_preparation_data(process_obj._name)
+ fp = io.BytesIO()
+ context.set_spawning_popen(self)
+ try:
+ reduction.dump(prep_data, fp)
+ reduction.dump(process_obj, fp)
+ finally:
+ context.set_spawning_popen(None)
+
+ parent_r = child_w = child_r = parent_w = None
+ try:
+ parent_r, child_w = os.pipe()
+ child_r, parent_w = os.pipe()
+ cmd = spawn.get_command_line(tracker_fd=tracker_fd,
+ pipe_handle=child_r)
+ self._fds.extend([child_r, child_w])
+ self.pid = util.spawnv_passfds(spawn.get_executable(),
+ cmd, self._fds)
+ self.sentinel = parent_r
+ with open(parent_w, 'wb', closefd=False) as f:
+ f.write(fp.getbuffer())
+ finally:
+ if parent_r is not None:
+ util.Finalize(self, os.close, (parent_r,))
+ for fd in (child_r, child_w, parent_w):
+ if fd is not None:
+ os.close(fd)
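
With this file in place, 'spawn' becomes selectable on Unix as well. A sketch using the module-level setter rather than an explicit context object:

    import multiprocessing

    def work():
        print('running in a freshly spawned interpreter')

    if __name__ == '__main__':
        multiprocessing.set_start_method('spawn')
        p = multiprocessing.Process(target=work)
        p.start()
        p.join()
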
diff --git a/Lib/multiprocessing/popen_spawn_win32.py b/Lib/multiprocessing/popen_spawn_win32.py
new file mode 100644
index 0000000000..3b53068be4
--- /dev/null
+++ b/Lib/multiprocessing/popen_spawn_win32.py
@@ -0,0 +1,99 @@
+import os
+import msvcrt
+import signal
+import sys
+import _winapi
+
+from . import context
+from . import spawn
+from . import reduction
+from . import util
+
+__all__ = ['Popen']
+
+#
+#
+#
+
+TERMINATE = 0x10000
+WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
+WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
+
+#
+# We define a Popen class similar to the one from subprocess, but
+# whose constructor takes a process object as its argument.
+#
+
+class Popen(object):
+ '''
+ Start a subprocess to run the code of a process object
+ '''
+ method = 'spawn'
+
+ def __init__(self, process_obj):
+ prep_data = spawn.get_preparation_data(process_obj._name)
+
+ # read end of pipe will be "stolen" by the child process
+ # -- see spawn_main() in spawn.py.
+ rhandle, whandle = _winapi.CreatePipe(None, 0)
+ wfd = msvcrt.open_osfhandle(whandle, 0)
+ cmd = spawn.get_command_line(parent_pid=os.getpid(),
+ pipe_handle=rhandle)
+ cmd = ' '.join('"%s"' % x for x in cmd)
+
+ with open(wfd, 'wb', closefd=True) as to_child:
+ # start process
+ try:
+ hp, ht, pid, tid = _winapi.CreateProcess(
+ spawn.get_executable(), cmd,
+ None, None, False, 0, None, None, None)
+ _winapi.CloseHandle(ht)
+ except:
+ _winapi.CloseHandle(rhandle)
+ raise
+
+ # set attributes of self
+ self.pid = pid
+ self.returncode = None
+ self._handle = hp
+ self.sentinel = int(hp)
+ util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))
+
+ # send information to child
+ context.set_spawning_popen(self)
+ try:
+ reduction.dump(prep_data, to_child)
+ reduction.dump(process_obj, to_child)
+ finally:
+ context.set_spawning_popen(None)
+
+ def duplicate_for_child(self, handle):
+ assert self is context.get_spawning_popen()
+ return reduction.duplicate(handle, self.sentinel)
+
+ def wait(self, timeout=None):
+ if self.returncode is None:
+ if timeout is None:
+ msecs = _winapi.INFINITE
+ else:
+ msecs = max(0, int(timeout * 1000 + 0.5))
+
+ res = _winapi.WaitForSingleObject(int(self._handle), msecs)
+ if res == _winapi.WAIT_OBJECT_0:
+ code = _winapi.GetExitCodeProcess(self._handle)
+ if code == TERMINATE:
+ code = -signal.SIGTERM
+ self.returncode = code
+
+ return self.returncode
+
+ def poll(self):
+ return self.wait(timeout=0)
+
+ def terminate(self):
+ if self.returncode is None:
+ try:
+ _winapi.TerminateProcess(int(self._handle), TERMINATE)
+ except OSError:
+ if self.wait(timeout=1.0) is None:
+ raise
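
Because the Windows child re-runs the executable with `--multiprocessing-fork`, frozen executables must route through `freeze_support()` before anything else. A sketch (the call is a no-op when the program is not frozen):

    import multiprocessing

    def work():
        print('hello from the child')

    if __name__ == '__main__':
        multiprocessing.freeze_support()
        p = multiprocessing.Process(target=work)
        p.start()
        p.join()
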
diff --git a/Lib/multiprocessing/process.py b/Lib/multiprocessing/process.py
index 3d32add71a..68959bf9f4 100644
--- a/Lib/multiprocessing/process.py
+++ b/Lib/multiprocessing/process.py
@@ -7,7 +7,7 @@
# Licensed to PSF under a Contributor Agreement.
#
-__all__ = ['Process', 'current_process', 'active_children']
+__all__ = ['BaseProcess', 'current_process', 'active_children']
#
# Imports
@@ -43,7 +43,7 @@ def active_children():
Return list of process objects corresponding to live child processes
'''
_cleanup()
- return list(_current_process._children)
+ return list(_children)
#
#
@@ -51,33 +51,29 @@ def active_children():
def _cleanup():
# check for processes which have finished
- for p in list(_current_process._children):
+ for p in list(_children):
if p._popen.poll() is not None:
- _current_process._children.discard(p)
+ _children.discard(p)
#
# The `Process` class
#
-class Process(object):
+class BaseProcess(object):
'''
Process objects represent activity that is run in a separate process
- The class is analagous to `threading.Thread`
+ The class is analogous to `threading.Thread`
'''
- _Popen = None
+ def _Popen(self):
+ raise NotImplementedError
def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
*, daemon=None):
assert group is None, 'group argument must be None for now'
- count = next(_current_process._counter)
+ count = next(_process_counter)
self._identity = _current_process._identity + (count,)
- self._authkey = _current_process._authkey
- if daemon is not None:
- self._daemonic = daemon
- else:
- self._daemonic = _current_process._daemonic
- self._tempdir = _current_process._tempdir
+ self._config = _current_process._config.copy()
self._parent_pid = os.getpid()
self._popen = None
self._target = target
@@ -85,6 +81,8 @@ class Process(object):
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
+ if daemon is not None:
+ self.daemon = daemon
_dangling.add(self)
def run(self):
@@ -101,16 +99,12 @@ class Process(object):
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
- assert not _current_process._daemonic, \
+ assert not _current_process._config.get('daemon'), \
'daemonic processes are not allowed to have children'
_cleanup()
- if self._Popen is not None:
- Popen = self._Popen
- else:
- from .forking import Popen
- self._popen = Popen(self)
+ self._popen = self._Popen(self)
self._sentinel = self._popen.sentinel
- _current_process._children.add(self)
+ _children.add(self)
def terminate(self):
'''
@@ -126,7 +120,7 @@ class Process(object):
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
- _current_process._children.discard(self)
+ _children.discard(self)
def is_alive(self):
'''
@@ -154,7 +148,7 @@ class Process(object):
'''
Return whether process is a daemon
'''
- return self._daemonic
+ return self._config.get('daemon', False)
@daemon.setter
def daemon(self, daemonic):
@@ -162,18 +156,18 @@ class Process(object):
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
- self._daemonic = daemonic
+ self._config['daemon'] = daemonic
@property
def authkey(self):
- return self._authkey
+ return self._config['authkey']
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
- self._authkey = AuthenticationString(authkey)
+ self._config['authkey'] = AuthenticationString(authkey)
@property
def exitcode(self):
@@ -227,17 +221,19 @@ class Process(object):
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
- status, self._daemonic and ' daemon' or '')
+ status, self.daemon and ' daemon' or '')
##
def _bootstrap(self):
- from . import util
- global _current_process
+ from . import util, context
+ global _current_process, _process_counter, _children
try:
- self._children = set()
- self._counter = itertools.count(1)
+ if self._start_method is not None:
+ context._force_start_method(self._start_method)
+ _process_counter = itertools.count(1)
+ _children = set()
if sys.stdin is not None:
try:
sys.stdin.close()
@@ -285,8 +281,8 @@ class Process(object):
class AuthenticationString(bytes):
def __reduce__(self):
- from .forking import Popen
- if not Popen.thread_is_spawning():
+ from .context import get_spawning_popen
+ if get_spawning_popen() is None:
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
@@ -297,20 +293,29 @@ class AuthenticationString(bytes):
# Create object representing the main process
#
-class _MainProcess(Process):
+class _MainProcess(BaseProcess):
def __init__(self):
self._identity = ()
- self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
- self._counter = itertools.count(1)
- self._children = set()
- self._authkey = AuthenticationString(os.urandom(32))
- self._tempdir = None
+ self._config = {'authkey': AuthenticationString(os.urandom(32)),
+ 'semprefix': '/mp'}
+ # Note that some versions of FreeBSD only allow named
+ # semaphores to have names of up to 14 characters. Therefore
+ # we choose a short prefix.
+ #
+ # On MacOSX in a sandbox it may be necessary to use a
+ # different prefix -- see #19478.
+ #
+ # Everything in self._config will be inherited by descendant
+ # processes.
+
_current_process = _MainProcess()
+_process_counter = itertools.count(1)
+_children = set()
del _MainProcess
#
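
Everything in `_config` (authkey, daemon flag, semaphore prefix) is copied into the child, as the comment above notes. A small sketch observing the inherited settings from inside a child process:

    import multiprocessing

    def report():
        p = multiprocessing.current_process()
        print(p.name, p.daemon, isinstance(p.authkey, bytes))

    if __name__ == '__main__':
        p = multiprocessing.Process(target=report, name='worker-1',
                                    daemon=True)
        p.start()
        p.join()                         # prints: worker-1 True True
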
diff --git a/Lib/multiprocessing/queues.py b/Lib/multiprocessing/queues.py
index 37271fb4eb..f650771092 100644
--- a/Lib/multiprocessing/queues.py
+++ b/Lib/multiprocessing/queues.py
@@ -18,11 +18,14 @@ import weakref
import errno
from queue import Empty, Full
+
import _multiprocessing
-from multiprocessing.connection import Pipe
-from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
-from multiprocessing.util import debug, info, Finalize, register_after_fork
-from multiprocessing.forking import assert_spawning
+
+from . import connection
+from . import context
+
+from .util import debug, info, Finalize, register_after_fork, is_exiting
+from .reduction import ForkingPickler
#
# Queue type using a pipe, buffer and thread
@@ -30,18 +33,18 @@ from multiprocessing.forking import assert_spawning
class Queue(object):
- def __init__(self, maxsize=0):
+ def __init__(self, maxsize=0, *, ctx):
if maxsize <= 0:
maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize
- self._reader, self._writer = Pipe(duplex=False)
- self._rlock = Lock()
+ self._reader, self._writer = connection.Pipe(duplex=False)
+ self._rlock = ctx.Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
- self._wlock = Lock()
- self._sem = BoundedSemaphore(maxsize)
+ self._wlock = ctx.Lock()
+ self._sem = ctx.BoundedSemaphore(maxsize)
# For use by concurrent.futures
self._ignore_epipe = False
@@ -51,7 +54,7 @@ class Queue(object):
register_after_fork(self, Queue._after_fork)
def __getstate__(self):
- assert_spawning(self)
+ context.assert_spawning(self)
return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
@@ -69,8 +72,8 @@ class Queue(object):
self._joincancelled = False
self._closed = False
self._close = None
- self._send = self._writer.send
- self._recv = self._reader.recv
+ self._send_bytes = self._writer.send_bytes
+ self._recv_bytes = self._reader.recv_bytes
self._poll = self._reader.poll
def put(self, obj, block=True, timeout=None):
@@ -89,14 +92,9 @@ class Queue(object):
def get(self, block=True, timeout=None):
if block and timeout is None:
- self._rlock.acquire()
- try:
- res = self._recv()
- self._sem.release()
- return res
- finally:
- self._rlock.release()
-
+ with self._rlock:
+ res = self._recv_bytes()
+ self._sem.release()
else:
if block:
deadline = time.time() + timeout
@@ -109,11 +107,12 @@ class Queue(object):
raise Empty
elif not self._poll():
raise Empty
- res = self._recv()
+ res = self._recv_bytes()
self._sem.release()
- return res
finally:
self._rlock.release()
+ # deserialize the data after releasing the lock
+ return ForkingPickler.loads(res)
def qsize(self):
# Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
@@ -158,7 +157,7 @@ class Queue(object):
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
- args=(self._buffer, self._notempty, self._send,
+ args=(self._buffer, self._notempty, self._send_bytes,
self._wlock, self._writer.close, self._ignore_epipe),
name='QueueFeederThread'
)
@@ -210,10 +209,8 @@ class Queue(object):
notempty.release()
@staticmethod
- def _feed(buffer, notempty, send, writelock, close, ignore_epipe):
+ def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe):
debug('starting thread to feed data to pipe')
- from .util import is_exiting
-
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
@@ -241,12 +238,14 @@ class Queue(object):
close()
return
+ # serialize the data before acquiring the lock
+ obj = ForkingPickler.dumps(obj)
if wacquire is None:
- send(obj)
+ send_bytes(obj)
else:
wacquire()
try:
- send(obj)
+ send_bytes(obj)
finally:
wrelease()
except IndexError:
@@ -279,10 +278,10 @@ _sentinel = object()
class JoinableQueue(Queue):
- def __init__(self, maxsize=0):
- Queue.__init__(self, maxsize)
- self._unfinished_tasks = Semaphore(0)
- self._cond = Condition()
+ def __init__(self, maxsize=0, *, ctx):
+ Queue.__init__(self, maxsize, ctx=ctx)
+ self._unfinished_tasks = ctx.Semaphore(0)
+ self._cond = ctx.Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
@@ -332,48 +331,37 @@ class JoinableQueue(Queue):
class SimpleQueue(object):
- def __init__(self):
- self._reader, self._writer = Pipe(duplex=False)
- self._rlock = Lock()
+ def __init__(self, *, ctx):
+ self._reader, self._writer = connection.Pipe(duplex=False)
+ self._rlock = ctx.Lock()
self._poll = self._reader.poll
if sys.platform == 'win32':
self._wlock = None
else:
- self._wlock = Lock()
- self._make_methods()
+ self._wlock = ctx.Lock()
def empty(self):
return not self._poll()
def __getstate__(self):
- assert_spawning(self)
+ context.assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
- self._make_methods()
- def _make_methods(self):
- recv = self._reader.recv
- racquire, rrelease = self._rlock.acquire, self._rlock.release
- def get():
- racquire()
- try:
- return recv()
- finally:
- rrelease()
- self.get = get
+ def get(self):
+ with self._rlock:
+ res = self._reader.recv_bytes()
+ # deserialize the data after releasing the lock
+ return ForkingPickler.loads(res)
+ def put(self, obj):
+ # serialize the data before acquiring the lock
+ obj = ForkingPickler.dumps(obj)
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
- self.put = self._writer.send
+ self._writer.send_bytes(obj)
else:
- send = self._writer.send
- wacquire, wrelease = self._wlock.acquire, self._wlock.release
- def put(obj):
- wacquire()
- try:
- return send(obj)
- finally:
- wrelease()
- self.put = put
+ with self._wlock:
+ self._writer.send_bytes(obj)
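
The queue changes above move pickling out of the critical section: `put()` serializes before taking the write lock and `get()` deserializes after releasing the read lock. Observable behaviour is unchanged, as in this sketch:

    import multiprocessing

    def produce(q):
        q.put({'payload': list(range(5))})   # pickled by the feeder thread

    if __name__ == '__main__':
        ctx = multiprocessing.get_context()
        q = ctx.Queue()
        p = ctx.Process(target=produce, args=(q,))
        p.start()
        print(q.get())                       # unpickled outside the lock
        p.join()
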
diff --git a/Lib/multiprocessing/reduction.py b/Lib/multiprocessing/reduction.py
index 656fa8ff6b..8f209b47da 100644
--- a/Lib/multiprocessing/reduction.py
+++ b/Lib/multiprocessing/reduction.py
@@ -1,6 +1,5 @@
#
-# Module to allow connection and socket objects to be transferred
-# between processes
+# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
@@ -8,27 +7,56 @@
# Licensed to PSF under a Contributor Agreement.
#
-__all__ = ['reduce_socket', 'reduce_connection', 'send_handle', 'recv_handle']
-
+import copyreg
+import functools
+import io
import os
-import sys
+import pickle
import socket
-import threading
-import struct
-import signal
+import sys
-from multiprocessing import current_process
-from multiprocessing.util import register_after_fork, debug, sub_debug
-from multiprocessing.util import is_exiting, sub_warning
+from . import context
+__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']
+
+
+HAVE_SEND_HANDLE = (sys.platform == 'win32' or
+ (hasattr(socket, 'CMSG_LEN') and
+ hasattr(socket, 'SCM_RIGHTS') and
+ hasattr(socket.socket, 'sendmsg')))
#
+# Pickler subclass
#
-#
-if not(sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and
- hasattr(socket, 'SCM_RIGHTS'))):
- raise ImportError('pickling of connections not supported')
+class ForkingPickler(pickle.Pickler):
+ '''Pickler subclass used by multiprocessing.'''
+ _extra_reducers = {}
+ _copyreg_dispatch_table = copyreg.dispatch_table
+
+ def __init__(self, *args):
+ super().__init__(*args)
+ self.dispatch_table = self._copyreg_dispatch_table.copy()
+ self.dispatch_table.update(self._extra_reducers)
+
+ @classmethod
+ def register(cls, type, reduce):
+ '''Register a reduce function for a type.'''
+ cls._extra_reducers[type] = reduce
+
+ @classmethod
+ def dumps(cls, obj, protocol=None):
+ buf = io.BytesIO()
+ cls(buf, protocol).dump(obj)
+ return buf.getbuffer()
+
+ loads = pickle.loads
+
+register = ForkingPickler.register
+
+def dump(obj, file, protocol=None):
+ '''Replacement for pickle.dump() using ForkingPickler.'''
+ ForkingPickler(file, protocol).dump(obj)
#
# Platform specific definitions
@@ -36,20 +64,44 @@ if not(sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and
if sys.platform == 'win32':
# Windows
- __all__ += ['reduce_pipe_connection']
+ __all__ += ['DupHandle', 'duplicate', 'steal_handle']
import _winapi
+ def duplicate(handle, target_process=None, inheritable=False):
+ '''Duplicate a handle. (target_process is a handle not a pid!)'''
+ if target_process is None:
+ target_process = _winapi.GetCurrentProcess()
+ return _winapi.DuplicateHandle(
+ _winapi.GetCurrentProcess(), handle, target_process,
+ 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)
+
+ def steal_handle(source_pid, handle):
+ '''Steal a handle from process identified by source_pid.'''
+ source_process_handle = _winapi.OpenProcess(
+ _winapi.PROCESS_DUP_HANDLE, False, source_pid)
+ try:
+ return _winapi.DuplicateHandle(
+ source_process_handle, handle,
+ _winapi.GetCurrentProcess(), 0, False,
+ _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
+ finally:
+ _winapi.CloseHandle(source_process_handle)
+
def send_handle(conn, handle, destination_pid):
+ '''Send a handle over a local connection.'''
dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
conn.send(dh)
def recv_handle(conn):
+ '''Receive a handle over a local connection.'''
return conn.recv().detach()
class DupHandle(object):
+ '''Picklable wrapper for a handle.'''
def __init__(self, handle, access, pid=None):
- # duplicate handle for process with given pid
if pid is None:
+ # We just duplicate the handle in the current process and
+ # let the receiving process steal the handle.
pid = os.getpid()
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
try:
@@ -62,9 +114,12 @@ if sys.platform == 'win32':
self._pid = pid
def detach(self):
+ '''Get the handle. This should only be called once.'''
# retrieve handle from process which currently owns it
if self._pid == os.getpid():
+ # The handle has already been duplicated for this process.
return self._handle
+ # We must steal the handle from the process whose pid is self._pid.
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
self._pid)
try:
@@ -74,207 +129,112 @@ if sys.platform == 'win32':
finally:
_winapi.CloseHandle(proc)
- class DupSocket(object):
- def __init__(self, sock):
- new_sock = sock.dup()
- def send(conn, pid):
- share = new_sock.share(pid)
- conn.send_bytes(share)
- self._id = resource_sharer.register(send, new_sock.close)
-
- def detach(self):
- conn = resource_sharer.get_connection(self._id)
- try:
- share = conn.recv_bytes()
- return socket.fromshare(share)
- finally:
- conn.close()
-
- def reduce_socket(s):
- return rebuild_socket, (DupSocket(s),)
-
- def rebuild_socket(ds):
- return ds.detach()
-
- def reduce_connection(conn):
- handle = conn.fileno()
- with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
- ds = DupSocket(s)
- return rebuild_connection, (ds, conn.readable, conn.writable)
-
- def rebuild_connection(ds, readable, writable):
- from .connection import Connection
- sock = ds.detach()
- return Connection(sock.detach(), readable, writable)
-
- def reduce_pipe_connection(conn):
- access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
- (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
- dh = DupHandle(conn.fileno(), access)
- return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
-
- def rebuild_pipe_connection(dh, readable, writable):
- from .connection import PipeConnection
- handle = dh.detach()
- return PipeConnection(handle, readable, writable)
-
else:
# Unix
+ __all__ += ['DupFd', 'sendfds', 'recvfds']
+ import array
# On MacOSX we should acknowledge receipt of fds -- see Issue14669
ACKNOWLEDGE = sys.platform == 'darwin'
+ def sendfds(sock, fds):
+ '''Send an array of fds over an AF_UNIX socket.'''
+ fds = array.array('i', fds)
+ msg = bytes([len(fds) % 256])
+ sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
+ if ACKNOWLEDGE and sock.recv(1) != b'A':
+ raise RuntimeError('did not receive acknowledgement of fd')
+
+ def recvfds(sock, size):
+ '''Receive an array of fds over an AF_UNIX socket.'''
+ a = array.array('i')
+ bytes_size = a.itemsize * size
+ msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size))
+ if not msg and not ancdata:
+ raise EOFError
+ try:
+ if ACKNOWLEDGE:
+ sock.send(b'A')
+ if len(ancdata) != 1:
+ raise RuntimeError('received %d items of ancdata' %
+ len(ancdata))
+ cmsg_level, cmsg_type, cmsg_data = ancdata[0]
+ if (cmsg_level == socket.SOL_SOCKET and
+ cmsg_type == socket.SCM_RIGHTS):
+ if len(cmsg_data) % a.itemsize != 0:
+ raise ValueError
+ a.frombytes(cmsg_data)
+ assert len(a) % 256 == msg[0]
+ return list(a)
+ except (ValueError, IndexError):
+ pass
+ raise RuntimeError('Invalid data received')
+
def send_handle(conn, handle, destination_pid):
+ '''Send a handle over a local connection.'''
with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
- s.sendmsg([b'x'], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
- struct.pack("@i", handle))])
- if ACKNOWLEDGE and conn.recv_bytes() != b'ACK':
- raise RuntimeError('did not receive acknowledgement of fd')
+ sendfds(s, [handle])
def recv_handle(conn):
- size = struct.calcsize("@i")
+ '''Receive a handle over a local connection.'''
with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
- msg, ancdata, flags, addr = s.recvmsg(1, socket.CMSG_LEN(size))
- try:
- if ACKNOWLEDGE:
- conn.send_bytes(b'ACK')
- cmsg_level, cmsg_type, cmsg_data = ancdata[0]
- if (cmsg_level == socket.SOL_SOCKET and
- cmsg_type == socket.SCM_RIGHTS):
- return struct.unpack("@i", cmsg_data[:size])[0]
- except (ValueError, IndexError, struct.error):
- pass
- raise RuntimeError('Invalid data received')
-
- class DupFd(object):
- def __init__(self, fd):
- new_fd = os.dup(fd)
- def send(conn, pid):
- send_handle(conn, new_fd, pid)
- def close():
- os.close(new_fd)
- self._id = resource_sharer.register(send, close)
+ return recvfds(s, 1)[0]
+
+ def DupFd(fd):
+ '''Return a wrapper for an fd.'''
+ popen_obj = context.get_spawning_popen()
+ if popen_obj is not None:
+ return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
+ elif HAVE_SEND_HANDLE:
+ from . import resource_sharer
+ return resource_sharer.DupFd(fd)
+ else:
+ raise ValueError('SCM_RIGHTS appears not to be available')
- def detach(self):
- conn = resource_sharer.get_connection(self._id)
- try:
- return recv_handle(conn)
- finally:
- conn.close()
+#
+# Try making some callable types picklable
+#
- def reduce_socket(s):
- df = DupFd(s.fileno())
- return rebuild_socket, (df, s.family, s.type, s.proto)
+def _reduce_method(m):
+ if m.__self__ is None:
+ return getattr, (m.__class__, m.__func__.__name__)
+ else:
+ return getattr, (m.__self__, m.__func__.__name__)
+class _C:
+ def f(self):
+ pass
+register(type(_C().f), _reduce_method)
- def rebuild_socket(df, family, type, proto):
- fd = df.detach()
- s = socket.fromfd(fd, family, type, proto)
- os.close(fd)
- return s
- def reduce_connection(conn):
- df = DupFd(conn.fileno())
- return rebuild_connection, (df, conn.readable, conn.writable)
+def _reduce_method_descriptor(m):
+ return getattr, (m.__objclass__, m.__name__)
+register(type(list.append), _reduce_method_descriptor)
+register(type(int.__add__), _reduce_method_descriptor)
- def rebuild_connection(df, readable, writable):
- from .connection import Connection
- fd = df.detach()
- return Connection(fd, readable, writable)
+
+def _reduce_partial(p):
+ return _rebuild_partial, (p.func, p.args, p.keywords or {})
+def _rebuild_partial(func, args, keywords):
+ return functools.partial(func, *args, **keywords)
+register(functools.partial, _reduce_partial)
#
-# Server which shares registered resources with clients
+# Make sockets picklable
#
-class ResourceSharer(object):
- def __init__(self):
- self._key = 0
- self._cache = {}
- self._old_locks = []
- self._lock = threading.Lock()
- self._listener = None
- self._address = None
- self._thread = None
- register_after_fork(self, ResourceSharer._afterfork)
-
- def register(self, send, close):
- with self._lock:
- if self._address is None:
- self._start()
- self._key += 1
- self._cache[self._key] = (send, close)
- return (self._address, self._key)
-
- @staticmethod
- def get_connection(ident):
- from .connection import Client
- address, key = ident
- c = Client(address, authkey=current_process().authkey)
- c.send((key, os.getpid()))
- return c
-
- def stop(self, timeout=None):
- from .connection import Client
- with self._lock:
- if self._address is not None:
- c = Client(self._address, authkey=current_process().authkey)
- c.send(None)
- c.close()
- self._thread.join(timeout)
- if self._thread.is_alive():
- sub_warn('ResourceSharer thread did not stop when asked')
- self._listener.close()
- self._thread = None
- self._address = None
- self._listener = None
- for key, (send, close) in self._cache.items():
- close()
- self._cache.clear()
-
- def _afterfork(self):
- for key, (send, close) in self._cache.items():
- close()
- self._cache.clear()
- # If self._lock was locked at the time of the fork, it may be broken
- # -- see issue 6721. Replace it without letting it be gc'ed.
- self._old_locks.append(self._lock)
- self._lock = threading.Lock()
- if self._listener is not None:
- self._listener.close()
- self._listener = None
- self._address = None
- self._thread = None
-
- def _start(self):
- from .connection import Listener
- assert self._listener is None
- debug('starting listener and thread for sending handles')
- self._listener = Listener(authkey=current_process().authkey)
- self._address = self._listener.address
- t = threading.Thread(target=self._serve)
- t.daemon = True
- t.start()
- self._thread = t
-
- def _serve(self):
- if hasattr(signal, 'pthread_sigmask'):
- signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
- while 1:
- try:
- conn = self._listener.accept()
- msg = conn.recv()
- if msg is None:
- break
- key, destination_pid = msg
- send, close = self._cache.pop(key)
- send(conn, destination_pid)
- close()
- conn.close()
- except:
- if not is_exiting():
- import traceback
- sub_warning(
- 'thread for sharing handles raised exception :\n' +
- '-'*79 + '\n' + traceback.format_exc() + '-'*79
- )
-
-resource_sharer = ResourceSharer()
+if sys.platform == 'win32':
+ def _reduce_socket(s):
+ from .resource_sharer import DupSocket
+ return _rebuild_socket, (DupSocket(s),)
+ def _rebuild_socket(ds):
+ return ds.detach()
+ register(socket.socket, _reduce_socket)
+
+else:
+ def _reduce_socket(s):
+ df = DupFd(s.fileno())
+ return _rebuild_socket, (df, s.family, s.type, s.proto)
+ def _rebuild_socket(df, family, type, proto):
+ fd = df.detach()
+ return socket.socket(family, type, proto, fileno=fd)
+ register(socket.socket, _reduce_socket)
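
The `register()` hook above replaces the old copyreg dance; reducers apply only to ForkingPickler, not to the global pickle machinery. A sketch with a hypothetical `Point` type:

    from multiprocessing import reduction

    class Point:                             # illustrative type
        def __init__(self, x, y):
            self.x, self.y = x, y

    def _rebuild_point(x, y):
        return Point(x, y)

    def _reduce_point(p):
        return _rebuild_point, (p.x, p.y)

    reduction.register(Point, _reduce_point)
    data = reduction.ForkingPickler.dumps(Point(1, 2))
    print(reduction.ForkingPickler.loads(data).x)   # -> 1
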
diff --git a/Lib/multiprocessing/resource_sharer.py b/Lib/multiprocessing/resource_sharer.py
new file mode 100644
index 0000000000..5e46fc65b4
--- /dev/null
+++ b/Lib/multiprocessing/resource_sharer.py
@@ -0,0 +1,158 @@
+#
+# We use a background thread for sharing fds on Unix, and for sharing sockets on
+# Windows.
+#
+# A client which wants to pickle a resource registers it with the resource
+# sharer and gets an identifier in return. The unpickling process connects
+# to the resource sharer, sends the identifier and its pid, and then receives
+# the resource.
+#
+
+import os
+import signal
+import socket
+import sys
+import threading
+
+from . import process
+from . import reduction
+from . import util
+
+__all__ = ['stop']
+
+
+if sys.platform == 'win32':
+ __all__ += ['DupSocket']
+
+ class DupSocket(object):
+ '''Picklable wrapper for a socket.'''
+ def __init__(self, sock):
+ new_sock = sock.dup()
+ def send(conn, pid):
+ share = new_sock.share(pid)
+ conn.send_bytes(share)
+ self._id = _resource_sharer.register(send, new_sock.close)
+
+ def detach(self):
+ '''Get the socket. This should only be called once.'''
+ with _resource_sharer.get_connection(self._id) as conn:
+ share = conn.recv_bytes()
+ return socket.fromshare(share)
+
+else:
+ __all__ += ['DupFd']
+
+ class DupFd(object):
+ '''Wrapper for fd which can be used at any time.'''
+ def __init__(self, fd):
+ new_fd = os.dup(fd)
+ def send(conn, pid):
+ reduction.send_handle(conn, new_fd, pid)
+ def close():
+ os.close(new_fd)
+ self._id = _resource_sharer.register(send, close)
+
+ def detach(self):
+ '''Get the fd. This should only be called once.'''
+ with _resource_sharer.get_connection(self._id) as conn:
+ return reduction.recv_handle(conn)
+
+
+class _ResourceSharer(object):
+ '''Manager for resources using a background thread.'''
+ def __init__(self):
+ self._key = 0
+ self._cache = {}
+ self._old_locks = []
+ self._lock = threading.Lock()
+ self._listener = None
+ self._address = None
+ self._thread = None
+ util.register_after_fork(self, _ResourceSharer._afterfork)
+
+ def register(self, send, close):
+ '''Register resource, returning an identifier.'''
+ with self._lock:
+ if self._address is None:
+ self._start()
+ self._key += 1
+ self._cache[self._key] = (send, close)
+ return (self._address, self._key)
+
+ @staticmethod
+ def get_connection(ident):
+ '''Return connection from which to receive identified resource.'''
+ from .connection import Client
+ address, key = ident
+ c = Client(address, authkey=process.current_process().authkey)
+ c.send((key, os.getpid()))
+ return c
+
+ def stop(self, timeout=None):
+ '''Stop the background thread and clear registered resources.'''
+ from .connection import Client
+ with self._lock:
+ if self._address is not None:
+ c = Client(self._address,
+ authkey=process.current_process().authkey)
+ c.send(None)
+ c.close()
+ self._thread.join(timeout)
+ if self._thread.is_alive():
+ util.sub_warning('_ResourceSharer thread did '
+ 'not stop when asked')
+ self._listener.close()
+ self._thread = None
+ self._address = None
+ self._listener = None
+ for key, (send, close) in self._cache.items():
+ close()
+ self._cache.clear()
+
+ def _afterfork(self):
+ for key, (send, close) in self._cache.items():
+ close()
+ self._cache.clear()
+ # If self._lock was locked at the time of the fork, it may be broken
+ # -- see issue 6721. Replace it without letting it be gc'ed.
+ self._old_locks.append(self._lock)
+ self._lock = threading.Lock()
+ if self._listener is not None:
+ self._listener.close()
+ self._listener = None
+ self._address = None
+ self._thread = None
+
+ def _start(self):
+ from .connection import Listener
+ assert self._listener is None
+ util.debug('starting listener and thread for sending handles')
+ self._listener = Listener(authkey=process.current_process().authkey)
+ self._address = self._listener.address
+ t = threading.Thread(target=self._serve)
+ t.daemon = True
+ t.start()
+ self._thread = t
+
+ def _serve(self):
+ if hasattr(signal, 'pthread_sigmask'):
+ signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
+ while 1:
+ try:
+ with self._listener.accept() as conn:
+ msg = conn.recv()
+ if msg is None:
+ break
+ key, destination_pid = msg
+ send, close = self._cache.pop(key)
+ try:
+ send(conn, destination_pid)
+ finally:
+ close()
+ except:
+ if not util.is_exiting():
+ sys.excepthook(*sys.exc_info())
+
+
+_resource_sharer = _ResourceSharer()
+stop = _resource_sharer.stop
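
Since sockets are now reduced through the resource sharer, a live socket can travel over a queue; unpickling in the child fetches the fd (or handle) from the background thread. A sketch, assuming Unix (for socketpair) and the default start method:

    import multiprocessing
    import socket

    def child(q):
        s = q.get()              # detach() pulls the fd from the sharer
        s.sendall(b'ping')
        s.close()

    if __name__ == '__main__':
        a, b = socket.socketpair()
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=child, args=(q,))
        p.start()
        q.put(b)
        print(a.recv(4))         # -> b'ping'
        b.close()
        p.join()
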
diff --git a/Lib/multiprocessing/semaphore_tracker.py b/Lib/multiprocessing/semaphore_tracker.py
new file mode 100644
index 0000000000..de7738eeee
--- /dev/null
+++ b/Lib/multiprocessing/semaphore_tracker.py
@@ -0,0 +1,143 @@
+#
+# On Unix we run a server process which keeps track of unlinked
+# semaphores. The server ignores SIGINT and SIGTERM and reads from a
+# pipe. Every other process of the program has a copy of the writable
+# end of the pipe, so we get EOF when all other processes have exited.
+# Then the server process unlinks any remaining semaphore names.
+#
+# This is important because the system only supports a limited number
+# of named semaphores, and they will not be automatically removed till
+# the next reboot. Without this semaphore tracker process, "killall
+# python" would probably leave unlinked semaphores.
+#
+
+import os
+import signal
+import sys
+import threading
+import warnings
+import _multiprocessing
+
+from . import spawn
+from . import util
+
+__all__ = ['ensure_running', 'register', 'unregister']
+
+
+class SemaphoreTracker(object):
+
+ def __init__(self):
+ self._lock = threading.Lock()
+ self._fd = None
+
+ def getfd(self):
+ self.ensure_running()
+ return self._fd
+
+ def ensure_running(self):
+ '''Make sure that the semaphore tracker process is running.
+
+ This can be run from any process. Usually a child process will use
+ the semaphore created by its parent.'''
+ with self._lock:
+ if self._fd is not None:
+ return
+ fds_to_pass = []
+ try:
+ fds_to_pass.append(sys.stderr.fileno())
+ except Exception:
+ pass
+ cmd = 'from multiprocessing.semaphore_tracker import main;main(%d)'
+ r, w = os.pipe()
+ try:
+ fds_to_pass.append(r)
+ # process will outlive us, so there is no need to wait on the pid
+ exe = spawn.get_executable()
+ args = [exe] + util._args_from_interpreter_flags()
+ args += ['-c', cmd % r]
+ util.spawnv_passfds(exe, args, fds_to_pass)
+ except:
+ os.close(w)
+ raise
+ else:
+ self._fd = w
+ finally:
+ os.close(r)
+
+ def register(self, name):
+ '''Register name of semaphore with semaphore tracker.'''
+ self._send('REGISTER', name)
+
+ def unregister(self, name):
+ '''Unregister name of semaphore with semaphore tracker.'''
+ self._send('UNREGISTER', name)
+
+ def _send(self, cmd, name):
+ self.ensure_running()
+ msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
+ if len(name) > 512:
+ # POSIX guarantees that writes to a pipe of less than PIPE_BUF
+ # bytes are atomic, and that PIPE_BUF >= 512
+ raise ValueError('name too long')
+ nbytes = os.write(self._fd, msg)
+ assert nbytes == len(msg)
+
+
+_semaphore_tracker = SemaphoreTracker()
+ensure_running = _semaphore_tracker.ensure_running
+register = _semaphore_tracker.register
+unregister = _semaphore_tracker.unregister
+getfd = _semaphore_tracker.getfd
+
+
+def main(fd):
+ '''Run semaphore tracker.'''
+ # protect the process from ^C and "killall python" etc
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+ for f in (sys.stdin, sys.stdout):
+ try:
+ f.close()
+ except Exception:
+ pass
+
+ cache = set()
+ try:
+ # keep track of registered/unregistered semaphores
+ with open(fd, 'rb') as f:
+ for line in f:
+ try:
+ cmd, name = line.strip().split(b':')
+ if cmd == b'REGISTER':
+ cache.add(name)
+ elif cmd == b'UNREGISTER':
+ cache.remove(name)
+ else:
+ raise RuntimeError('unrecognized command %r' % cmd)
+ except Exception:
+ try:
+ sys.excepthook(*sys.exc_info())
+ except:
+ pass
+ finally:
+ # all processes have terminated; cleanup any remaining semaphores
+ if cache:
+ try:
+ warnings.warn('semaphore_tracker: There appear to be %d '
+ 'leaked semaphores to clean up at shutdown' %
+ len(cache))
+ except Exception:
+ pass
+ for name in cache:
+ # For some reason the process which created and registered this
+ # semaphore has failed to unregister it. Presumably it has died.
+ # We therefore unlink it.
+ try:
+ name = name.decode('ascii')
+ try:
+ _multiprocessing.sem_unlink(name)
+ except Exception as e:
+ warnings.warn('semaphore_tracker: %r: %s' % (name, e))
+ finally:
+ pass
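
The tracker's wire format is one ASCII `CMD:name\n` record per write, kept under PIPE_BUF so each write is atomic. The parsing loop in `main()` boils down to the following standalone sketch:

    import os

    r, w = os.pipe()
    os.write(w, b'REGISTER:/mp-demo\n')
    os.write(w, b'UNREGISTER:/mp-demo\n')
    os.close(w)

    cache = set()
    with open(r, 'rb') as f:     # EOF once all writers have closed
        for line in f:
            cmd, name = line.strip().split(b':')
            if cmd == b'REGISTER':
                cache.add(name)
            elif cmd == b'UNREGISTER':
                cache.discard(name)
    print(cache)                 # -> set(), nothing left to unlink
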
diff --git a/Lib/multiprocessing/sharedctypes.py b/Lib/multiprocessing/sharedctypes.py
index a358ed4f12..0c178252d5 100644
--- a/Lib/multiprocessing/sharedctypes.py
+++ b/Lib/multiprocessing/sharedctypes.py
@@ -10,8 +10,11 @@
import ctypes
import weakref
-from multiprocessing import heap, RLock
-from multiprocessing.forking import assert_spawning, ForkingPickler
+from . import heap
+from . import get_context
+
+from .context import assert_spawning
+from .reduction import ForkingPickler
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
@@ -63,7 +66,7 @@ def RawArray(typecode_or_type, size_or_initializer):
result.__init__(*size_or_initializer)
return result
-def Value(typecode_or_type, *args, lock=True):
+def Value(typecode_or_type, *args, lock=True, ctx=None):
'''
Return a synchronization wrapper for a Value
'''
@@ -71,12 +74,13 @@ def Value(typecode_or_type, *args, lock=True):
if lock is False:
return obj
if lock in (True, None):
- lock = RLock()
+ ctx = ctx or get_context()
+ lock = ctx.RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
- return synchronized(obj, lock)
+ return synchronized(obj, lock, ctx=ctx)
-def Array(typecode_or_type, size_or_initializer, *, lock=True):
+def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None):
'''
Return a synchronization wrapper for a RawArray
'''
@@ -84,25 +88,27 @@ def Array(typecode_or_type, size_or_initializer, *, lock=True):
if lock is False:
return obj
if lock in (True, None):
- lock = RLock()
+ ctx = ctx or get_context()
+ lock = ctx.RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
- return synchronized(obj, lock)
+ return synchronized(obj, lock, ctx=ctx)
def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
-def synchronized(obj, lock=None):
+def synchronized(obj, lock=None, ctx=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
+ ctx = ctx or get_context()
if isinstance(obj, ctypes._SimpleCData):
- return Synchronized(obj, lock)
+ return Synchronized(obj, lock, ctx)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
- return SynchronizedString(obj, lock)
- return SynchronizedArray(obj, lock)
+ return SynchronizedString(obj, lock, ctx)
+ return SynchronizedArray(obj, lock, ctx)
else:
cls = type(obj)
try:
@@ -112,7 +118,7 @@ def synchronized(obj, lock=None):
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
- return scls(obj, lock)
+ return scls(obj, lock, ctx)
#
# Functions for pickling/unpickling
@@ -172,9 +178,13 @@ class_cache = weakref.WeakKeyDictionary()
class SynchronizedBase(object):
- def __init__(self, obj, lock=None):
+ def __init__(self, obj, lock=None, ctx=None):
self._obj = obj
- self._lock = lock or RLock()
+ if lock:
+ self._lock = lock
+ else:
+ ctx = ctx or get_context(force=True)
+ self._lock = ctx.RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
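
With the new ctx keyword the lock protecting a shared value comes from an
explicit context rather than the global default. A usage sketch, assuming
the start-method machinery introduced elsewhere in this patch:

    import multiprocessing
    from multiprocessing import sharedctypes

    def bump(v, a):
        with v.get_lock():            # lock created by ctx.RLock()
            v.value += 1
        for i in range(len(a)):
            a[i] *= 2

    if __name__ == '__main__':
        ctx = multiprocessing.get_context('spawn')
        v = sharedctypes.Value('i', 0, lock=True, ctx=ctx)
        a = sharedctypes.Array('d', [1.0, 2.0], lock=True, ctx=ctx)
        p = ctx.Process(target=bump, args=(v, a))
        p.start()
        p.join()
        print(v.value, a[:])          # 1 [2.0, 4.0]
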
diff --git a/Lib/multiprocessing/spawn.py b/Lib/multiprocessing/spawn.py
new file mode 100644
index 0000000000..336e47990f
--- /dev/null
+++ b/Lib/multiprocessing/spawn.py
@@ -0,0 +1,284 @@
+#
+# Code used to start processes when using the spawn or forkserver
+# start methods.
+#
+# multiprocessing/spawn.py
+#
+# Copyright (c) 2006-2008, R Oudkerk
+# Licensed to PSF under a Contributor Agreement.
+#
+
+import os
+import pickle
+import sys
+import runpy
+import types
+
+from . import get_start_method, set_start_method
+from . import process
+from . import util
+
+__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',
+ 'get_preparation_data', 'get_command_line', 'import_main_path']
+
+#
+# _python_exe is the assumed path to the python executable.
+# People embedding Python want to modify it.
+#
+
+if sys.platform != 'win32':
+ WINEXE = False
+ WINSERVICE = False
+else:
+    WINEXE = getattr(sys, 'frozen', False)
+ WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
+
+if WINSERVICE:
+ _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
+else:
+ _python_exe = sys.executable
+
+def set_executable(exe):
+ global _python_exe
+ _python_exe = exe
+
+def get_executable():
+ return _python_exe
+
+#
+#
+#
+
+def is_forking(argv):
+    '''
+    Return whether the command line indicates we are forking
+    '''
+    return len(argv) >= 2 and argv[1] == '--multiprocessing-fork'
+
+
+def freeze_support():
+ '''
+    Run code for process object if this is not the main process
+ '''
+ if is_forking(sys.argv):
+ kwds = {}
+ for arg in sys.argv[2:]:
+ name, value = arg.split('=')
+ if value == 'None':
+ kwds[name] = None
+ else:
+ kwds[name] = int(value)
+ spawn_main(**kwds)
+ sys.exit()
+
+
+def get_command_line(**kwds):
+ '''
+ Returns prefix of command line used for spawning a child process
+ '''
+ if getattr(sys, 'frozen', False):
+ return ([sys.executable, '--multiprocessing-fork'] +
+ ['%s=%r' % item for item in kwds.items()])
+ else:
+ prog = 'from multiprocessing.spawn import spawn_main; spawn_main(%s)'
+ prog %= ', '.join('%s=%r' % item for item in kwds.items())
+ opts = util._args_from_interpreter_flags()
+ return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']
+
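
For a normal (non-frozen) interpreter the resulting command line is just
python -c re-invoking spawn_main with the keywords encoded as name=value
pairs. A sketch of the string construction, with an invented fd number:

    kwds = {'pipe_handle': 11}                      # fd number invented
    prog = 'from multiprocessing.spawn import spawn_main; spawn_main(%s)'
    prog %= ', '.join('%s=%r' % item for item in kwds.items())
    print(prog)
    # from multiprocessing.spawn import spawn_main; spawn_main(pipe_handle=11)
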
+
+def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None):
+ '''
+    Run code specified by data received over pipe
+ '''
+ assert is_forking(sys.argv)
+ if sys.platform == 'win32':
+ import msvcrt
+ from .reduction import steal_handle
+ new_handle = steal_handle(parent_pid, pipe_handle)
+ fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
+ else:
+ from . import semaphore_tracker
+ semaphore_tracker._semaphore_tracker._fd = tracker_fd
+ fd = pipe_handle
+ exitcode = _main(fd)
+ sys.exit(exitcode)
+
+
+def _main(fd):
+ with os.fdopen(fd, 'rb', closefd=True) as from_parent:
+ process.current_process()._inheriting = True
+ try:
+ preparation_data = pickle.load(from_parent)
+ prepare(preparation_data)
+ self = pickle.load(from_parent)
+ finally:
+ del process.current_process()._inheriting
+ return self._bootstrap()
+
+
+def _check_not_importing_main():
+ if getattr(process.current_process(), '_inheriting', False):
+ raise RuntimeError('''
+ An attempt has been made to start a new process before the
+ current process has finished its bootstrapping phase.
+
+ This probably means that you are not using fork to start your
+ child processes and you have forgotten to use the proper idiom
+ in the main module:
+
+ if __name__ == '__main__':
+ freeze_support()
+ ...
+
+ The "freeze_support()" line can be omitted if the program
+ is not going to be frozen to produce an executable.''')
+
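
The idiom the message asks for looks like the following sketch;
freeze_support() is a no-op unless the program is frozen:

    import multiprocessing

    def worker():
        print('hello from child')

    if __name__ == '__main__':
        multiprocessing.freeze_support()    # harmless when not frozen
        p = multiprocessing.Process(target=worker)
        p.start()
        p.join()
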
+
+def get_preparation_data(name):
+ '''
+ Return info about parent needed by child to unpickle process object
+ '''
+ _check_not_importing_main()
+ d = dict(
+ log_to_stderr=util._log_to_stderr,
+ authkey=process.current_process().authkey,
+ )
+
+ if util._logger is not None:
+ d['log_level'] = util._logger.getEffectiveLevel()
+
+    sys_path = sys.path.copy()
+ try:
+ i = sys_path.index('')
+ except ValueError:
+ pass
+ else:
+ sys_path[i] = process.ORIGINAL_DIR
+
+ d.update(
+ name=name,
+ sys_path=sys_path,
+ sys_argv=sys.argv,
+ orig_dir=process.ORIGINAL_DIR,
+ dir=os.getcwd(),
+ start_method=get_start_method(),
+ )
+
+ # Figure out whether to initialise main in the subprocess as a module
+ # or through direct execution (or to leave it alone entirely)
+ main_module = sys.modules['__main__']
+ main_mod_name = getattr(main_module.__spec__, "name", None)
+ if main_mod_name is not None:
+ d['init_main_from_name'] = main_mod_name
+ elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
+ main_path = getattr(main_module, '__file__', None)
+ if main_path is not None:
+ if (not os.path.isabs(main_path) and
+ process.ORIGINAL_DIR is not None):
+ main_path = os.path.join(process.ORIGINAL_DIR, main_path)
+ d['init_main_from_path'] = os.path.normpath(main_path)
+
+ return d
+
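
The returned dict is small and fully picklable; roughly like this, with
every value invented for illustration:

    example_preparation_data = {
        'log_to_stderr': False,
        'authkey': b'0' * 32,                  # random bytes in practice
        'name': 'Process-1',
        'sys_path': ['/home/user', '/usr/lib/python3.4'],
        'sys_argv': ['myscript.py'],
        'orig_dir': '/home/user',
        'dir': '/home/user',
        'start_method': 'spawn',
        'init_main_from_path': '/home/user/myscript.py',
    }
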
+#
+# Prepare current process
+#
+
+old_main_modules = []
+
+def prepare(data):
+ '''
+ Try to get current process ready to unpickle process object
+ '''
+ if 'name' in data:
+ process.current_process().name = data['name']
+
+ if 'authkey' in data:
+ process.current_process().authkey = data['authkey']
+
+ if 'log_to_stderr' in data and data['log_to_stderr']:
+ util.log_to_stderr()
+
+ if 'log_level' in data:
+ util.get_logger().setLevel(data['log_level'])
+
+ if 'sys_path' in data:
+ sys.path = data['sys_path']
+
+ if 'sys_argv' in data:
+ sys.argv = data['sys_argv']
+
+ if 'dir' in data:
+ os.chdir(data['dir'])
+
+ if 'orig_dir' in data:
+ process.ORIGINAL_DIR = data['orig_dir']
+
+ if 'start_method' in data:
+ set_start_method(data['start_method'])
+
+ if 'init_main_from_name' in data:
+ _fixup_main_from_name(data['init_main_from_name'])
+ elif 'init_main_from_path' in data:
+ _fixup_main_from_path(data['init_main_from_path'])
+
+# Multiprocessing module helpers to fix up the main module in
+# spawned subprocesses
+def _fixup_main_from_name(mod_name):
+ # __main__.py files for packages, directories, zip archives, etc, run
+ # their "main only" code unconditionally, so we don't even try to
+ # populate anything in __main__, nor do we make any changes to
+ # __main__ attributes
+ current_main = sys.modules['__main__']
+ if mod_name == "__main__" or mod_name.endswith(".__main__"):
+ return
+
+ # If this process was forked, __main__ may already be populated
+ if getattr(current_main.__spec__, "name", None) == mod_name:
+ return
+
+ # Otherwise, __main__ may contain some non-main code where we need to
+ # support unpickling it properly. We rerun it as __mp_main__ and make
+ # the normal __main__ an alias to that
+ old_main_modules.append(current_main)
+ main_module = types.ModuleType("__mp_main__")
+ main_content = runpy.run_module(mod_name,
+ run_name="__mp_main__",
+ alter_sys=True)
+ main_module.__dict__.update(main_content)
+ sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
+
+
+def _fixup_main_from_path(main_path):
+ # If this process was forked, __main__ may already be populated
+ current_main = sys.modules['__main__']
+
+ # Unfortunately, the main ipython launch script historically had no
+ # "if __name__ == '__main__'" guard, so we work around that
+ # by treating it like a __main__.py file
+ # See https://github.com/ipython/ipython/issues/4698
+ main_name = os.path.splitext(os.path.basename(main_path))[0]
+ if main_name == 'ipython':
+ return
+
+ # Otherwise, if __file__ already has the setting we expect,
+ # there's nothing more to do
+ if getattr(current_main, '__file__', None) == main_path:
+ return
+
+ # If the parent process has sent a path through rather than a module
+ # name we assume it is an executable script that may contain
+ # non-main code that needs to be executed
+ old_main_modules.append(current_main)
+ main_module = types.ModuleType("__mp_main__")
+ main_content = runpy.run_path(main_path,
+ run_name="__mp_main__")
+ main_module.__dict__.update(main_content)
+ sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
+
+
+def import_main_path(main_path):
+ '''
+ Set sys.modules['__main__'] to module at main_path
+ '''
+ _fixup_main_from_path(main_path)
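
The reason __main__ must be repopulated at all is that pickle stores
functions by module and qualified name, not by value, so a child that never
ran the parent's main script could not resolve them. A small demonstration:

    import pickle

    def task():
        return 42

    # the pickle stream names the module and attribute, not the code object
    data = pickle.dumps(task)
    assert b'task' in data
    print(pickle.loads(data)())   # 42 -- works because __main__ has 'task'
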
diff --git a/Lib/multiprocessing/synchronize.py b/Lib/multiprocessing/synchronize.py
index 0faca78412..dea1cbd7f0 100644
--- a/Lib/multiprocessing/synchronize.py
+++ b/Lib/multiprocessing/synchronize.py
@@ -13,18 +13,20 @@ __all__ = [
import threading
import sys
-
+import tempfile
import _multiprocessing
-from multiprocessing.process import current_process
-from multiprocessing.util import register_after_fork, debug
-from multiprocessing.forking import assert_spawning, Popen
+
from time import time as _time
+from . import context
+from . import process
+from . import util
+
# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
- from _multiprocessing import SemLock
+ from _multiprocessing import SemLock, sem_unlink
except (ImportError):
raise ImportError("This platform lacks a functioning sem_open" +
" implementation, therefore, the required" +
@@ -44,15 +46,47 @@ SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
class SemLock(object):
- def __init__(self, kind, value, maxvalue):
- sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
- debug('created semlock with handle %s' % sl.handle)
+ _rand = tempfile._RandomNameSequence()
+
+ def __init__(self, kind, value, maxvalue, *, ctx):
+ if ctx is None:
+ ctx = context._default_context.get_context()
+ name = ctx.get_start_method()
+ unlink_now = sys.platform == 'win32' or name == 'fork'
+ for i in range(100):
+ try:
+ sl = self._semlock = _multiprocessing.SemLock(
+ kind, value, maxvalue, self._make_name(),
+ unlink_now)
+ except FileExistsError:
+ pass
+ else:
+ break
+ else:
+ raise FileExistsError('cannot find name for semaphore')
+
+ util.debug('created semlock with handle %s' % sl.handle)
self._make_methods()
if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
- register_after_fork(self, _after_fork)
+ util.register_after_fork(self, _after_fork)
+
+ if self._semlock.name is not None:
+ # We only get here if we are on Unix with forking
+ # disabled. When the object is garbage collected or the
+ # process shuts down we unlink the semaphore name
+ from .semaphore_tracker import register
+ register(self._semlock.name)
+ util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
+ exitpriority=0)
+
+ @staticmethod
+ def _cleanup(name):
+ from .semaphore_tracker import unregister
+ sem_unlink(name)
+ unregister(name)
def _make_methods(self):
self.acquire = self._semlock.acquire
@@ -65,23 +99,32 @@ class SemLock(object):
return self._semlock.__exit__(*args)
def __getstate__(self):
- assert_spawning(self)
+ context.assert_spawning(self)
sl = self._semlock
- return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)
+ if sys.platform == 'win32':
+ h = context.get_spawning_popen().duplicate_for_child(sl.handle)
+ else:
+ h = sl.handle
+ return (h, sl.kind, sl.maxvalue, sl.name)
def __setstate__(self, state):
self._semlock = _multiprocessing.SemLock._rebuild(*state)
- debug('recreated blocker with handle %r' % state[0])
+ util.debug('recreated blocker with handle %r' % state[0])
self._make_methods()
+ @staticmethod
+ def _make_name():
+ return '%s-%s' % (process.current_process()._config['semprefix'],
+ next(SemLock._rand))
+
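
The constructor above retries creation under fresh random names until one
does not collide. The same pattern as a standalone sketch, using exclusive
file creation in place of sem_open:

    import tempfile

    _rand = tempfile._RandomNameSequence()     # same private helper as above

    def create_with_unique_name(create, prefix='mp-', tries=100):
        for _ in range(tries):
            try:
                return create(prefix + next(_rand))
            except FileExistsError:
                continue
        raise FileExistsError('cannot find name for semaphore')

    # e.g. create_with_unique_name(lambda n: open(n, 'x'))   # 'x' = exclusive
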
#
# Semaphore
#
class Semaphore(SemLock):
- def __init__(self, value=1):
- SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
+ def __init__(self, value=1, *, ctx):
+ SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)
def get_value(self):
return self._semlock._get_value()
@@ -99,8 +142,8 @@ class Semaphore(SemLock):
class BoundedSemaphore(Semaphore):
- def __init__(self, value=1):
- SemLock.__init__(self, SEMAPHORE, value, value)
+ def __init__(self, value=1, *, ctx):
+ SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)
def __repr__(self):
try:
@@ -116,13 +159,13 @@ class BoundedSemaphore(Semaphore):
class Lock(SemLock):
- def __init__(self):
- SemLock.__init__(self, SEMAPHORE, 1, 1)
+ def __init__(self, *, ctx):
+ SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
- name = current_process().name
+ name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
elif self._semlock._get_value() == 1:
@@ -141,13 +184,13 @@ class Lock(SemLock):
class RLock(SemLock):
- def __init__(self):
- SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)
+ def __init__(self, *, ctx):
+ SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
- name = current_process().name
+ name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
count = self._semlock._count()
@@ -167,15 +210,15 @@ class RLock(SemLock):
class Condition(object):
- def __init__(self, lock=None):
- self._lock = lock or RLock()
- self._sleeping_count = Semaphore(0)
- self._woken_count = Semaphore(0)
- self._wait_semaphore = Semaphore(0)
+ def __init__(self, lock=None, *, ctx):
+ self._lock = lock or ctx.RLock()
+ self._sleeping_count = ctx.Semaphore(0)
+ self._woken_count = ctx.Semaphore(0)
+ self._wait_semaphore = ctx.Semaphore(0)
self._make_methods()
def __getstate__(self):
- assert_spawning(self)
+ context.assert_spawning(self)
return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore)
@@ -289,9 +332,9 @@ class Condition(object):
class Event(object):
- def __init__(self):
- self._cond = Condition(Lock())
- self._flag = Semaphore(0)
+ def __init__(self, *, ctx):
+ self._cond = ctx.Condition(ctx.Lock())
+ self._flag = ctx.Semaphore(0)
def is_set(self):
self._cond.acquire()
@@ -340,11 +383,11 @@ class Event(object):
class Barrier(threading.Barrier):
- def __init__(self, parties, action=None, timeout=None):
+ def __init__(self, parties, action=None, timeout=None, *, ctx):
import struct
- from multiprocessing.heap import BufferWrapper
+ from .heap import BufferWrapper
wrapper = BufferWrapper(struct.calcsize('i') * 2)
- cond = Condition()
+ cond = ctx.Condition()
self.__setstate__((parties, action, timeout, cond, wrapper))
self._state = 0
self._count = 0
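
Since every primitive now takes a keyword-only ctx, user code normally
obtains them from a context object, which supplies it implicitly. A usage
sketch:

    import multiprocessing

    def child(ev):
        ev.set()

    if __name__ == '__main__':
        ctx = multiprocessing.get_context('spawn')   # or 'fork'/'forkserver'
        ev = ctx.Event()                             # ctx filled in for us
        p = ctx.Process(target=child, args=(ev,))
        p.start()
        ev.wait()
        p.join()
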
diff --git a/Lib/multiprocessing/util.py b/Lib/multiprocessing/util.py
index f5862b49a4..0b695e46e5 100644
--- a/Lib/multiprocessing/util.py
+++ b/Lib/multiprocessing/util.py
@@ -7,8 +7,6 @@
# Licensed to PSF under a Contributor Agreement.
#
-import sys
-import functools
import os
import itertools
import weakref
@@ -17,13 +15,13 @@ import threading # we want threading to install its
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
-from multiprocessing.process import current_process, active_children
+from . import process
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
- 'SUBDEBUG', 'SUBWARNING',
+ 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
]
#
@@ -71,8 +69,6 @@ def get_logger():
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
- logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
- logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
@@ -111,13 +107,14 @@ def log_to_stderr(level=None):
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
- if current_process()._tempdir is None:
+ tempdir = process.current_process()._config.get('tempdir')
+ if tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
- current_process()._tempdir = tempdir
- return current_process()._tempdir
+ process.current_process()._config['tempdir'] = tempdir
+ return tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
@@ -273,8 +270,8 @@ def is_exiting():
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
- active_children=active_children,
- current_process=current_process):
+ active_children=process.active_children,
+ current_process=process.current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
@@ -303,7 +300,7 @@ def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
# #9207.
for p in active_children():
- if p._daemonic:
+ if p.daemon:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
@@ -335,3 +332,36 @@ class ForkAwareLocal(threading.local):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
+
+#
+# Close fds except those specified
+#
+
+try:
+ MAXFD = os.sysconf("SC_OPEN_MAX")
+except Exception:
+ MAXFD = 256
+
+def close_all_fds_except(fds):
+ fds = list(fds) + [-1, MAXFD]
+ fds.sort()
+ assert fds[-1] == MAXFD, 'fd too large'
+ for i in range(len(fds) - 1):
+ os.closerange(fds[i]+1, fds[i+1])
+
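
close_all_fds_except walks the sorted fd list and closes the gaps between
kept descriptors. A sketch of the same interval walk that only computes the
ranges instead of closing them:

    def closed_ranges(keep, maxfd=256):
        fds = sorted(list(keep) + [-1, maxfd])
        return [(fds[i] + 1, fds[i + 1]) for i in range(len(fds) - 1)]

    print(closed_ranges([0, 1, 2, 7]))
    # [(0, 0), (1, 1), (2, 2), (3, 7), (8, 256)] -- empty ranges are no-ops,
    # so only fds 3-6 and 8-255 are actually closed
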
+#
+# Start a program with only specified fds kept open
+#
+
+def spawnv_passfds(path, args, passfds):
+ import _posixsubprocess
+ passfds = sorted(passfds)
+ errpipe_read, errpipe_write = os.pipe()
+ try:
+ return _posixsubprocess.fork_exec(
+ args, [os.fsencode(path)], True, passfds, None, None,
+ -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
+ False, False, None)
+ finally:
+ os.close(errpipe_read)
+ os.close(errpipe_write)
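
A usage sketch for spawnv_passfds, mirroring how the semaphore tracker and
forkserver helpers are launched; this assumes, as CPython's fork_exec does,
that the kept fds stay open and usable in the child:

    import os, sys

    r, w = os.pipe()
    cmd = 'import os; os.write({0}, b"ready")'.format(w)
    pid = spawnv_passfds(sys.executable, [sys.executable, '-c', cmd], [w])
    os.close(w)                  # parent keeps only the read end
    print(os.read(r, 5))         # b'ready'
    os.close(r)
    os.waitpid(pid, 0)
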