summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorbrian.quinlan <devnull@localhost>2010-02-23 08:29:17 +0000
committerbrian.quinlan <devnull@localhost>2010-02-23 08:29:17 +0000
commite69b63aebbdc8a53866e8acb1e94f983b4c37c14 (patch)
tree5f5194cb13ec57c8e6d63ff49cf1bbad44e0d826
parent93d583e9a74ab087821d06d862543db7c9ef043f (diff)
downloadfutures-e69b63aebbdc8a53866e8acb1e94f983b4c37c14.tar.gz
Documentation updates.
-rw-r--r--PEP.txt65
-rw-r--r--docs/index.rst23
-rw-r--r--python2/futures/process.py16
-rw-r--r--python2/futures/thread.py10
-rw-r--r--python2/test_futures.py24
-rw-r--r--python3/futures/process.py16
-rw-r--r--python3/futures/thread.py10
-rw-r--r--python3/test_futures.py24
8 files changed, 101 insertions, 87 deletions
diff --git a/PEP.txt b/PEP.txt
index 483c25f..5048405 100644
--- a/PEP.txt
+++ b/PEP.txt
@@ -5,7 +5,7 @@ Last-Modified: $Date$
Author: Brian Quinlan <brian@sweetapp.com>
Status: Draft
Type: Standards Track
-Content-Type: text/x-rst
+Content-Type: text/x-rst
Created: 16-Oct-2009
Python-Version: 3.2
Post-History:
@@ -80,7 +80,7 @@ Web Crawl Example
def load_url(url, timeout):
return urllib.request.urlopen(url, timeout=timeout).read()
- with futures.ThreadPoolExecutor(max_threads=5) as executor:
+ with futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_url = dict((executor.submit(load_url, url, 60), url)
for url in URLS)
@@ -115,32 +115,41 @@ a `Future` instance representing the execution of the function.
Equivalent to map(*func*, *\*iterables*) but executed asynchronously and
possibly out-of-order. The returned iterator raises a `TimeoutError` if
`__next__()` is called and the result isn't available after *timeout* seconds
-from the original call to `run_to_results()`. If *timeout* is not specified or
+from the original call to `map()`. If *timeout* is not specified or
``None`` then there is no limit to the wait time. If a call raises an exception
then that exception will be raised when its value is retrieved from the
iterator.
-`Executor.shutdown(wait=False)`
+`shutdown(wait=True)`
Signal the executor that it should free any resources that it is using when
the currently pending futures are done executing. Calls to
-`Executor.run_to_futures`, `Executor.run_to_results` and
-`Executor.map` made after shutdown will raise `RuntimeError`.
+`Executor.submit` and `Executor.map` made after shutdown will raise
+`RuntimeError`.
If wait is `True` then the executor will not return until all the pending
futures are done executing and the resources associated with the executor
have been freed.
+`__enter__()`
+`__exit__(exc_type, exc_val, exc_tb)`
+
+When using an executor as a context manager, `__exit__` will call
+`Executor.shutdown(wait=True)`.
+
+
ProcessPoolExecutor
'''''''''''''''''''
The `ProcessPoolExecutor` class is an `Executor` subclass that uses a pool of
-processes to execute calls asynchronously.
+processes to execute calls asynchronously. The callable objects and arguments
+passed to `ProcessPoolExecutor.submit` must be serializable according to the
+same limitations as the multiprocessing module.
-`__init__(max_processes)`
+`__init__(max_workers)`
-Executes calls asynchronously using a pool of a most *max_processes*
-processes. If *max_processes* is ``None`` or not given then as many worker
+Executes calls asynchronously using a pool of at most *max_workers*
+processes. If *max_workers* is ``None`` or not given then as many worker
processes will be created as the machine has processors.
ThreadPoolExecutor
@@ -149,9 +158,9 @@ ThreadPoolExecutor
The `ThreadPoolExecutor` class is an `Executor` subclass that uses a pool of
threads to execute calls asynchronously.
-`__init__(max_threads)`
+`__init__(max_workers)`
-Executes calls asynchronously using a pool of at most *max_threads* threads.
+Executes calls asynchronously using a pool of at most *max_workers* threads.
Future Objects
''''''''''''''
@@ -165,7 +174,7 @@ Attempt to cancel the call. If the call is currently being executed then
it cannot be cancelled and the method will return `False`, otherwise the call
will be cancelled and the method will return `True`.
-`Future.cancelled()`
+`cancelled()`
Return `True` if the call was successfully cancelled.
@@ -198,19 +207,15 @@ be raised.
If the call completed without raising then ``None`` is returned.
-`index`
-
-int indicating the index of the future in its `FutureList`.
-
Module Functions
''''''''''''''''
`wait(fs, timeout=None, return_when=ALL_COMPLETED)`
Wait for the `Future` instances in the given sequence to complete. Returns a
-2-tuple of sets. The first set contains the futures that completed (finished
-or were cancelled) before the wait completed. The second set contains
-uncompleted futures.
+named 2-tuple of sets. The first set, named "finished", contains the futures
+that completed (finished or were cancelled) before the wait completed. The
+second set, named "not_finished", contains uncompleted futures.
This method should always be called using keyword arguments, which are:
@@ -226,11 +231,12 @@ following constants:
============================= ==================================================
Constant Description
============================= ==================================================
-`FIRST_COMPLETED` The method will return when any call finishes.
-`FIRST_EXCEPTION` The method will return when any call raises an
- exception or when all calls finish.
+`FIRST_COMPLETED` The method will return when any future finishes or
+ is cancelled.
+`FIRST_EXCEPTION` The method will return when any future finishes by
+ raising an exception. If no future raises an
+ exception then it is equivalent to ALL_COMPLETED.
`ALL_COMPLETED` The method will return when all calls finish.
-`RETURN_IMMEDIATELY` The method will return immediately.
============================= ==================================================
`as_completed(fs, timeout=None)`
@@ -273,11 +279,13 @@ method calls as asynchronous. A proxy result would be returned while the
operation is eagerly evaluated asynchronously, and execution would only
block if the proxy object were used before the operation completed.
+Anh Hai Trinh proposed a simpler but more limited API concept [5]_.
+
========================
Reference Implementation
========================
-The reference implementation [5]_ contains a complete implementation of the
+The reference implementation [6]_ contains a complete implementation of the
proposed design. It has been tested on Linux and Mac OS X.
==========
@@ -307,8 +315,13 @@ References
.. [5]
- Reference `futures` implementation `http://code.google.com/p/pythonfutures`
+ A discussion of `stream`, a similar concept proposed by Anh Hai Trinh
+ `http://www.mail-archive.com/stdlib-sig@python.org/msg00480.html`
+.. [6]
+
+ Reference `futures` implementation `http://code.google.com/p/pythonfutures`
+
=========
Copyright
=========
diff --git a/docs/index.rst b/docs/index.rst
index 0a37a61..356c059 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -26,7 +26,7 @@ subclasses: :class:`ThreadPoolExecutor` and :class:`ProcessPoolExecutor`.
::
- with ThreadPoolExecutor(max_threads=1) as executor:
+ with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(pow, 323, 1235)
print(future.result())
@@ -58,7 +58,7 @@ subclasses: :class:`ThreadPoolExecutor` and :class:`ProcessPoolExecutor`.
::
import shutil
- with ThreadPoolExecutor(max_threads=4) as e:
+ with ThreadPoolExecutor(max_workers=4) as e:
e.submit(shutil.copy, 'src1.txt', 'dest1.txt')
e.submit(shutil.copy, 'src2.txt', 'dest2.txt')
e.submit(shutil.copy, 'src3.txt', 'dest3.txt')
@@ -71,9 +71,9 @@ ThreadPoolExecutor Objects
The :class:`ThreadPoolExecutor` class is an :class:`Executor` subclass that uses
a pool of threads to execute calls asynchronously.
-.. class:: ThreadPoolExecutor(max_threads)
+.. class:: ThreadPoolExecutor(max_workers)
- Executes calls asynchronously using at pool of at most *max_threads* threads.
+ Executes calls asynchronously using a pool of at most *max_workers* threads.
.. _threadpoolexecutor-example:
@@ -93,7 +93,7 @@ ThreadPoolExecutor Example
def load_url(url, timeout):
return urllib.request.urlopen(url, timeout=timeout).read()
- with futures.ThreadPoolExecutor(max_threads=5) as executor:
+ with futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_url = dict((executor.submit(load_url, url, 60), url)
for url in URLS)
@@ -113,10 +113,10 @@ uses a pool of processes to execute calls asynchronously.
allows it to side-step the :term:`Global Interpreter Lock` but also means that
only picklable objects can be executed and returned.
-.. class:: ProcessPoolExecutor(max_processes=None)
+.. class:: ProcessPoolExecutor(max_workers=None)
- Executes calls asynchronously using a pool of at most *max_processes*
- processes. If *max_processes* is ``None`` or not given then as many worker
+ Executes calls asynchronously using a pool of at most *max_workers*
+ processes. If *max_workers* is ``None`` or not given then as many worker
processes will be created as the machine has processors.
.. _processpoolexecutor-example:
@@ -201,9 +201,10 @@ Module Functions
.. function:: wait(fs, timeout=None, return_when=ALL_COMPLETED)
Wait for the :class:`Future` instances in the given sequence to complete.
- Returns a 2-tuple of sets. The first set contains the futures that completed
- (finished or were cancelled) before the wait completed. The second set
- contains uncompleted futures.
+ Returns a named 2-tuple of sets. The first set, named "finished", contains
+ the futures that completed (finished or were cancelled) before the wait
+ completed. The second set, named "not_finished", contains uncompleted
+ futures.
This method should always be called using keyword arguments, which are:
diff --git a/python2/futures/process.py b/python2/futures/process.py
index 7f1b153..f0d7fdf 100644
--- a/python2/futures/process.py
+++ b/python2/futures/process.py
@@ -86,7 +86,7 @@ def _remove_dead_thread_references():
Should be called periodically to prevent memory leaks in scenarios such as:
>>> while True:
- >>> ... t = ThreadPoolExecutor(max_threads=5)
+ >>> ... t = ThreadPoolExecutor(max_workers=5)
>>> ... t.map(int, ['1', '2', '3', '4', '5'])
"""
for thread_reference in set(_thread_references):
@@ -253,25 +253,25 @@ def _queue_manangement_worker(executor_reference,
result_item.result)
class ProcessPoolExecutor(Executor):
- def __init__(self, max_processes=None):
+ def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
- max_processes: The maximum number of processes that can be used to
+ max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_remove_dead_thread_references()
- if max_processes is None:
- self._max_processes = multiprocessing.cpu_count()
+ if max_workers is None:
+ self._max_workers = multiprocessing.cpu_count()
else:
- self._max_processes = max_processes
+ self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
- self._call_queue = multiprocessing.Queue(self._max_processes +
+ self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = Queue.Queue()
@@ -301,7 +301,7 @@ class ProcessPoolExecutor(Executor):
_thread_references.add(weakref.ref(self._queue_management_thread))
def _adjust_process_count(self):
- for _ in range(len(self._processes), self._max_processes):
+ for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
diff --git a/python2/futures/thread.py b/python2/futures/thread.py
index 4071574..4f410fe 100644
--- a/python2/futures/thread.py
+++ b/python2/futures/thread.py
@@ -46,7 +46,7 @@ def _remove_dead_thread_references():
Should be called periodically to prevent thread objects from accumulating in
scenarios such as:
>>> while True:
- >>> ... t = ThreadPoolExecutor(max_threads=5)
+ >>> ... t = ThreadPoolExecutor(max_workers=5)
>>> ... t.map(int, ['1', '2', '3', '4', '5'])
"""
for thread_reference in set(_thread_references):
@@ -109,16 +109,16 @@ def _worker(executor_reference, work_queue):
LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(Executor):
- def __init__(self, max_threads):
+ def __init__(self, max_workers):
"""Initializes a new ThreadPoolExecutor instance.
Args:
- max_threads: The maximum number of threads that can be used to
+ max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
_remove_dead_thread_references()
- self._max_threads = max_threads
+ self._max_workers = max_workers
self._work_queue = Queue.Queue()
self._threads = set()
self._shutdown = False
@@ -126,7 +126,7 @@ class ThreadPoolExecutor(Executor):
def _adjust_thread_count(self):
for _ in range(len(self._threads),
- min(self._max_threads, self._work_queue.qsize())):
+ min(self._max_workers, self._work_queue.qsize())):
t = threading.Thread(target=_worker,
args=(weakref.ref(self), self._work_queue))
t.setDaemon(True)
diff --git a/python2/test_futures.py b/python2/test_futures.py
index e4bdf36..cf74286 100644
--- a/python2/test_futures.py
+++ b/python2/test_futures.py
@@ -109,7 +109,7 @@ class ExecutorShutdownTest(unittest.TestCase):
class ThreadPoolShutdownTest(ExecutorShutdownTest):
def setUp(self):
- self.executor = futures.ThreadPoolExecutor(max_threads=5)
+ self.executor = futures.ThreadPoolExecutor(max_workers=5)
def tearDown(self):
self.executor.shutdown()
@@ -122,7 +122,7 @@ class ThreadPoolShutdownTest(ExecutorShutdownTest):
t.join()
def test_context_manager_shutdown(self):
- with futures.ThreadPoolExecutor(max_threads=5) as e:
+ with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
@@ -131,7 +131,7 @@ class ThreadPoolShutdownTest(ExecutorShutdownTest):
t.join()
def test_del_shutdown(self):
- executor = futures.ThreadPoolExecutor(max_threads=5)
+ executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
@@ -141,7 +141,7 @@ class ThreadPoolShutdownTest(ExecutorShutdownTest):
class ProcessPoolShutdownTest(ExecutorShutdownTest):
def setUp(self):
- self.executor = futures.ProcessPoolExecutor(max_processes=5)
+ self.executor = futures.ProcessPoolExecutor(max_workers=5)
def tearDown(self):
self.executor.shutdown()
@@ -156,7 +156,7 @@ class ProcessPoolShutdownTest(ExecutorShutdownTest):
p.join()
def test_context_manager_shutdown(self):
- with futures.ProcessPoolExecutor(max_processes=5) as e:
+ with futures.ProcessPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
@@ -166,7 +166,7 @@ class ProcessPoolShutdownTest(ExecutorShutdownTest):
p.join()
def test_del_shutdown(self):
- executor = futures.ProcessPoolExecutor(max_processes=5)
+ executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
@@ -242,14 +242,14 @@ class WaitsTest(unittest.TestCase):
class ThreadPoolWaitTests(WaitsTest):
def setUp(self):
- self.executor = futures.ThreadPoolExecutor(max_threads=1)
+ self.executor = futures.ThreadPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown()
class ProcessPoolWaitTests(WaitsTest):
def setUp(self):
- self.executor = futures.ProcessPoolExecutor(max_processes=1)
+ self.executor = futures.ProcessPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown()
@@ -377,14 +377,14 @@ class CancelTests(unittest.TestCase):
class ThreadPoolCancelTests(CancelTests):
def setUp(self):
- self.executor = futures.ThreadPoolExecutor(max_threads=1)
+ self.executor = futures.ThreadPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown()
class ProcessPoolCancelTests(WaitsTest):
def setUp(self):
- self.executor = futures.ProcessPoolExecutor(max_processes=1)
+ self.executor = futures.ProcessPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown()
@@ -496,14 +496,14 @@ class ExecutorTest(unittest.TestCase):
class ThreadPoolExecutorTest(ExecutorTest):
def setUp(self):
- self.executor = futures.ThreadPoolExecutor(max_threads=1)
+ self.executor = futures.ThreadPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown()
class ProcessPoolExecutorTest(ExecutorTest):
def setUp(self):
- self.executor = futures.ProcessPoolExecutor(max_processes=1)
+ self.executor = futures.ProcessPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown()
diff --git a/python3/futures/process.py b/python3/futures/process.py
index a763223..4d3b643 100644
--- a/python3/futures/process.py
+++ b/python3/futures/process.py
@@ -81,7 +81,7 @@ def _remove_dead_thread_references():
Should be called periodically to prevent memory leaks in scenarios such as:
>>> while True:
- >>> ... t = ThreadPoolExecutor(max_threads=5)
+ >>> ... t = ThreadPoolExecutor(max_workers=5)
>>> ... t.map(int, ['1', '2', '3', '4', '5'])
"""
for thread_reference in set(_thread_references):
@@ -247,25 +247,25 @@ def _queue_manangement_worker(executor_reference,
work_item.future._set_result(result_item.result)
class ProcessPoolExecutor(Executor):
- def __init__(self, max_processes=None):
+ def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
- max_processes: The maximum number of processes that can be used to
+ max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_remove_dead_thread_references()
- if max_processes is None:
- self._max_processes = multiprocessing.cpu_count()
+ if max_workers is None:
+ self._max_workers = multiprocessing.cpu_count()
else:
- self._max_processes = max_processes
+ self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
- self._call_queue = multiprocessing.Queue(self._max_processes +
+ self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = queue.Queue()
@@ -295,7 +295,7 @@ class ProcessPoolExecutor(Executor):
_thread_references.add(weakref.ref(self._queue_management_thread))
def _adjust_process_count(self):
- for _ in range(len(self._processes), self._max_processes):
+ for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
diff --git a/python3/futures/thread.py b/python3/futures/thread.py
index 044552f..b6ea1ba 100644
--- a/python3/futures/thread.py
+++ b/python3/futures/thread.py
@@ -43,7 +43,7 @@ def _remove_dead_thread_references():
Should be called periodically to prevent memory leaks in scenarios such as:
>>> while True:
- >>> ... t = ThreadPoolExecutor(max_threads=5)
+ >>> ... t = ThreadPoolExecutor(max_workers=5)
>>> ... t.map(int, ['1', '2', '3', '4', '5'])
"""
for thread_reference in set(_thread_references):
@@ -98,16 +98,16 @@ def _worker(executor_reference, work_queue):
LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(Executor):
- def __init__(self, max_threads):
+ def __init__(self, max_workers):
"""Initializes a new ThreadPoolExecutor instance.
Args:
- max_threads: The maximum number of threads that can be used to
+ max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
_remove_dead_thread_references()
- self._max_threads = max_threads
+ self._max_workers = max_workers
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
@@ -129,7 +129,7 @@ class ThreadPoolExecutor(Executor):
def _adjust_thread_count(self):
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
- if len(self._threads) < self._max_threads:
+ if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
args=(weakref.ref(self), self._work_queue))
t.daemon = True
diff --git a/python3/test_futures.py b/python3/test_futures.py
index 2a36ba4..6241d0f 100644
--- a/python3/test_futures.py
+++ b/python3/test_futures.py
@@ -112,7 +112,7 @@ class ExecutorShutdownTest(unittest.TestCase):
class ThreadPoolShutdownTest(ExecutorShutdownTest):
def setUp(self):
- self.executor = futures.ThreadPoolExecutor(max_threads=5)
+ self.executor = futures.ThreadPoolExecutor(max_workers=5)
def tearDown(self):
self.executor.shutdown(wait=True)
@@ -125,7 +125,7 @@ class ThreadPoolShutdownTest(ExecutorShutdownTest):
t.join()
def test_context_manager_shutdown(self):
- with futures.ThreadPoolExecutor(max_threads=5) as e:
+ with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
@@ -134,7 +134,7 @@ class ThreadPoolShutdownTest(ExecutorShutdownTest):
t.join()
def test_del_shutdown(self):
- executor = futures.ThreadPoolExecutor(max_threads=5)
+ executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
@@ -144,7 +144,7 @@ class ThreadPoolShutdownTest(ExecutorShutdownTest):
class ProcessPoolShutdownTest(ExecutorShutdownTest):
def setUp(self):
- self.executor = futures.ProcessPoolExecutor(max_processes=5)
+ self.executor = futures.ProcessPoolExecutor(max_workers=5)
def tearDown(self):
self.executor.shutdown(wait=True)
@@ -159,7 +159,7 @@ class ProcessPoolShutdownTest(ExecutorShutdownTest):
p.join()
def test_context_manager_shutdown(self):
- with futures.ProcessPoolExecutor(max_processes=5) as e:
+ with futures.ProcessPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
@@ -169,7 +169,7 @@ class ProcessPoolShutdownTest(ExecutorShutdownTest):
p.join()
def test_del_shutdown(self):
- executor = futures.ProcessPoolExecutor(max_processes=5)
+ executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
@@ -407,14 +407,14 @@ class WaitTests(unittest.TestCase):
class ThreadPoolWaitTests(WaitTests):
def setUp(self):
- self.executor = futures.ThreadPoolExecutor(max_threads=1)
+ self.executor = futures.ThreadPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
class ProcessPoolWaitTests(WaitTests):
def setUp(self):
- self.executor = futures.ProcessPoolExecutor(max_processes=1)
+ self.executor = futures.ProcessPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
@@ -477,14 +477,14 @@ class AsCompletedTests(unittest.TestCase):
class ThreadPoolAsCompletedTests(AsCompletedTests):
def setUp(self):
- self.executor = futures.ThreadPoolExecutor(max_threads=1)
+ self.executor = futures.ThreadPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
class ProcessPoolAsCompletedTests(AsCompletedTests):
def setUp(self):
- self.executor = futures.ProcessPoolExecutor(max_processes=1)
+ self.executor = futures.ProcessPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
@@ -524,14 +524,14 @@ class ExecutorTest(unittest.TestCase):
class ThreadPoolExecutorTest(ExecutorTest):
def setUp(self):
- self.executor = futures.ThreadPoolExecutor(max_threads=1)
+ self.executor = futures.ThreadPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ExecutorTest):
def setUp(self):
- self.executor = futures.ProcessPoolExecutor(max_processes=1)
+ self.executor = futures.ProcessPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)