author     Giampaolo Rodola <g.rodola@gmail.com>  2021-10-03 09:48:35 +0200
committer  Giampaolo Rodola <g.rodola@gmail.com>  2021-10-03 09:48:35 +0200
commit     d01233263f046f07d5139a8611671525f74e3dd0 (patch)
tree       f7264da94ab977dc82c95b854c0d08ca75677b44
parent     102a3dd115ef763920b7db0741d585155e8282ed (diff)
download   psutil-d01233263f046f07d5139a8611671525f74e3dd0.tar.gz
Fix #1991: process_iter() raises TypeError with multiple threads.

When entering the function, take a copy() of the global dict and operate on that copy instead of the global object, then write the copy back to the global object on function exit.

Signed-off-by: Giampaolo Rodola <g.rodola@gmail.com>
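In essence the patch replaces lock-based synchronization with a copy-on-entry / publish-on-exit scheme. The snippet below is a minimal standalone sketch of that idea, not psutil's actual code: the names _cache and refresh_items and the placeholder values are made up for illustration. Each call snapshots the shared dict, mutates only its private copy while yielding, and stores the copy back into the module-level name when the generator finishes.

    _cache = {}  # hypothetical shared module-level dict, analogous to psutil's _pmap


    def refresh_items(current_keys):
        """Yield (key, value) pairs while keeping the shared cache up to date."""
        global _cache
        cache = _cache.copy()                # work on a private snapshot
        for stale in set(cache) - set(current_keys):
            cache.pop(stale, None)           # drop entries that disappeared
        for key in current_keys:
            cache.setdefault(key, None)      # placeholder for new entries
        try:
            for key in sorted(cache):
                if cache[key] is None:
                    cache[key] = "value-for-%r" % key   # lazily materialize
                yield key, cache[key]
        finally:
            _cache = cache                   # publish the snapshot on exit

In CPython the final `_cache = cache` is an atomic rebind of a module-level name under the GIL, so two threads calling refresh_items() concurrently each iterate over their own copy; the last one to finish simply wins, which is acceptable for a cache.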
-rw-r--r--   HISTORY.rst        |  2
-rw-r--r--   psutil/__init__.py | 78
2 files changed, 41 insertions, 39 deletions
diff --git a/HISTORY.rst b/HISTORY.rst
index 660467cd..ee7803d1 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -29,6 +29,8 @@ XXXX-XX-XX
   by Xuehai Pan)
 - 1953_: [Windows] disk_partitions() crashes due to insufficient buffer len.
   (patch by MaWe2019)
+- 1991_: process_iter() can raise TypeError if invoked from multiple threads
+  (not thread-safe).
 
 5.8.0
 =====
diff --git a/psutil/__init__.py b/psutil/__init__.py
index 92b6398a..41f9bf5a 100644
--- a/psutil/__init__.py
+++ b/psutil/__init__.py
@@ -1387,7 +1387,6 @@ def pid_exists(pid):
 
 
 _pmap = {}
-_lock = threading.Lock()
 
 
 def process_iter(attrs=None, ad_value=None):
@@ -1411,58 +1410,59 @@ def process_iter(attrs=None, ad_value=None):
     If *attrs* is an empty list it will retrieve all process info
     (slow).
     """
+    global _pmap
+
     def add(pid):
         proc = Process(pid)
         if attrs is not None:
             proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
-        with _lock:
-            _pmap[proc.pid] = proc
+        pmap[proc.pid] = proc
         return proc
 
     def remove(pid):
-        with _lock:
-            _pmap.pop(pid, None)
+        pmap.pop(pid, None)
 
+    pmap = _pmap.copy()
     a = set(pids())
-    b = set(_pmap.keys())
+    b = set(pmap.keys())
     new_pids = a - b
     gone_pids = b - a
     for pid in gone_pids:
         remove(pid)
-
-    with _lock:
-        ls = sorted(list(_pmap.items()) +
-                    list(dict.fromkeys(new_pids).items()))
-
-    for pid, proc in ls:
-        try:
-            if proc is None:  # new process
-                yield add(pid)
-            else:
-                # use is_running() to check whether PID has been reused by
-                # another process in which case yield a new Process instance
-                if proc.is_running():
-                    if attrs is not None:
-                        proc.info = proc.as_dict(
-                            attrs=attrs, ad_value=ad_value)
-                    yield proc
-                else:
+    try:
+        ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
+        for pid, proc in ls:
+            try:
+                if proc is None:  # new process
                     yield add(pid)
-        except NoSuchProcess:
-            remove(pid)
-        except AccessDenied:
-            # Process creation time can't be determined hence there's
-            # no way to tell whether the pid of the cached process
-            # has been reused. Just return the cached version.
-            if proc is None and pid in _pmap:
-                try:
-                    yield _pmap[pid]
-                except KeyError:
-                    # If we get here it is likely that 2 threads were
-                    # using process_iter().
-                    pass
-            else:
-                raise
+                else:
+                    # use is_running() to check whether PID has been
+                    # reused by another process in which case yield a
+                    # new Process instance
+                    if proc.is_running():
+                        if attrs is not None:
+                            proc.info = proc.as_dict(
+                                attrs=attrs, ad_value=ad_value)
+                        yield proc
+                    else:
+                        yield add(pid)
+            except NoSuchProcess:
+                remove(pid)
+            except AccessDenied:
+                # Process creation time can't be determined hence there's
+                # no way to tell whether the pid of the cached process
+                # has been reused. Just return the cached version.
+                if proc is None and pid in pmap:
+                    try:
+                        yield pmap[pid]
+                    except KeyError:
+                        # If we get here it is likely that 2 threads were
+                        # using process_iter().
+                        pass
+                else:
+                    raise
+    finally:
+        _pmap = pmap
 
 
 def wait_procs(procs, timeout=None, callback=None):
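For reference, the workload described in issue 1991 (several threads walking the process list at the same time) can be exercised with a small script like the one below. This is an illustrative sketch, not taken from the bug report; it only uses the public psutil API. With this patch each process_iter() call works on its own copy of the internal PID cache, so concurrent iteration no longer trips over another thread's mutations.

    import threading

    import psutil


    def worker():
        # Each thread iterates over all processes; proc.info holds the
        # fields requested via attrs.
        for proc in psutil.process_iter(attrs=['pid', 'name']):
            _ = proc.info['name']


    threads = [threading.Thread(target=worker) for _ in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()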