From aee1b7522f16eee04678179d2c58f17dff135c8a Mon Sep 17 00:00:00 2001
From: Angelos Evripiotis
Date: Wed, 5 Jun 2019 14:10:45 +0100
Subject: Rename (spawn, fork) -> 'start process'

Avoid confusion by not referring to starting another process as
'spawning'. Note that 'spawn' is a process creation method, which is an
alternative to forking.

Say 'create child process' instead of 'fork' where it doesn't harm
understanding.

Although we currently only use the 'fork' method for creating
subprocesses, there are reasons for us to support 'spawn' in the
future.

More information on forking and spawning:
https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
---
 doc/source/arch_scheduler.rst                 |  4 ++--
 src/buildstream/_fuse/mount.py                |  6 +++---
 src/buildstream/_scheduler/jobs/elementjob.py |  2 +-
 src/buildstream/_scheduler/jobs/job.py        | 23 +++++++++++------------
 src/buildstream/_scheduler/scheduler.py       | 16 ++++++++--------
 src/buildstream/_signals.py                   |  8 ++++----
 6 files changed, 29 insertions(+), 30 deletions(-)

diff --git a/doc/source/arch_scheduler.rst b/doc/source/arch_scheduler.rst
index 59efd68c2..4b0501495 100644
--- a/doc/source/arch_scheduler.rst
+++ b/doc/source/arch_scheduler.rst
@@ -8,13 +8,13 @@ dispatching *Jobs* to complete tasks on behalf of *Queues*.
 
 Jobs
 ~~~~
-The basic functionality of spawning tasks is implemented by the base Job
+The basic functionality of multiprocessing tasks is implemented by the base Job
 class, which is derived in a few ways but for now we'll only talk about
 the ElementJob type since that is the most centric.
 
 The Job class has the following responsibilities:
 
-* Spawning the given job as a subprocess.
+* Starting the given job as a subprocess.
 
 * Offering an abstract method for subclasses to handle the outcome of
   a job when it completes.
diff --git a/src/buildstream/_fuse/mount.py b/src/buildstream/_fuse/mount.py
index e31684100..ac5fb2295 100644
--- a/src/buildstream/_fuse/mount.py
+++ b/src/buildstream/_fuse/mount.py
@@ -68,8 +68,8 @@ class FuseMountError(Exception):
 # to know when the mount is done, there is no callback for that
 #
 # The solution we use here without digging too deep into the
-# low level fuse API, is to fork a child process which will
-# fun the fuse loop in foreground, and we block the parent
+# low level fuse API, is to start a child process which will
+# run the fuse loop in foreground, and we block the parent
 # process until the volume is mounted with a busy loop with timeouts.
 #
 class Mount():
@@ -104,7 +104,7 @@ class Mount():
         self.__mountpoint = mountpoint
         self.__process = Process(target=self.__run_fuse)
 
-        # Ensure the child fork() does not inherit our signal handlers, if the
+        # Ensure the child process does not inherit our signal handlers, if the
         # child wants to handle a signal then it will first set its own
         # handler, and then unblock it.
         with _signals.blocked([signal.SIGTERM, signal.SIGTSTP, signal.SIGINT], ignore=False):
diff --git a/src/buildstream/_scheduler/jobs/elementjob.py b/src/buildstream/_scheduler/jobs/elementjob.py
index 2be0aa8f9..a535f55db 100644
--- a/src/buildstream/_scheduler/jobs/elementjob.py
+++ b/src/buildstream/_scheduler/jobs/elementjob.py
@@ -25,7 +25,7 @@ from .job import Job, ChildJob
 
 # ElementJob()
 #
-# A job to run an element's commands. When this job is spawned
+# A job to run an element's commands. When this job is started
 # `action_cb` will be called, and when it completes `complete_cb` will
 # be called.
 #
diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index 88156f3bf..91721d34d 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -136,11 +136,11 @@ class Job():
         self._message_unique_id = None
         self._task_id = None
 
-    # spawn()
+    # start()
     #
-    # Spawns the job.
+    # Starts the job.
     #
-    def spawn(self):
+    def start(self):
 
         self._queue = multiprocessing.Queue()
 
@@ -157,7 +157,6 @@ class Job():
             self._task_id,
         )
 
-        # Spawn the process
         self._process = Process(target=child_job.child_action, args=[self._queue])
 
         # Block signals which are handled in the main process such that
@@ -260,13 +259,13 @@ class Job():
 
         try:
             # Use SIGTSTP so that child processes may handle and propagate
-            # it to processes they spawn that become session leaders
+            # it to processes they start that become session leaders.
             os.kill(self._process.pid, signal.SIGTSTP)
 
-            # For some reason we receive exactly one suspend event for every
-            # SIGTSTP we send to the child fork(), even though the child forks
-            # are setsid(). We keep a count of these so we can ignore them
-            # in our event loop suspend_event()
+            # For some reason we receive exactly one suspend event for
+            # every SIGTSTP we send to the child process, even though the
+            # child processes are setsid(). We keep a count of these so we
+            # can ignore them in our event loop suspend_event().
             self._scheduler.internal_stops += 1
             self._suspended = True
         except ProcessLookupError:
@@ -419,7 +418,7 @@ class Job():
         retry_flag = returncode == RC_FAIL
 
         if retry_flag and (self._tries <= self._max_retries) and not self._scheduler.terminated:
-            self.spawn()
+            self.start()
             return
 
         # Resolve the outward facing overall job completion status
@@ -609,8 +608,8 @@ class ChildJob():
 
     # child_process()
    #
-    # This will be executed after fork(), and is intended to perform
-    # the job's task.
+    # This will be executed after starting the child process, and is intended
+    # to perform the job's task.
     #
     # Returns:
     #    (any): A simple object (must be pickle-able, i.e. strings, lists,
diff --git a/src/buildstream/_scheduler/scheduler.py b/src/buildstream/_scheduler/scheduler.py
index 50ad7f07a..80e14dcd0 100644
--- a/src/buildstream/_scheduler/scheduler.py
+++ b/src/buildstream/_scheduler/scheduler.py
@@ -303,18 +303,18 @@ class Scheduler():
         if artifacts.full():
             self._sched_cache_size_job(exclusive=True)
 
-    # _spawn_job()
+    # _start_job()
     #
     # Spawns a job
     #
     # Args:
-    #    job (Job): The job to spawn
+    #    job (Job): The job to start
     #
-    def _spawn_job(self, job):
+    def _start_job(self, job):
         self._active_jobs.append(job)
         if self._job_start_callback:
             self._job_start_callback(job)
-        job.spawn()
+        job.start()
 
     # Callback for the cache size job
     def _cache_size_job_complete(self, status, cache_size):
@@ -373,7 +373,7 @@ class Scheduler():
             self._cleanup_running = \
                 CleanupJob(self, _ACTION_NAME_CLEANUP, 'cleanup/cleanup',
                            complete_cb=self._cleanup_job_complete)
-            self._spawn_job(self._cleanup_running)
+            self._start_job(self._cleanup_running)
 
     # _sched_cache_size_job()
     #
@@ -414,7 +414,7 @@ class Scheduler():
                 CacheSizeJob(self, _ACTION_NAME_CACHE_SIZE,
                              'cache_size/cache_size',
                              complete_cb=self._cache_size_job_complete)
-            self._spawn_job(self._cache_size_running)
+            self._start_job(self._cache_size_running)
 
     # _sched_queue_jobs()
     #
@@ -458,10 +458,10 @@ class Scheduler():
         # If that happens, do another round.
             process_queues = any(q.dequeue_ready() for q in self.queues)
 
-        # Spawn the jobs
+        # Start the jobs
         #
         for job in ready:
-            self._spawn_job(job)
+            self._start_job(job)
 
     # _sched()
     #
diff --git a/src/buildstream/_signals.py b/src/buildstream/_signals.py
index 41b100f93..2df2c7915 100644
--- a/src/buildstream/_signals.py
+++ b/src/buildstream/_signals.py
@@ -26,8 +26,8 @@ from collections import deque
 
 
 # Global per process state for handling of sigterm/sigtstp/sigcont,
-# note that it is expected that this only ever be used by processes
-# the scheduler forks off, not the main process
+# note that it is expected that this only ever be used by new processes
+# the scheduler starts, not the main process.
 terminator_stack = deque()
 suspendable_stack = deque()
 
@@ -50,7 +50,7 @@ def terminator_handler(signal_, frame):
                   file=sys.stderr)
 
     # Use special exit here, terminate immediately, recommended
-    # for precisely this situation where child forks are teminated.
+    # for precisely this situation where child processes are terminated.
     os._exit(-1)
 
 
@@ -125,7 +125,7 @@ def suspend_handler(sig, frame):
 #    suspend_callback (callable): A function to call as process suspend time.
 #    resume_callback (callable): A function to call as process resume time.
 #
-# This must be used in code blocks which spawn processes that become
+# This must be used in code blocks which start processes that become
 # their own session leader. In these cases, SIGSTOP and SIGCONT need
 # to be propagated to the child process group.
 #
-- 
cgit v1.2.1
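
For readers unfamiliar with the distinction the commit message draws between
the 'fork' and 'spawn' process start methods, here is a minimal sketch, not
part of the patch, of how a start method is selected explicitly with
multiprocessing.get_context. It assumes Python 3.4+ on a Unix platform, where
the 'fork' method (the one BuildStream used at the time of this commit) is
available.

import multiprocessing


def child_task(name):
    # Runs in the child process, whichever start method created it.
    print("child {} running".format(name))


if __name__ == '__main__':
    # 'fork' copies the parent process in place (Unix only): the child
    # inherits parent state such as open file descriptors and signal
    # handlers, which is why the patched code is careful to block signals
    # before starting children.
    fork_ctx = multiprocessing.get_context('fork')
    p1 = fork_ctx.Process(target=child_task, args=('forked',))
    p1.start()
    p1.join()

    # 'spawn' starts a fresh interpreter instead: arguments must be
    # picklable and the child inherits far less parent state.
    spawn_ctx = multiprocessing.get_context('spawn')
    p2 = spawn_ctx.Process(target=child_task, args=('spawned',))
    p2.start()
    p2.join()

Because 'spawn' does not copy parent state, supporting it later (as the
commit message anticipates) would be one way to sidestep the inherited
signal-handler issues the patched comments describe.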