From: Peter Zijlstra
Date: Fri, 23 Oct 2020 12:12:06 +0200
Subject: [PATCH 08/19] sched: Massage set_cpus_allowed()

Thread a u32 flags word through the *set_cpus_allowed*() callchain.
This will allow adding behavioural tweaks for future users.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Sebastian Andrzej Siewior
---
 kernel/sched/core.c     | 28 ++++++++++++++++++----------
 kernel/sched/deadline.c |  5 +++--
 kernel/sched/sched.h    |  7 +++++--
 3 files changed, 26 insertions(+), 14 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1824,13 +1824,14 @@ static int migration_cpu_stop(void *data
  * sched_class::set_cpus_allowed must do the below, but is not required to
  * actually call this function.
  */
-void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
 {
 	cpumask_copy(&p->cpus_mask, new_mask);
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+static void
+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
 {
 	struct rq *rq = task_rq(p);
 	bool queued, running;
@@ -1851,7 +1852,7 @@ void do_set_cpus_allowed(struct task_str
 	if (running)
 		put_prev_task(rq, p);
 
-	p->sched_class->set_cpus_allowed(p, new_mask);
+	p->sched_class->set_cpus_allowed(p, new_mask, flags);
 
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
@@ -1859,6 +1860,11 @@ void do_set_cpus_allowed(struct task_str
 		set_next_task(rq, p);
 }
 
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	__do_set_cpus_allowed(p, new_mask, 0);
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
@@ -1869,7 +1875,8 @@ void do_set_cpus_allowed(struct task_str
  * call is not atomic; no spinlocks may be held.
  */
 static int __set_cpus_allowed_ptr(struct task_struct *p,
-				  const struct cpumask *new_mask, bool check)
+				  const struct cpumask *new_mask,
+				  u32 flags)
 {
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	unsigned int dest_cpu;
@@ -1891,7 +1898,7 @@ static int __set_cpus_allowed_ptr(struct
 	 * Must re-check here, to close a race against __kthread_bind(),
 	 * sched_setaffinity() is not guaranteed to observe the flag.
 	 */
-	if (check && (p->flags & PF_NO_SETAFFINITY)) {
+	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1910,7 +1917,7 @@ static int __set_cpus_allowed_ptr(struct
 		goto out;
 	}
 
-	do_set_cpus_allowed(p, new_mask);
+	__do_set_cpus_allowed(p, new_mask, flags);
 
 	if (p->flags & PF_KTHREAD) {
 		/*
@@ -1947,7 +1954,7 @@ static int __set_cpus_allowed_ptr(struct
 
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
-	return __set_cpus_allowed_ptr(p, new_mask, false);
+	return __set_cpus_allowed_ptr(p, new_mask, 0);
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
@@ -2406,7 +2413,8 @@ void sched_set_stop_task(int cpu, struct
 #else
 
 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
-					 const struct cpumask *new_mask, bool check)
+					 const struct cpumask *new_mask,
+					 u32 flags)
 {
 	return set_cpus_allowed_ptr(p, new_mask);
 }
@@ -6012,7 +6020,7 @@ long sched_setaffinity(pid_t pid, const
 	}
 #endif
 again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, true);
+	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
 
 	if (!retval) {
 		cpuset_cpus_allowed(p, cpus_allowed);
@@ -6591,7 +6599,7 @@ void init_idle(struct task_struct *idle,
 	 *
 	 * And since this is boot we can forgo the serialization.
 	 */
-	set_cpus_allowed_common(idle, cpumask_of(cpu));
+	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
 #endif
 	/*
 	 * We're having a chicken and egg problem, even though we are
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2307,7 +2307,8 @@ static void task_woken_dl(struct rq *rq,
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask)
+				const struct cpumask *new_mask,
+				u32 flags)
 {
 	struct root_domain *src_rd;
 	struct rq *rq;
@@ -2336,7 +2337,7 @@ static void set_cpus_allowed_dl(struct t
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	set_cpus_allowed_common(p, new_mask);
+	set_cpus_allowed_common(p, new_mask, flags);
 }
 
 /* Assumes rq->lock is held */
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1808,7 +1808,8 @@ struct sched_class {
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
+				 const struct cpumask *newmask,
+				 u32 flags);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1901,7 +1902,9 @@ extern void update_group_capacity(struct
 
 extern void trigger_load_balance(struct rq *rq);
 
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+#define SCA_CHECK		0x01
+
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
 #endif
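[Not part of the patch: the sketch below is an illustrative, self-contained mock-up of the pattern the hunks above introduce, a u32 flags word threaded from the public entry point down to a common helper, with an SCA_CHECK-style bit replacing the old bool argument. Only SCA_CHECK and the PF_NO_SETAFFINITY name are taken from the patch; every other type, value, and function name here is made up for illustration and compiles as plain userspace C.]

/*
 * Illustrative sketch only -- not kernel code.  It mirrors the shape of the
 * patch: callers pass a u32 flags word instead of a bool, and the internal
 * helper tests individual bits (here SCA_CHECK) to tweak behaviour.
 */
#include <stdint.h>
#include <stdio.h>

#define SCA_CHECK		0x01	/* same value the patch adds to sched.h */
#define PF_NO_SETAFFINITY	0x04	/* stand-in value for the real task flag */

struct task {				/* hypothetical, minimal task stand-in */
	uint32_t flags;
	uint64_t cpus_mask;
};

/* Common helper: all callers funnel here; flags ride along, unused for now. */
static void set_cpus_allowed_common_sketch(struct task *p, uint64_t new_mask,
					   uint32_t flags)
{
	(void)flags;			/* reserved for future behavioural tweaks */
	p->cpus_mask = new_mask;
}

/* Internal entry point: SCA_CHECK replaces the old 'bool check' argument. */
static int set_cpus_allowed_sketch(struct task *p, uint64_t new_mask,
				   uint32_t flags)
{
	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY))
		return -1;		/* sched_setaffinity()-style request rejected */

	set_cpus_allowed_common_sketch(p, new_mask, flags);
	return 0;
}

int main(void)
{
	struct task kthread = { .flags = PF_NO_SETAFFINITY, .cpus_mask = 0xf };

	/* Kernel-internal style call: no SCA_CHECK, always succeeds. */
	printf("internal: %d\n", set_cpus_allowed_sketch(&kthread, 0x3, 0));

	/* Userspace-facing style call: SCA_CHECK makes it fail for pinned tasks. */
	printf("checked:  %d\n", set_cpus_allowed_sketch(&kthread, 0x3, SCA_CHECK));
	return 0;
}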