author		Ricardo Neri <ricardo.neri-calderon@linux.intel.com>	2023-04-06 13:31:40 -0700
committer	Peter Zijlstra <peterz@infradead.org>	2023-05-08 10:58:35 +0200
commit		18ad34532755feb5b9f4284b07769b1bfec18ab3 (patch)
tree		40dce503a9ee80901b07c6895434250ca9efd96f /kernel/sched
parent		ef7657d4d2d6a8456aa624010de456c32a135fe9 (diff)
sched/fair: Let low-priority cores help high-priority busy SMT cores
Using asym_packing priorities within an SMT core is straightforward. Just
follow the priorities that hardware indicates.

When balancing load from an SMT core, also consider the idle state of its
siblings. Priorities do not reflect that an SMT core divides its throughput
among all its busy siblings. They only make sense when exactly one sibling
is busy.

Indicate that active balance is needed if the destination CPU has lower
priority than the source CPU but the latter has busy SMT siblings.

Make find_busiest_queue() not skip higher-priority SMT cores with more than
one busy sibling.

Suggested-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Zhang Rui <rui.zhang@intel.com>
Link: https://lore.kernel.org/r/20230406203148.19182-5-ricardo.neri-calderon@linux.intel.com
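To make the intended rule easier to follow, below is a small, self-contained
userspace C model of the active-balance decision as described in this
changelog. It is only a sketch: use_asym_prio_model() and
asym_active_balance_model() are hypothetical names, the booleans stand in
for the real scheduler state queried by sched_use_asym_prio() and
sched_asym_prefer(), and the env->idle precondition is omitted.

/*
 * Illustrative model only; these helpers do not exist in the kernel and
 * mirror the changelog's description, not the exact implementation.
 */
#include <stdbool.h>
#include <stdio.h>

/*
 * Follow CPU priority always between SMT siblings; between cores, only
 * when the whole core in question is idle (the role sched_use_asym_prio()
 * plays in the patch below).
 */
static bool use_asym_prio_model(bool smt_domain, bool core_idle)
{
	return smt_domain || core_idle;
}

/*
 * Active-balance decision after this patch: pull to the destination if it
 * is preferred by priority, or if the source's priority cannot be followed
 * because its SMT siblings are busy.
 */
static bool asym_active_balance_model(bool smt_domain, bool dst_core_idle,
				      bool dst_preferred, bool src_core_idle)
{
	if (!use_asym_prio_model(smt_domain, dst_core_idle))
		return false;

	return dst_preferred || !use_asym_prio_model(smt_domain, src_core_idle);
}

int main(void)
{
	/* Idle low-priority core helping a high-priority core with busy siblings: pull (1). */
	printf("%d\n", asym_active_balance_model(false, true, false, false));
	/* Same, but only one sibling of the high-priority core is busy: do not pull (0). */
	printf("%d\n", asym_active_balance_model(false, true, false, true));
	return 0;
}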
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a8a02ae7d082..85ce2494234d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10551,8 +10551,15 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		    nr_running == 1)
 			continue;
 
-		/* Make sure we only pull tasks from a CPU of lower priority */
+		/*
+		 * Make sure we only pull tasks from a CPU of lower priority
+		 * when balancing between SMT siblings.
+		 *
+		 * If balancing between cores, let lower priority CPUs help
+		 * SMT cores with more than one busy sibling.
+		 */
 		if ((env->sd->flags & SD_ASYM_PACKING) &&
+		    sched_use_asym_prio(env->sd, i) &&
 		    sched_asym_prefer(i, env->dst_cpu) &&
 		    nr_running == 1)
 			continue;
@@ -10645,10 +10652,15 @@ asym_active_balance(struct lb_env *env)
 	 * priority CPUs in order to pack all tasks in the highest priority
 	 * CPUs. When done between cores, do it only if the whole core is
 	 * idle.
+	 *
+	 * If @env::src_cpu is an SMT core with busy siblings, let
+	 * the lower priority @env::dst_cpu help it. Do not follow
+	 * CPU priority.
 	 */
 	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
 	       sched_use_asym_prio(env->sd, env->dst_cpu) &&
-	       sched_asym_prefer(env->dst_cpu, env->src_cpu);
+	       (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
+		!sched_use_asym_prio(env->sd, env->src_cpu));
 }
 
 static inline bool
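The find_busiest_queue() hunk above can be modelled the same way. Again,
this is only an illustrative sketch: skip_candidate_model() is a
hypothetical name, and the booleans stand in for env->sd->flags &
SD_ASYM_PACKING, sched_use_asym_prio() and sched_asym_prefer().

/* Illustrative model of the skip test after this patch; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool use_asym_prio_model(bool smt_domain, bool core_idle)
{
	return smt_domain || core_idle;
}

/*
 * Skip a higher-priority candidate CPU only if its priority must be
 * followed (its core is otherwise idle) and it runs exactly one task;
 * a higher-priority core with busy siblings is no longer skipped.
 */
static bool skip_candidate_model(bool asym_packing, bool smt_domain,
				 bool cand_core_idle, bool cand_preferred,
				 unsigned int nr_running)
{
	return asym_packing &&
	       use_asym_prio_model(smt_domain, cand_core_idle) &&
	       cand_preferred &&
	       nr_running == 1;
}

int main(void)
{
	/* Higher-priority CPU whose SMT sibling is also busy: eligible to be picked (0). */
	printf("%d\n", skip_candidate_model(true, false, false, true, 1));
	/* Higher-priority CPU alone on an otherwise idle core: still skipped (1). */
	printf("%d\n", skip_candidate_model(true, false, true, true, 1));
	return 0;
}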