Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--  kernel/cpuset.c | 29
1 file changed, 14 insertions(+), 15 deletions(-)
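The rename from 'cg' to 'cgrp' below tracks the scanner's cgroup pointer: each update_tasks_*() helper fills in a struct cgroup_scanner and hands it to cgroup_scan_tasks(), which invokes process_task() on every task in the cgroup. For reference, struct cgroup_scanner of that era (include/linux/cgroup.h) looked roughly like the sketch below; treat the exact layout as an assumption rather than part of this diff.

	/* Sketch of struct cgroup_scanner, shown post-rename. */
	struct cgroup_scanner {
		struct cgroup *cgrp;	/* formerly 'cg' */
		int (*test_task)(struct task_struct *p,
				 struct cgroup_scanner *scan);
		void (*process_task)(struct task_struct *p,
				     struct cgroup_scanner *scan);
		struct ptr_heap *heap;
		void *data;
	};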
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e5657788fedd..1b9c31549797 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -70,7 +70,6 @@ int number_of_cpusets __read_mostly;
/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;
-struct cpuset;
/* See "Frequency meter" comments, below. */
@@ -846,7 +845,7 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
{
struct cpuset *cpus_cs;
- cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
+ cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cgrp));
set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
}
@@ -867,7 +866,7 @@ static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
struct cgroup_scanner scan;
- scan.cg = cs->css.cgroup;
+ scan.cgrp = cs->css.cgroup;
scan.test_task = NULL;
scan.process_task = cpuset_change_cpumask;
scan.heap = heap;
@@ -1063,7 +1062,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
static void cpuset_change_nodemask(struct task_struct *p,
struct cgroup_scanner *scan)
{
- struct cpuset *cs = cgroup_cs(scan->cg);
+ struct cpuset *cs = cgroup_cs(scan->cgrp);
struct mm_struct *mm;
int migrate;
nodemask_t *newmems = scan->data;
@@ -1103,7 +1102,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
guarantee_online_mems(mems_cs, &newmems);
- scan.cg = cs->css.cgroup;
+ scan.cgrp = cs->css.cgroup;
scan.test_task = NULL;
scan.process_task = cpuset_change_nodemask;
scan.heap = heap;
@@ -1276,7 +1275,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
static void cpuset_change_flag(struct task_struct *tsk,
struct cgroup_scanner *scan)
{
- cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
+ cpuset_update_task_spread_flag(cgroup_cs(scan->cgrp), tsk);
}
/*
@@ -1296,7 +1295,7 @@ static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
{
struct cgroup_scanner scan;
- scan.cg = cs->css.cgroup;
+ scan.cgrp = cs->css.cgroup;
scan.test_task = NULL;
scan.process_task = cpuset_change_flag;
scan.heap = heap;
@@ -1972,7 +1971,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
struct cpuset *cs = cgroup_cs(cgrp);
struct cpuset *parent = parent_cs(cs);
struct cpuset *tmp_cs;
- struct cgroup *pos_cg;
+ struct cgroup *pos_cgrp;
if (!parent)
return 0;
@@ -2004,7 +2003,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
* (and likewise for mems) to the new cgroup.
*/
rcu_read_lock();
- cpuset_for_each_child(tmp_cs, pos_cg, parent) {
+ cpuset_for_each_child(tmp_cs, pos_cgrp, parent) {
if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
rcu_read_unlock();
goto out_unlock;
@@ -2021,6 +2020,12 @@ out_unlock:
return 0;
}
+/*
+ * If the cpuset being removed has its flag 'sched_load_balance'
+ * enabled, then simulate turning sched_load_balance off, which
+ * will call rebuild_sched_domains_locked().
+ */
+
static void cpuset_css_offline(struct cgroup *cgrp)
{
struct cpuset *cs = cgroup_cs(cgrp);
@@ -2036,12 +2041,6 @@ static void cpuset_css_offline(struct cgroup *cgrp)
mutex_unlock(&cpuset_mutex);
}
-/*
- * If the cpuset being removed has its flag 'sched_load_balance'
- * enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains_locked().
- */
-
static void cpuset_css_free(struct cgroup *cgrp)
{
struct cpuset *cs = cgroup_cs(cgrp);
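With the comment relocated, cpuset_css_offline() is the function it actually documents. The hunks above elide the middle of that function; the sketch below reconstructs the whole body from the kernel tree of the same era, so the is_sched_load_balance()/update_flag() lines are an assumption rather than part of this patch.

	/* Reconstructed post-patch cpuset_css_offline(); only its first
	 * and last lines appear in the hunks above. */
	static void cpuset_css_offline(struct cgroup *cgrp)
	{
		struct cpuset *cs = cgroup_cs(cgrp);

		mutex_lock(&cpuset_mutex);

		/* "Simulate turning sched_load_balance off": clearing the
		 * flag makes update_flag() call
		 * rebuild_sched_domains_locked(). */
		if (is_sched_load_balance(cs))
			update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);

		number_of_cpusets--;
		clear_bit(CS_ONLINE, &cs->flags);

		mutex_unlock(&cpuset_mutex);
	}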