author     David S. Miller <davem@davemloft.net>  2010-10-06 19:39:31 -0700
committer  David S. Miller <davem@davemloft.net>  2010-10-06 19:39:31 -0700
commit     69259abb64d4da77273bf59accfc9fa79e7165f4 (patch)
tree       bd043ab03a788b749c8d5ae4049d8defae9abf34 /kernel/sched_fair.c
parent     dd53df265b1ee7a1fbbc76bb62c3bec2383bbd44 (diff)
parent     12e94471b2be5ef9b55b10004a3a2cd819490036 (diff)
download   linux-next-69259abb64d4da77273bf59accfc9fa79e7165f4.tar.gz
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/pcmcia/pcnet_cs.c
	net/caif/caif_socket.c
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  13
1 file changed, 5 insertions, 8 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ab661ebc4895..db3f674ca49d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
  * Minimal preemption granularity for CPU-bound tasks:
  * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 2000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 3;
+static unsigned int sched_nr_latency = 8;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		  int this_cpu, int load_idx)
 {
-	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+	struct sched_group *idlest = NULL, *group = sd->groups;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
 		if (local_group) {
 			this_load = avg_load;
-			this = group;
 		} else if (avg_load < min_load) {
 			min_load = avg_load;
 			idlest = group;
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	u64 total, available;
 
-	sched_avg_update(rq);
-
 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
 	available = total - rq->rt_avg;
 
@@ -3633,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	if (time_before(now, nohz.next_balance))
 		return 0;
 
-	if (!rq->nr_running)
+	if (rq->idle_at_tick)
 		return 0;
 
 	first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
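The retuned constants stay consistent with the in-code comment that sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity: both the old pair (2 ms, 3) and the new pair (0.75 ms, 8) imply a 6 ms sysctl_sched_latency. The standalone sketch below only reproduces that arithmetic; the 6,000,000 ns latency value is an assumption inferred from those ratios, since it does not appear in the hunks shown here.

#include <stdio.h>

/* Assumed default sysctl_sched_latency for this tree (6 ms in ns);
 * inferred from the ratios above, not visible in the hunks shown. */
#define SCHED_LATENCY_NS 6000000ULL

int main(void)
{
	/* Old tuning: 6 ms / 2 ms   -> sched_nr_latency = 3 */
	unsigned long long old_nr = SCHED_LATENCY_NS / 2000000ULL;

	/* New tuning: 6 ms / 0.75 ms -> sched_nr_latency = 8 */
	unsigned long long new_nr = SCHED_LATENCY_NS / 750000ULL;

	printf("old sched_nr_latency = %llu\n", old_nr);	/* prints 3 */
	printf("new sched_nr_latency = %llu\n", new_nr);	/* prints 8 */
	return 0;
}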