dect
/
linux-2.6
Archived
13
0
Fork 0

[PATCH] sched: move idle status calculation into rebalance_tick()

Perform the idle state determination in rebalance_tick.

If we separate balancing from sched_tick then we also need to determine the
idle state in rebalance_tick.

V2->V3
	Remove useless idle != 0 check. Checking nr_running seems
	to be sufficient. Thanks Suresh.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Christoph Lameter 2006-12-10 02:20:23 -08:00 committed by Linus Torvalds
parent 7835b98bc6
commit e418e1c2bf
1 changed file with 16 additions and 21 deletions

View File

@ -2867,10 +2867,16 @@ static void update_load(struct rq *this_rq)
*/
static void
rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
rebalance_tick(int this_cpu, struct rq *this_rq)
{
unsigned long interval;
struct sched_domain *sd;
/*
* We are idle if there are no processes running. This
* is valid even if we are the idle process (SMT).
*/
enum idle_type idle = !this_rq->nr_running ?
SCHED_IDLE : NOT_IDLE;
for_each_domain(this_cpu, sd) {
if (!(sd->flags & SD_LOAD_BALANCE))
@ -2902,37 +2908,26 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
/*
* on UP we do not need to balance between CPUs:
*/
/* UP stub: with a single CPU there is nothing to balance against. */
static inline void rebalance_tick(int cpu, struct rq *rq)
{
}
/* UP stub: no other runqueue exists to pull tasks from when idle. */
static inline void idle_balance(int cpu, struct rq *rq)
{
}
/* UP stub: per-runqueue load tracking is only needed for SMP balancing. */
static inline void update_load(struct rq *this_rq)
{
}
#endif
static inline int wake_priority_sleeper(struct rq *rq)
static inline void wake_priority_sleeper(struct rq *rq)
{
int ret = 0;
#ifdef CONFIG_SCHED_SMT
if (!rq->nr_running)
return 0;
return;
spin_lock(&rq->lock);
/*
* If an SMT sibling task has been put to sleep for priority
* reasons reschedule the idle task to see if it can now run.
*/
if (rq->nr_running) {
if (rq->nr_running)
resched_task(rq->idle);
ret = 1;
}
spin_unlock(&rq->lock);
#endif
return ret;
}
DEFINE_PER_CPU(struct kernel_stat, kstat);
@ -3148,20 +3143,20 @@ void scheduler_tick(void)
struct task_struct *p = current;
int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
enum idle_type idle = NOT_IDLE;
update_cpu_clock(p, rq, now);
rq->timestamp_last_tick = now;
if (p == rq->idle) {
if (p == rq->idle)
/* Task on the idle queue */
if (!wake_priority_sleeper(rq))
idle = SCHED_IDLE;
} else
wake_priority_sleeper(rq);
else
task_running_tick(rq, p);
#ifdef CONFIG_SMP
update_load(rq);
rebalance_tick(cpu, rq, idle);
rebalance_tick(cpu, rq);
#endif
}
#ifdef CONFIG_SCHED_SMT