sched: clean up sleep_on() APIs

clean up the sleep_on() APIs:

 - do not use fastcall
 - replace fragile macro magic with proper inline functions

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar 2007-07-09 18:52:01 +02:00
parent 9761eea851
commit 0fec171cdb
2 changed files with 52 additions and 41 deletions
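The second bullet of the commit message ("replace fragile macro magic with proper inline functions") is easiest to see outside the kernel. The toy userspace C sketch below is only an illustration; SETUP_VAR, ACCUMULATE and accumulate() are invented names. Statement macros in the SLEEP_ON_VAR/SLEEP_ON_HEAD style quietly depend on identifiers existing in the caller's scope and only work when used together in a fixed order, while an inline function receives everything it needs as explicit, type-checked parameters, which is the shape sleep_on_head()/sleep_on_tail() take in the diff below.

#include <stdio.h>

/*
 * Fragile macro style, modelled loosely on SLEEP_ON_VAR/SLEEP_ON_HEAD:
 * ACCUMULATE() silently relies on the 'acc' variable that SETUP_VAR
 * declared in the caller's scope.
 */
#define SETUP_VAR       int acc = 0;
#define ACCUMULATE(x)   acc += (x);

/* Inline-function style: all state is an explicit, type-checked parameter. */
static inline void accumulate(int *acc, int x)
{
        *acc += x;
}

int main(void)
{
        SETUP_VAR               /* silently declares 'acc' */
        ACCUMULATE(1);          /* breaks if 'acc' is renamed or SETUP_VAR is removed */

        int total = 0;
        accumulate(&total, 1);  /* works regardless of the caller's local names */

        printf("%d %d\n", acc, total);
        return 0;
}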

include/linux/wait.h

@@ -366,15 +366,15 @@ static inline void remove_wait_queue_locked(wait_queue_head_t *q,
 
 /*
  * These are the old interfaces to sleep waiting for an event.
- * They are racy.  DO NOT use them, use the wait_event* interfaces above.
- * We plan to remove these interfaces during 2.7.
+ * They are racy.  DO NOT use them, use the wait_event* interfaces above.
+ * We plan to remove these interfaces.
  */
-extern void FASTCALL(sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
-                                      signed long timeout));
-extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
-                                                    signed long timeout));
+extern void sleep_on(wait_queue_head_t *q);
+extern long sleep_on_timeout(wait_queue_head_t *q,
+                             signed long timeout);
+extern void interruptible_sleep_on(wait_queue_head_t *q);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
+                                           signed long timeout);
 
 /*
  * Waitqueues which are removed from the waitqueue_head at wakeup time
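The comment retained in the hunk above directs callers to the wait_event* interfaces as the non-racy replacement. The following is a minimal sketch of that pattern, not part of this commit; my_wq, data_ready, consumer() and producer() are hypothetical names used only for illustration.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int data_ready;

/* Sleeps until data_ready becomes non-zero or a signal arrives. */
static int consumer(void)
{
        int ret;

        /*
         * wait_event_interruptible() puts the task on my_wq and marks it
         * sleeping before re-testing the condition, so a wake-up racing
         * with the check cannot be lost the way it can be with
         * interruptible_sleep_on().
         */
        ret = wait_event_interruptible(my_wq, data_ready);
        if (ret)
                return ret;     /* -ERESTARTSYS if interrupted by a signal */
        return 0;
}

static void producer(void)
{
        data_ready = 1;
        wake_up_interruptible(&my_wq);
}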

kernel/sched.c

@@ -3699,74 +3699,85 @@ out:
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-#define SLEEP_ON_VAR                                    \
-        unsigned long flags;                            \
-        wait_queue_t wait;                              \
-        init_waitqueue_entry(&wait, current);
-
-#define SLEEP_ON_HEAD                                   \
-        spin_lock_irqsave(&q->lock,flags);              \
-        __add_wait_queue(q, &wait);                     \
-        spin_unlock(&q->lock);
-
-#define SLEEP_ON_TAIL                                   \
-        spin_lock_irq(&q->lock);                        \
-        __remove_wait_queue(q, &wait);                  \
-        spin_unlock_irqrestore(&q->lock, flags);
-
-void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
+static inline void
+sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
 {
-        SLEEP_ON_VAR
+        spin_lock_irqsave(&q->lock, *flags);
+        __add_wait_queue(q, wait);
+        spin_unlock(&q->lock);
+}
+
+static inline void
+sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+{
+        spin_lock_irq(&q->lock);
+        __remove_wait_queue(q, wait);
+        spin_unlock_irqrestore(&q->lock, *flags);
+}
+
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
+{
+        unsigned long flags;
+        wait_queue_t wait;
+
+        init_waitqueue_entry(&wait, current);
 
         current->state = TASK_INTERRUPTIBLE;
 
-        SLEEP_ON_HEAD
+        sleep_on_head(q, &wait, &flags);
         schedule();
-        SLEEP_ON_TAIL
+        sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long fastcall __sched
+long __sched
 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-        SLEEP_ON_VAR
+        unsigned long flags;
+        wait_queue_t wait;
+
+        init_waitqueue_entry(&wait, current);
 
         current->state = TASK_INTERRUPTIBLE;
 
-        SLEEP_ON_HEAD
+        sleep_on_head(q, &wait, &flags);
         timeout = schedule_timeout(timeout);
-        SLEEP_ON_TAIL
+        sleep_on_tail(q, &wait, &flags);
 
         return timeout;
 }
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
-void fastcall __sched sleep_on(wait_queue_head_t *q)
+void __sched sleep_on(wait_queue_head_t *q)
 {
-        SLEEP_ON_VAR
+        unsigned long flags;
+        wait_queue_t wait;
+
+        init_waitqueue_entry(&wait, current);
 
         current->state = TASK_UNINTERRUPTIBLE;
 
-        SLEEP_ON_HEAD
+        sleep_on_head(q, &wait, &flags);
         schedule();
-        SLEEP_ON_TAIL
+        sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(sleep_on);
 
-long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-        SLEEP_ON_VAR
+        unsigned long flags;
+        wait_queue_t wait;
+
+        init_waitqueue_entry(&wait, current);
 
         current->state = TASK_UNINTERRUPTIBLE;
 
-        SLEEP_ON_HEAD
+        sleep_on_head(q, &wait, &flags);
         timeout = schedule_timeout(timeout);
-        SLEEP_ON_TAIL
+        sleep_on_tail(q, &wait, &flags);
 
         return timeout;
 }
 EXPORT_SYMBOL(sleep_on_timeout);
 
 #ifdef CONFIG_RT_MUTEXES
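Note that this cleanup is purely structural; the interface race the wait.h comment warns about is untouched, because sleep_on() still goes to sleep unconditionally. The hypothetical caller below (wq, event_pending and racy_wait() are invented names, not from this commit) shows the lost-wakeup window that makes the wait_event* interfaces the preferred choice.

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static int event_pending;

static void racy_wait(void)
{
        while (!event_pending) {
                /*
                 * If the producer sets event_pending and calls wake_up(&wq)
                 * right here, nothing is queued on wq yet: the wake-up is
                 * lost and the sleep_on() below may block indefinitely.
                 */
                sleep_on(&wq);
        }
}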