
block: Let blk_drain_queue() caller obtain the queue lock

Let the caller of blk_drain_queue() obtain the queue lock to improve
readability of the patch called "Avoid that request_fn is invoked on
a dead queue".

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Chanho Min <chanho.min@lge.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Authored by Bart Van Assche on 2012-11-28 13:43:38 +01:00, committed by Jens Axboe
parent 3f3299d5c0
commit 807592a4fa
1 changed file with 18 additions and 12 deletions
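In short: blk_drain_queue() used to acquire and release q->queue_lock itself, while after this patch the renamed, static helper __blk_drain_queue() expects its caller to already hold the lock. A minimal sketch of the changed calling convention, using only names from the patch (an illustrative fragment, not part of the diff):

	/* Before: the helper took the queue lock internally. */
	blk_drain_queue(q, true);

	/*
	 * After: the caller wraps the call in the queue lock.  The helper
	 * asserts this with lockdep_assert_held() and may transiently drop
	 * the lock (e.g. around msleep()), which the __releases()/
	 * __acquires() sparse annotations document.
	 */
	spin_lock_irq(q->queue_lock);
	__blk_drain_queue(q, true);
	spin_unlock_irq(q->queue_lock);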

block/blk-core.c

@@ -349,7 +349,7 @@ void blk_put_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_put_queue);
 
 /**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
  * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
@@ -357,15 +357,17 @@ EXPORT_SYMBOL(blk_put_queue);
  * If not, only ELVPRIV requests are drained.  The caller is responsible
  * for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
 {
 	int i;
 
+	lockdep_assert_held(q->queue_lock);
+
 	while (true) {
 		bool drain = false;
 
-		spin_lock_irq(q->queue_lock);
-
 		/*
 		 * The caller might be trying to drain @q before its
 		 * elevator is initialized.
@@ -401,11 +403,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 			}
 		}
 
-		spin_unlock_irq(q->queue_lock);
-
 		if (!drain)
 			break;
+
+		spin_unlock_irq(q->queue_lock);
+
 		msleep(10);
+
+		spin_lock_irq(q->queue_lock);
 	}
 
 	/*
@@ -416,13 +421,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 	if (q->request_fn) {
 		struct request_list *rl;
 
-		spin_lock_irq(q->queue_lock);
-
 		blk_queue_for_each_rl(rl, q)
 			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 				wake_up_all(&rl->wait[i]);
-
-		spin_unlock_irq(q->queue_lock);
 	}
 }
 
@@ -446,7 +447,10 @@ void blk_queue_bypass_start(struct request_queue *q)
 	spin_unlock_irq(q->queue_lock);
 
 	if (drain) {
-		blk_drain_queue(q, false);
+		spin_lock_irq(q->queue_lock);
+		__blk_drain_queue(q, false);
+		spin_unlock_irq(q->queue_lock);
+
 		/* ensure blk_queue_bypass() is %true inside RCU read lock */
 		synchronize_rcu();
 	}
@@ -504,7 +508,9 @@ void blk_cleanup_queue(struct request_queue *q)
 	mutex_unlock(&q->sysfs_lock);
 
 	/* drain all requests queued before DYING marking */
-	blk_drain_queue(q, true);
+	spin_lock_irq(lock);
+	__blk_drain_queue(q, true);
+	spin_unlock_irq(lock);
 
 	/* @q won't process any more request, flush async actions */
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
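Note the unlock/relock pair the patch adds around msleep(): msleep() sleeps, and sleeping while holding a spinlock taken with spin_lock_irq() is not allowed, so the lock must be dropped for the duration of the sleep. A hedged sketch of the general idiom the patch adopts, built around a hypothetical helper (__frob_queue_locked is not from the patch):

	/*
	 * Hypothetical example of the locked-caller convention: a
	 * double-underscore helper that runs under q->queue_lock,
	 * verifies that with lockdep, and documents the transient
	 * unlock with sparse annotations.
	 */
	static void __frob_queue_locked(struct request_queue *q)
		__releases(q->queue_lock)
		__acquires(q->queue_lock)
	{
		lockdep_assert_held(q->queue_lock);

		spin_unlock_irq(q->queue_lock);
		msleep(10);	/* sleeping is only legal with the lock dropped */
		spin_lock_irq(q->queue_lock);
	}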