diff options
| author | Tejun Heo <tj@kernel.org> | 2011-10-19 14:33:05 +0200 | 
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2011-10-19 14:33:05 +0200 | 
| commit | da8303c63b8de73619884382d6e573d44aae0810 (patch) | |
| tree | d8560fa6452c5a7583aa21a2e5505d68899e2df5 /block/blk-core.c | |
| parent | bc16a4f933bc5ed50826b20561e4c3515061998b (diff) | |
block: make get_request[_wait]() fail if queue is dead
Currently get_request[_wait]() allocates request whether queue is dead
or not.  This patch makes get_request[_wait]() return NULL if @q is
dead.  blk_queue_bio() is updated to fail the submitted bio if request
allocation fails.  While at it, add docbook comments for
get_request[_wait]().
Note that the current code has rather unclear (there are spurious DEAD
tests scattered around) assumption that the owner of a queue
guarantees that no request travels block layer if the queue is dead
and this patch in itself doesn't change much; however, this will allow
fixing the broken assumption in the next patch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-core.c')
| -rw-r--r-- | block/blk-core.c | 54 | 
1 file changed, 38 insertions, 16 deletions
| diff --git a/block/blk-core.c b/block/blk-core.c index 6c491f2388e9..3508751c779a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -709,10 +709,19 @@ static bool blk_rq_should_init_elevator(struct bio *bio)  	return true;  } -/* - * Get a free request, queue_lock must be held. - * Returns NULL on failure, with queue_lock held. - * Returns !NULL on success, with queue_lock *not held*. +/** + * get_request - get a free request + * @q: request_queue to allocate request from + * @rw_flags: RW and SYNC flags + * @bio: bio to allocate request for (can be %NULL) + * @gfp_mask: allocation mask + * + * Get a free request from @q.  This function may fail under memory + * pressure or if @q is dead. + * + * Must be callled with @q->queue_lock held and, + * Returns %NULL on failure, with @q->queue_lock held. + * Returns !%NULL on success, with @q->queue_lock *not held*.   */  static struct request *get_request(struct request_queue *q, int rw_flags,  				   struct bio *bio, gfp_t gfp_mask) @@ -723,6 +732,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,  	const bool is_sync = rw_is_sync(rw_flags) != 0;  	int may_queue; +	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) +		return NULL; +  	may_queue = elv_may_queue(q, rw_flags);  	if (may_queue == ELV_MQUEUE_NO)  		goto rq_starved; @@ -815,11 +827,18 @@ out:  	return rq;  } -/* - * No available requests for this queue, wait for some requests to become - * available. +/** + * get_request_wait - get a free request with retry + * @q: request_queue to allocate request from + * @rw_flags: RW and SYNC flags + * @bio: bio to allocate request for (can be %NULL) + * + * Get a free request from @q.  This function keeps retrying under memory + * pressure and fails iff @q is dead.   * - * Called with q->queue_lock held, and returns with it unlocked. + * Must be callled with @q->queue_lock held and, + * Returns %NULL on failure, with @q->queue_lock held. 
+ * Returns !%NULL on success, with @q->queue_lock *not held*.   */  static struct request *get_request_wait(struct request_queue *q, int rw_flags,  					struct bio *bio) @@ -833,6 +852,9 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,  		struct io_context *ioc;  		struct request_list *rl = &q->rq; +		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) +			return NULL; +  		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,  				TASK_UNINTERRUPTIBLE); @@ -863,19 +885,15 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)  {  	struct request *rq; -	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) -		return NULL; -  	BUG_ON(rw != READ && rw != WRITE);  	spin_lock_irq(q->queue_lock); -	if (gfp_mask & __GFP_WAIT) { +	if (gfp_mask & __GFP_WAIT)  		rq = get_request_wait(q, rw, NULL); -	} else { +	else  		rq = get_request(q, rw, NULL, gfp_mask); -		if (!rq) -			spin_unlock_irq(q->queue_lock); -	} +	if (!rq) +		spin_unlock_irq(q->queue_lock);  	/* q->queue_lock is unlocked at this point */  	return rq; @@ -1299,6 +1317,10 @@ get_rq:  	 * Returns with the queue unlocked.  	 */  	req = get_request_wait(q, rw_flags, bio); +	if (unlikely(!req)) { +		bio_endio(bio, -ENODEV);	/* @q is dead */ +		goto out_unlock; +	}  	/*  	 * After dropping the lock and possibly sleeping here, our request | 
