mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
blk-mq: factor out a helper blk_mq_limit_depth()
There are no functional changes; this just makes the code cleaner. Signed-off-by: Yu Kuai <yukuai@fnnas.com> Reviewed-by: Hannes Reinecke <hare@suse.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
@@ -498,6 +498,42 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
|
||||
return rq_list_pop(data->cached_rqs);
|
||||
}
|
||||
|
||||
static void blk_mq_limit_depth(struct blk_mq_alloc_data *data)
|
||||
{
|
||||
struct elevator_mq_ops *ops;
|
||||
|
||||
/* If no I/O scheduler has been configured, don't limit requests */
|
||||
if (!data->q->elevator) {
|
||||
blk_mq_tag_busy(data->hctx);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* All requests use scheduler tags when an I/O scheduler is
|
||||
* enabled for the queue.
|
||||
*/
|
||||
data->rq_flags |= RQF_SCHED_TAGS;
|
||||
|
||||
/*
|
||||
* Flush/passthrough requests are special and go directly to the
|
||||
* dispatch list, they are not subject to the async_depth limit.
|
||||
*/
|
||||
if ((data->cmd_flags & REQ_OP_MASK) == REQ_OP_FLUSH ||
|
||||
blk_op_is_passthrough(data->cmd_flags))
|
||||
return;
|
||||
|
||||
WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
|
||||
data->rq_flags |= RQF_USE_SCHED;
|
||||
|
||||
/*
|
||||
* By default, sync requests have no limit, and async requests are
|
||||
* limited to async_depth.
|
||||
*/
|
||||
ops = &data->q->elevator->type->ops;
|
||||
if (ops->limit_depth)
|
||||
ops->limit_depth(data->cmd_flags, data);
|
||||
}
|
||||
|
||||
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
|
||||
{
|
||||
struct request_queue *q = data->q;
|
||||
@@ -516,31 +552,7 @@ retry:
|
||||
data->ctx = blk_mq_get_ctx(q);
|
||||
data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
|
||||
|
||||
if (q->elevator) {
|
||||
/*
|
||||
* All requests use scheduler tags when an I/O scheduler is
|
||||
* enabled for the queue.
|
||||
*/
|
||||
data->rq_flags |= RQF_SCHED_TAGS;
|
||||
|
||||
/*
|
||||
* Flush/passthrough requests are special and go directly to the
|
||||
* dispatch list.
|
||||
*/
|
||||
if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
|
||||
!blk_op_is_passthrough(data->cmd_flags)) {
|
||||
struct elevator_mq_ops *ops = &q->elevator->type->ops;
|
||||
|
||||
WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
|
||||
|
||||
data->rq_flags |= RQF_USE_SCHED;
|
||||
if (ops->limit_depth)
|
||||
ops->limit_depth(data->cmd_flags, data);
|
||||
}
|
||||
} else {
|
||||
blk_mq_tag_busy(data->hctx);
|
||||
}
|
||||
|
||||
blk_mq_limit_depth(data);
|
||||
if (data->flags & BLK_MQ_REQ_RESERVED)
|
||||
data->rq_flags |= RQF_RESV;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user