block, bfq: convert to use request_queue->async_depth

The default limits are unchanged, and users can now configure async_depth.

Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Yu Kuai
Date:      2026-02-03 16:19:48 +08:00
Committer: Jens Axboe
Parent:    988bb1b9ed
Commit:    2110858c51
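
The commit message says async_depth is now user-configurable. As a rough illustration only, here is a userspace sketch that writes a new depth; the sysfs path is an assumption (a queue-level attribute, by analogy with where q->async_depth lives), since this diff does not show the sysfs plumbing.

/*
 * Hypothetical usage sketch: the sysfs path below is an assumption,
 * not confirmed by this commit's diff.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/async_depth";
	const char *depth = "32";	/* new cap on tags for async I/O */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, depth, strlen(depth)) < 0)
		perror("write");
	close(fd);
	return 0;
}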


@@ -7112,39 +7112,29 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
 static void bfq_depth_updated(struct request_queue *q)
 {
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-	unsigned int nr_requests = q->nr_requests;
+	unsigned int async_depth = q->async_depth;
 
 	/*
-	 * In-word depths if no bfq_queue is being weight-raised:
-	 * leaving 25% of tags only for sync reads.
+	 * By default:
+	 * - sync reads are not limited
+	 * If bfqq is not being weight-raised:
+	 * - sync writes are limited to 75% (async_depth default value)
+	 * - async I/O is limited to 50%
+	 * If bfqq is being weight-raised:
+	 * - sync writes are limited to ~37%
+	 * - async I/O is limited to ~18%
 	 *
-	 * In next formulas, right-shift the value
-	 * (1U<<bt->sb.shift), instead of computing directly
-	 * (1U<<(bt->sb.shift - something)), to be robust against
-	 * any possible value of bt->sb.shift, without having to
-	 * limit 'something'.
+	 * If request_queue->async_depth is updated by the user, all
+	 * limits are updated proportionally.
 	 */
-	/* no more than 50% of tags for async I/O */
-	bfqd->async_depths[0][0] = max(nr_requests >> 1, 1U);
-	/*
-	 * no more than 75% of tags for sync writes (25% extra tags
-	 * w.r.t. async I/O, to prevent async I/O from starving sync
-	 * writes)
-	 */
-	bfqd->async_depths[0][1] = max((nr_requests * 3) >> 2, 1U);
+	bfqd->async_depths[0][1] = async_depth;
+	bfqd->async_depths[0][0] = max(async_depth * 2 / 3, 1U);
+	bfqd->async_depths[1][1] = max(async_depth >> 1, 1U);
+	bfqd->async_depths[1][0] = max(async_depth >> 2, 1U);
 
 	/*
-	 * In-word depths in case some bfq_queue is being weight-
-	 * raised: leaving ~63% of tags for sync reads. This is the
-	 * highest percentage for which, in our tests, application
-	 * start-up times didn't suffer from any regression due to tag
-	 * shortage.
+	 * Due to cgroup QoS, the allowed requests for a bfqq might be 1
 	 */
-	/* no more than ~18% of tags for async I/O */
-	bfqd->async_depths[1][0] = max((nr_requests * 3) >> 4, 1U);
-	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->async_depths[1][1] = max((nr_requests * 6) >> 4, 1U);
 
 	blk_mq_set_min_shallow_depth(q, 1);
 }
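
To make the new math concrete, here is a minimal userspace sketch (not kernel code; the kernel's max() macro is open-coded as umax()) that mirrors the post-patch formulas for a queue with 64 tags at the default async_depth of 75%:

#include <stdio.h>

/* Open-coded stand-in for the kernel's max() macro. */
static unsigned int umax(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int nr_requests = 64;				/* example tag count */
	unsigned int async_depth = (nr_requests * 3) >> 2;	/* default: 75% */
	unsigned int depths[2][2];

	depths[0][1] = async_depth;			/* sync writes, not weight-raised */
	depths[0][0] = umax(async_depth * 2 / 3, 1U);	/* async I/O, not weight-raised */
	depths[1][1] = umax(async_depth >> 1, 1U);	/* sync writes, weight-raised */
	depths[1][0] = umax(async_depth >> 2, 1U);	/* async I/O, weight-raised */

	printf("async_depth=%u of %u tags\n", async_depth, nr_requests);
	printf("[0][1]=%u [0][0]=%u [1][1]=%u [1][0]=%u\n",
	       depths[0][1], depths[0][0], depths[1][1], depths[1][0]);
	return 0;
}

This prints async_depth=48 with [0][1]=48, [0][0]=32, [1][1]=24, [1][0]=12, i.e. exactly the 75% / 50% / ~37% / ~18% split the new comment describes.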
@@ -7365,6 +7355,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
 	blk_queue_flag_set(QUEUE_FLAG_DISABLE_WBT_DEF, q);
 	wbt_disable_default(q->disk);
 	blk_stat_enable_accounting(q);
+	q->async_depth = (q->nr_requests * 3) >> 2;
 
 	return 0;
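
As a sanity check of the commit message's "default limits are unchanged" claim: with this default of 75% of nr_requests, the old hard-coded formulas and the new async_depth-relative ones produce identical values whenever nr_requests is a multiple of 4 (other values can differ by one tag due to integer truncation). A throwaway userspace comparison, again not part of the patch:

#include <assert.h>
#include <stdio.h>

static unsigned int umax(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	for (unsigned int nr = 4; nr <= 1024; nr += 4) {
		unsigned int ad = (nr * 3) >> 2;	/* new default, 75% */

		/* old [0][1]: 75% of tags vs new async_depth itself */
		assert(umax((nr * 3) >> 2, 1U) == ad);
		/* old [0][0]: 50% of tags vs new async_depth * 2 / 3 */
		assert(umax(nr >> 1, 1U) == umax(ad * 2 / 3, 1U));
		/* old [1][1]: ~37% of tags vs new async_depth / 2 */
		assert(umax((nr * 6) >> 4, 1U) == umax(ad >> 1, 1U));
		/* old [1][0]: ~18% of tags vs new async_depth / 4 */
		assert(umax((nr * 3) >> 4, 1U) == umax(ad >> 2, 1U));
	}
	puts("old and new default limits match for nr_requests % 4 == 0");
	return 0;
}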