mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
block: introduce blk_queue_rot()
To check if a request queue is for a rotational device, a double negation is needed with the pattern "!blk_queue_nonrot(q)". Simplify this with the introduction of the helper blk_queue_rot(), which tests if a request queue's limits have the BLK_FEAT_ROTATIONAL feature set. All call sites of blk_queue_nonrot() are modified to use blk_queue_rot(), and the blk_queue_nonrot() definition is removed. No functional changes. Signed-off-by: Damien Le Moal <dlemoal@kernel.org> Reviewed-by: Nitesh Shetty <nj.shetty@samsung.com> Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
committed by
Jens Axboe
parent
068f5b5ef5
commit
2719bd1ee1
@@ -231,7 +231,7 @@ static struct kmem_cache *bfq_pool;
 #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
 	(get_sdist(last_pos, rq) >			\
 	 BFQQ_SEEK_THR &&				\
-	 (!blk_queue_nonrot(bfqd->queue) ||		\
+	 (blk_queue_rot(bfqd->queue) ||			\
 	  blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
 #define BFQQ_CLOSE_THR	(sector_t)(8 * 1024)
 #define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 19)
@@ -4165,7 +4165,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,

 	/* don't use too short time intervals */
 	if (delta_usecs < 1000) {
-		if (blk_queue_nonrot(bfqd->queue))
+		if (!blk_queue_rot(bfqd->queue))
			 /*
			  * give same worst-case guarantees as idling
			  * for seeky
@@ -4487,7 +4487,7 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq)
 {
 	bool rot_without_queueing =
-		!blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
+		blk_queue_rot(bfqd->queue) && !bfqd->hw_tag,
		bfqq_sequential_and_IO_bound,
		idling_boosts_thr;

@@ -4521,7 +4521,7 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
	 * flash-based device.
	 */
	idling_boosts_thr = rot_without_queueing ||
-		((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
+		((blk_queue_rot(bfqd->queue) || !bfqd->hw_tag) &&
		 bfqq_sequential_and_IO_bound);

	/*
@@ -4722,7 +4722,7 @@ bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
		 * there is only one in-flight large request
		 * at a time.
		 */
-		if (blk_queue_nonrot(bfqd->queue) &&
+		if (!blk_queue_rot(bfqd->queue) &&
		    blk_rq_sectors(bfqq->next_rq) >=
		    BFQQ_SECT_THR_NONROT &&
		    bfqd->tot_rq_in_driver >= 1)
@@ -6340,7 +6340,7 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd)
	bfqd->hw_tag_samples = 0;

	bfqd->nonrot_with_queueing =
-		blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
+		!blk_queue_rot(bfqd->queue) && bfqd->hw_tag;
 }

 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
@@ -7293,7 +7293,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
	INIT_HLIST_HEAD(&bfqd->burst_list);

	bfqd->hw_tag = -1;
-	bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
+	bfqd->nonrot_with_queueing = !blk_queue_rot(bfqd->queue);

	bfqd->bfq_max_budget = bfq_default_max_budget;
@@ -7328,9 +7328,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
	 * Begin by assuming, optimistically, that the device peak
	 * rate is equal to 2/3 of the highest reference rate.
	 */
-	bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
-		ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
-	bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
+	bfqd->rate_dur_prod = ref_rate[!blk_queue_rot(bfqd->queue)] *
+		ref_wr_duration[!blk_queue_rot(bfqd->queue)];
+	bfqd->peak_rate = ref_rate[!blk_queue_rot(bfqd->queue)] * 2 / 3;

	/* see comments on the definition of next field inside bfq_data */
	bfqd->actuator_load_threshold = 4;
@@ -812,7 +812,7 @@ static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk)
	u64 now_ns;

	/* rotational? */
-	if (!blk_queue_nonrot(disk->queue))
+	if (blk_queue_rot(disk->queue))
		return AUTOP_HDD;

	/* handle SATA SSDs w/ broken NCQ */
@@ -988,10 +988,7 @@ static void iolatency_pd_init(struct blkg_policy_data *pd)
	u64 now = blk_time_get_ns();
	int cpu;

-	if (blk_queue_nonrot(blkg->q))
-		iolat->ssd = true;
-	else
-		iolat->ssd = false;
+	iolat->ssd = !blk_queue_rot(blkg->q);

	for_each_possible_cpu(cpu) {
		struct latency_stat *stat;
@@ -747,10 +747,9 @@ u64 wbt_default_latency_nsec(struct request_queue *q)
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
-	if (blk_queue_nonrot(q))
-		return 2000000ULL;
-	else
+	if (blk_queue_rot(q))
		return 75000000ULL;
+	return 2000000ULL;
 }

 static int wbt_data_dir(const struct request *rq)
@@ -680,7 +680,7 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q)	(!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_rot(q)	((q)->limits.features & BLK_FEAT_ROTATIONAL)
 #define blk_queue_io_stat(q)	((q)->limits.features & BLK_FEAT_IO_STAT)
 #define blk_queue_passthrough_stat(q)	\
	((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
@@ -1463,7 +1463,7 @@ bdev_write_zeroes_unmap_sectors(struct block_device *bdev)

 static inline bool bdev_nonrot(struct block_device *bdev)
 {
-	return blk_queue_nonrot(bdev_get_queue(bdev));
+	return !blk_queue_rot(bdev_get_queue(bdev));
 }

 static inline bool bdev_synchronous(struct block_device *bdev)
Reference in New Issue
Block a user