block: avoid cpu_hotplug_lock dependency on freeze_lock

A recent lockdep splat [1] observed while running blktest block/005
reveals a potential deadlock caused by the cpu_hotplug_lock dependency
on ->freeze_lock. This dependency was introduced by commit 033b667a82
("block: blk-rq-qos: guard rq-qos helpers by static key").

That change added a static key to avoid fetching q->rq_qos when neither
blk-wbt nor blk-iolatency is configured. The static key dynamically
patches kernel text to a NOP when disabled, eliminating the overhead of
fetching q->rq_qos in the I/O hot path. However, enabling a static key
at runtime requires acquiring both cpu_hotplug_lock and
jump_label_mutex. When this happens after the queue has already been
frozen (i.e., while holding ->freeze_lock), it creates a locking
dependency from cpu_hotplug_lock to ->freeze_lock, which leads to the
potential deadlock reported by lockdep [1].

To resolve this, replace the static key mechanism with a queue flag:
QUEUE_FLAG_QOS_ENABLED. This flag is evaluated in the fast path before
accessing q->rq_qos. If the flag is set, we proceed to fetch q->rq_qos;
otherwise, the access is skipped. Since q->queue_flags is commonly
accessed in the I/O hot path and resides in the first cacheline of
struct request_queue, checking it imposes minimal overhead while
eliminating the deadlock risk.

This change avoids the lockdep splat without introducing performance
regressions.

[1] https://lore.kernel.org/linux-block/4fdm37so3o4xricdgfosgmohn63aa7wj3ua4e5vpihoamwg3ui@fq42f5q5t5ic/

Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Closes: https://lore.kernel.org/linux-block/4fdm37so3o4xricdgfosgmohn63aa7wj3ua4e5vpihoamwg3ui@fq42f5q5t5ic/
Fixes: 033b667a82 ("block: blk-rq-qos: guard rq-qos helpers by static key")
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20250814082612.500845-4-nilay@linux.ibm.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:

parent ade1beea1c
commit 370ac285f2
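The crux of the fix is visible in the helpers changed below: a plain bit
test on q->queue_flags replaces the static branch, so enabling QoS no
longer patches kernel text and therefore never takes cpu_hotplug_lock.
As a minimal, self-contained illustration of that pattern, here is a
userspace C sketch; the struct layout, test_bit() helper, and hook
bodies are simplified stand-ins for the kernel's definitions, not the
kernel implementation itself.

/*
 * Userspace sketch of the fast-path gating pattern adopted by this
 * commit: a bit in an always-hot flags word is tested before
 * dereferencing the QoS list, instead of a static key (which would
 * need cpu_hotplug_lock + jump_label_mutex to toggle).
 */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_FLAG_QOS_ENABLED	5	/* arbitrary bit chosen for this sketch */

struct rq_qos {
	struct rq_qos *next;
};

struct request_queue {
	unsigned long queue_flags;	/* hot flags word; first cacheline in the kernel */
	struct rq_qos *rq_qos;		/* QoS policy list; NULL when none attached */
};

/* Simplified stand-in for the kernel's test_bit(). */
static inline bool test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

/* Fast path: one cheap bit test guards the q->rq_qos dereference. */
static void rq_qos_issue(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
		printf("QoS issue hook runs\n");	/* stands in for __rq_qos_issue() */
	else
		printf("QoS issue hook skipped\n");
}

int main(void)
{
	struct rq_qos wbt = { .next = NULL };
	struct request_queue q = { .queue_flags = 0, .rq_qos = NULL };

	rq_qos_issue(&q);	/* flag clear: hook skipped */

	/* Slow path (cf. rq_qos_add()): attach a policy, then set the flag. */
	q.rq_qos = &wbt;
	q.queue_flags |= 1UL << QUEUE_FLAG_QOS_ENABLED;
	rq_qos_issue(&q);	/* flag set: hook runs */

	return 0;
}

Toggling the gate here is an ordinary store, which mirrors why
blk_queue_flag_set()/blk_queue_flag_clear() can safely be called while
the queue is frozen, unlike static_branch_inc().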
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -95,6 +95,7 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(SQ_SCHED),
 	QUEUE_FLAG_NAME(DISABLE_WBT_DEF),
 	QUEUE_FLAG_NAME(NO_ELV_SWITCH),
+	QUEUE_FLAG_NAME(QOS_ENABLED),
 };
 
 #undef QUEUE_FLAG_NAME
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -2,8 +2,6 @@
 
 #include "blk-rq-qos.h"
 
-__read_mostly DEFINE_STATIC_KEY_FALSE(block_rq_qos);
-
 /*
  * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
  * false if 'v' + 1 would be bigger than 'below'.
@@ -319,8 +317,8 @@ void rq_qos_exit(struct request_queue *q)
 		struct rq_qos *rqos = q->rq_qos;
 		q->rq_qos = rqos->next;
 		rqos->ops->exit(rqos);
-		static_branch_dec(&block_rq_qos);
 	}
+	blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
 	mutex_unlock(&q->rq_qos_mutex);
 }
 
@@ -346,7 +344,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
 		goto ebusy;
 	rqos->next = q->rq_qos;
 	q->rq_qos = rqos;
-	static_branch_inc(&block_rq_qos);
+	blk_queue_flag_set(QUEUE_FLAG_QOS_ENABLED, q);
 
 	blk_mq_unfreeze_queue(q, memflags);
 
@@ -374,10 +372,11 @@ void rq_qos_del(struct rq_qos *rqos)
 	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
 		if (*cur == rqos) {
 			*cur = rqos->next;
-			static_branch_dec(&block_rq_qos);
 			break;
 		}
 	}
+	if (!q->rq_qos)
+		blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
 	blk_mq_unfreeze_queue(q, memflags);
 
 	mutex_lock(&q->debugfs_mutex);
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -12,7 +12,6 @@
 #include "blk-mq-debugfs.h"
 
 struct blk_mq_debugfs_attr;
-extern struct static_key_false block_rq_qos;
 
 enum rq_qos_id {
 	RQ_QOS_WBT,
@@ -113,35 +112,41 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
 
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_cleanup(q->rq_qos, bio);
 }
 
 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos &&
-	    !blk_rq_is_passthrough(rq))
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos && !blk_rq_is_passthrough(rq))
 		__rq_qos_done(q->rq_qos, rq);
 }
 
 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_issue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_requeue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_done_bio(struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) &&
-	    bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
-			     bio_flagged(bio, BIO_QOS_MERGED))) {
-		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	struct request_queue *q;
+
+	if (!bio->bi_bdev || (!bio_flagged(bio, BIO_QOS_THROTTLED) &&
+			      !bio_flagged(bio, BIO_QOS_MERGED)))
+		return;
+
+	q = bdev_get_queue(bio->bi_bdev);
 
 	/*
 	 * If a bio has BIO_QOS_xxx set, it implicitly implies that
@@ -150,12 +155,12 @@ static inline void rq_qos_done_bio(struct bio *bio)
 	 * __rq_qos_done_bio().
 	 */
-		__rq_qos_done_bio(q->rq_qos, bio);
-	}
+	__rq_qos_done_bio(q->rq_qos, bio);
 }
 
 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_THROTTLED);
 		__rq_qos_throttle(q->rq_qos, bio);
 	}
@@ -164,14 +169,16 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_track(q->rq_qos, rq, bio);
 }
 
 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_MERGED);
 		__rq_qos_merge(q->rq_qos, rq, bio);
 	}
@@ -179,7 +186,8 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 
 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
+	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
+	    q->rq_qos)
 		__rq_qos_queue_depth_changed(q->rq_qos);
 }
 
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -656,6 +656,7 @@ enum {
 	QUEUE_FLAG_SQ_SCHED,		/* single queue style io dispatch */
 	QUEUE_FLAG_DISABLE_WBT_DEF,	/* for sched to disable/enable wbt */
 	QUEUE_FLAG_NO_ELV_SWITCH,	/* can't switch elevator any more */
+	QUEUE_FLAG_QOS_ENABLED,		/* qos is enabled */
 	QUEUE_FLAG_MAX
 };
 