Merge branch 'block-6.19' into for-7.0/block

Merge in fixes that went into 6.19 after for-7.0/block was branched.
Pending ublk changes depend on these, particularly the async partition
scan work.

* block-6.19:
  block: zero non-PI portion of auto integrity buffer
  ublk: fix use-after-free in ublk_partition_scan_work
  blk-mq: avoid stall during boot due to synchronize_rcu_expedited
  loop: add missing bd_abort_claiming in loop_set_status
  block: don't merge bios with different app_tags
  blk-rq-qos: Remove unlikely() hints from QoS checks
  loop: don't change loop device under exclusive opener in loop_set_status
  block, bfq: update outdated comment
  blk-mq: skip CPU offline notify on unmapped hctx
  selftests/ublk: fix Makefile to rebuild on header changes
  selftests/ublk: add test for async partition scan
  ublk: scan partition in async way
  block,bfq: fix aux stat accumulation destination
  md: Fix forward incompatibility from configurable logical block size
  md: Fix logical_block_size configuration being overwritten
  md: suspend array while updating raid_disks via sysfs
  md/raid5: fix possible null-pointer dereferences in raid5_store_group_thread_cnt()
  md: Fix static checker warning in analyze_sbs
Jens Axboe
2026-01-11 13:16:36 -07:00
13 changed files with 243 additions and 63 deletions

block/bfq-cgroup.c

@@ -380,7 +380,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
 	blkg_rwstat_add_aux(&to->merged, &from->merged);
 	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
 	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
-	bfq_stat_add_aux(&from->time, &from->time);
+	bfq_stat_add_aux(&to->time, &from->time);
 	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
 	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
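The fix above is easy to gloss over: the first argument of bfq_stat_add_aux()
is the destination, so passing &from->time accumulated the counter into itself
and to->time never received anything. A minimal userspace sketch of the same
bug pattern (the helper and struct here are stand-ins, not the kernel API):

#include <stdio.h>

struct stats { unsigned long time; };

/* Destination-first accumulator, shaped like bfq_stat_add_aux(). */
static void stat_add_aux(unsigned long *to, const unsigned long *from)
{
	*to += *from;
}

int main(void)
{
	struct stats to = { 0 }, from = { 5 };

	stat_add_aux(&from.time, &from.time);	/* the bug: from doubles, to stays 0 */
	printf("buggy: to=%lu from=%lu\n", to.time, from.time);

	from.time = 5;
	stat_add_aux(&to.time, &from.time);	/* the fix: to accumulates from */
	printf("fixed: to=%lu from=%lu\n", to.time, from.time);
	return 0;
}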

block/bfq-iosched.h

@@ -984,7 +984,7 @@ struct bfq_group_data {
	 * unused for the root group. Used to know whether there
	 * are groups with more than one active @bfq_entity
	 * (see the comments to the function
-	 * bfq_bfqq_may_idle()).
+	 * bfq_better_to_idle()).
	 * @rq_pos_tree: rbtree sorted by next_request position, used when
	 *               determining if two or more queues have interleaving
	 *               requests (see bfq_find_close_cooperator()).

block/bio-integrity.c

@@ -128,7 +128,7 @@ bool bio_integrity_prep(struct bio *bio)
				return true;
			set_flags = false;
			gfp |= __GFP_ZERO;
-		} else if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
+		} else if (bi->metadata_size > bi->pi_tuple_size)
			gfp |= __GFP_ZERO;
		break;
	default:
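The new condition widens the zeroing: previously the auto-generated integrity
buffer was zero-allocated only when the profile carried no checksum at all; now
it is zeroed whenever the per-interval metadata is larger than the PI tuple,
since the non-PI tail is never generated before the buffer goes to the device.
A hedged userspace sketch of the decision (field names mirror struct
blk_integrity, the sizes are made up):

#include <stdbool.h>
#include <stdio.h>

/* Sketch: must the auto integrity buffer be zero-allocated? If the
 * metadata interval is wider than the PI tuple, the bytes past the PI
 * portion are never written, so they must not leak uninitialized
 * kernel memory to the device.
 */
static bool zero_auto_integrity_buf(unsigned metadata_size, unsigned pi_tuple_size)
{
	return metadata_size > pi_tuple_size;
}

int main(void)
{
	printf("%d\n", zero_auto_integrity_buf(16, 8));	/* 1: zero the non-PI tail */
	printf("%d\n", zero_auto_integrity_buf(8, 8));	/* 0: PI covers the buffer */
	return 0;
}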

block/blk-integrity.c

@@ -140,14 +140,21 @@ EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
 bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
 {
+	struct bio_integrity_payload *bip, *bip_next;
+
 	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
 		return true;
 
 	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
 		return false;
 
-	if (bio_integrity(req->bio)->bip_flags !=
-	    bio_integrity(next->bio)->bip_flags)
+	bip = bio_integrity(req->bio);
+	bip_next = bio_integrity(next->bio);
+	if (bip->bip_flags != bip_next->bip_flags)
 		return false;
 
+	if (bip->bip_flags & BIP_CHECK_APPTAG &&
+	    bip->app_tag != bip_next->app_tag)
+		return false;
+
 	if (req->nr_integrity_segments + next->nr_integrity_segments >
@@ -163,15 +170,21 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
 bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
 {
+	struct bio_integrity_payload *bip, *bip_bio = bio_integrity(bio);
 	int nr_integrity_segs;
 
-	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
+	if (blk_integrity_rq(req) == 0 && bip_bio == NULL)
 		return true;
 
-	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
+	if (blk_integrity_rq(req) == 0 || bip_bio == NULL)
 		return false;
 
-	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
+	bip = bio_integrity(req->bio);
+	if (bip->bip_flags != bip_bio->bip_flags)
 		return false;
 
+	if (bip->bip_flags & BIP_CHECK_APPTAG &&
+	    bip->app_tag != bip_bio->app_tag)
+		return false;
+
 	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
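Both merge paths now apply the same rule: the bip_flags must match exactly,
and when BIP_CHECK_APPTAG is set the application tags must match too, because
a merged request carries a single app_tag. A standalone sketch of the
predicate (the flag bit and struct layout are stand-ins for the kernel's
bio_integrity_payload, not its real definition):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIP_CHECK_APPTAG (1 << 3)	/* stand-in flag bit */

struct integrity_payload {		/* stand-in for bio_integrity_payload */
	unsigned short bip_flags;
	uint16_t app_tag;
};

/* Mirrors the merge checks above: flags must match exactly, and when
 * app-tag verification is enabled the tags themselves must match too.
 */
static bool integrity_mergeable(const struct integrity_payload *a,
				const struct integrity_payload *b)
{
	if (a->bip_flags != b->bip_flags)
		return false;
	if (a->bip_flags & BIP_CHECK_APPTAG && a->app_tag != b->app_tag)
		return false;
	return true;
}

int main(void)
{
	struct integrity_payload a = { BIP_CHECK_APPTAG, 0x1234 };
	struct integrity_payload b = { BIP_CHECK_APPTAG, 0x5678 };

	printf("mergeable: %d\n", integrity_mergeable(&a, &b));	/* 0: tags differ */
	b.app_tag = a.app_tag;
	printf("mergeable: %d\n", integrity_mergeable(&a, &b));	/* 1: tags match */
	return 0;
}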

block/blk-mq.c

@@ -3721,7 +3721,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
			struct blk_mq_hw_ctx, cpuhp_online);
	int ret = 0;
 
-	if (blk_mq_hctx_has_online_cpu(hctx, cpu))
+	if (!hctx->nr_ctx || blk_mq_hctx_has_online_cpu(hctx, cpu))
 		return 0;
 
 	/*
@@ -4553,8 +4553,7 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
		 * Make sure reading the old queue_hw_ctx from other
		 * context concurrently won't trigger uaf.
		 */
-		synchronize_rcu_expedited();
-		kfree(hctxs);
+		kfree_rcu_mightsleep(hctxs);
		hctxs = new_hctxs;
	}
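The second hunk trades a synchronous wait for a deferred free:
synchronize_rcu_expedited() blocks the caller for a full grace period, which
adds up when hardware contexts are reallocated repeatedly during boot, while
kfree_rcu_mightsleep() queues the old array to be freed once readers are done
and returns immediately. A minimal kernel-style sketch of the general pattern
(the struct and helper here are hypothetical, not the blk-mq code):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct table { int nr; };	/* hypothetical RCU-published object */

/* Hypothetical helper: publish a new RCU-visible table and dispose of
 * the old one without stalling the updater for a grace period.
 */
static void table_replace(struct table __rcu **slot, struct table *new)
{
	struct table *old;

	old = rcu_replace_pointer(*slot, new, true);
	if (old)
		kfree_rcu_mightsleep(old);	/* freed after a grace period */
}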

block/blk-rq-qos.h

@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_cleanup(q->rq_qos, bio);
 }
 
 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos && !blk_rq_is_passthrough(rq))
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
+	    q->rq_qos && !blk_rq_is_passthrough(rq))
 		__rq_qos_done(q->rq_qos, rq);
 }
 
 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_issue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_requeue(q->rq_qos, rq);
 }
@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_THROTTLED);
 		__rq_qos_throttle(q->rq_qos, bio);
 	}
@@ -172,16 +168,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_track(q->rq_qos, rq, bio);
 }
 
 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_MERGED);
 		__rq_qos_merge(q->rq_qos, rq, bio);
 	}
@@ -189,8 +183,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_queue_depth_changed(q->rq_qos);
 }
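The unlikely() removals are more than cosmetic: the hint told the compiler
that QUEUE_FLAG_QOS_ENABLED being set is the cold path, laying the QoS branch
out of line and penalizing every system that actually runs with QoS (wbt,
iocost, iolatency) enabled. A runnable illustration of what the hint does,
using the same __builtin_expect() definitions the kernel uses:

#include <stdio.h>

/* The kernel's definitions; they only steer static branch prediction
 * and basic-block layout at compile time.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

int main(void)
{
	volatile int qos_enabled = 1;

	if (unlikely(qos_enabled))	/* compiler lays this path out as cold */
		puts("QoS path, hinted unlikely");

	if (qos_enabled)		/* no static hint: hardware predicts */
		puts("QoS path, no hint");

	return 0;
}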