mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
Merge tag 'block-7.0-20260305' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block fixes from Jens Axboe:
- NVMe pull request via Keith:
- Improve quirk visibility and configurability (Maurizio)
- Fix runtime user modification to queue setup (Keith)
- Fix multipath leak on try_module_get failure (Keith)
- Ignore ambiguous spec definitions for better atomics support
(John)
- Fix admin queue leak on controller reset (Ming)
- Fix large allocation in persistent reservation read keys
(Sungwoo Kim)
- Fix fcloop callback handling (Justin)
- Securely free DHCHAP secrets (Daniel)
- Various cleanups and typo fixes (John, Wilfred)
- Avoid a circular lock dependency issue in the sysfs nr_requests or
scheduler store handling
- Fix a circular lock dependency with the pcpu mutex and the queue
freeze lock
- Cleanup for bio_copy_kern(), using __bio_add_page() rather than
  bio_add_page(), as adding a page here cannot fail. The existing code
  had broken cleanup for the error condition, so make it clear that the
  error condition cannot happen
- Fix for a __this_cpu_read() in preemptible context splat
* tag 'block-7.0-20260305' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
block: use trylock to avoid lockdep circular dependency in sysfs
nvme: fix memory allocation in nvme_pr_read_keys()
block: use __bio_add_page in bio_copy_kern
block: break pcpu_alloc_mutex dependency on freeze_lock
blktrace: fix __this_cpu_read/write in preemptible context
nvme-multipath: fix leak on try_module_get failure
nvmet-fcloop: Check remoteport port_state before calling done callback
nvme-pci: do not try to add queue maps at runtime
nvme-pci: cap queue creation to used queues
nvme-pci: ensure we're polling a polled queue
nvme: fix memory leak in quirks_param_set()
nvme: correct comment about nvme_ns_remove()
nvme: stop setting namespace gendisk device driver data
nvme: add support for dynamic quirk configuration via module parameter
nvme: fix admin queue leak on controller reset
nvme-fabrics: use kfree_sensitive() for DHCHAP secrets
nvme: stop using AWUPF
nvme: expose active quirks in sysfs
nvme/host: fixup some typos
This commit is contained in:
@@ -398,8 +398,7 @@ static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int le
|
||||
if (op_is_write(op))
|
||||
memcpy(page_address(page), p, bytes);
|
||||
|
||||
if (bio_add_page(bio, page, bytes, 0) < bytes)
|
||||
break;
|
||||
__bio_add_page(bio, page, bytes, 0);
|
||||
|
||||
len -= bytes;
|
||||
p += bytes;
|
||||
|
||||
@@ -4793,38 +4793,45 @@ static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
|
||||
}
|
||||
}
|
||||
|
||||
static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
|
||||
int new_nr_hw_queues)
|
||||
static struct blk_mq_tags **blk_mq_prealloc_tag_set_tags(
|
||||
struct blk_mq_tag_set *set,
|
||||
int new_nr_hw_queues)
|
||||
{
|
||||
struct blk_mq_tags **new_tags;
|
||||
int i;
|
||||
|
||||
if (set->nr_hw_queues >= new_nr_hw_queues)
|
||||
goto done;
|
||||
return NULL;
|
||||
|
||||
new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
|
||||
GFP_KERNEL, set->numa_node);
|
||||
if (!new_tags)
|
||||
return -ENOMEM;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
if (set->tags)
|
||||
memcpy(new_tags, set->tags, set->nr_hw_queues *
|
||||
sizeof(*set->tags));
|
||||
kfree(set->tags);
|
||||
set->tags = new_tags;
|
||||
|
||||
for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
|
||||
if (!__blk_mq_alloc_map_and_rqs(set, i)) {
|
||||
while (--i >= set->nr_hw_queues)
|
||||
__blk_mq_free_map_and_rqs(set, i);
|
||||
return -ENOMEM;
|
||||
if (blk_mq_is_shared_tags(set->flags)) {
|
||||
new_tags[i] = set->shared_tags;
|
||||
} else {
|
||||
new_tags[i] = blk_mq_alloc_map_and_rqs(set, i,
|
||||
set->queue_depth);
|
||||
if (!new_tags[i])
|
||||
goto out_unwind;
|
||||
}
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
done:
|
||||
set->nr_hw_queues = new_nr_hw_queues;
|
||||
return 0;
|
||||
return new_tags;
|
||||
out_unwind:
|
||||
while (--i >= set->nr_hw_queues) {
|
||||
if (!blk_mq_is_shared_tags(set->flags))
|
||||
blk_mq_free_map_and_rqs(set, new_tags[i], i);
|
||||
}
|
||||
kfree(new_tags);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -5113,6 +5120,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
||||
unsigned int memflags;
|
||||
int i;
|
||||
struct xarray elv_tbl;
|
||||
struct blk_mq_tags **new_tags;
|
||||
bool queues_frozen = false;
|
||||
|
||||
lockdep_assert_held(&set->tag_list_lock);
|
||||
@@ -5147,11 +5155,18 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
||||
if (blk_mq_elv_switch_none(q, &elv_tbl))
|
||||
goto switch_back;
|
||||
|
||||
new_tags = blk_mq_prealloc_tag_set_tags(set, nr_hw_queues);
|
||||
if (IS_ERR(new_tags))
|
||||
goto switch_back;
|
||||
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||
blk_mq_freeze_queue_nomemsave(q);
|
||||
queues_frozen = true;
|
||||
if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
|
||||
goto switch_back;
|
||||
if (new_tags) {
|
||||
kfree(set->tags);
|
||||
set->tags = new_tags;
|
||||
}
|
||||
set->nr_hw_queues = nr_hw_queues;
|
||||
|
||||
fallback:
|
||||
blk_mq_update_queue_map(set);
|
||||
|
||||
@@ -78,8 +78,14 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
|
||||
/*
|
||||
* Serialize updating nr_requests with concurrent queue_requests_store()
|
||||
* and switching elevator.
|
||||
*
|
||||
* Use trylock to avoid circular lock dependency with kernfs active
|
||||
* reference during concurrent disk deletion:
|
||||
* update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del)
|
||||
* kn->active -> update_nr_hwq_lock (via this sysfs write path)
|
||||
*/
|
||||
down_write(&set->update_nr_hwq_lock);
|
||||
if (!down_write_trylock(&set->update_nr_hwq_lock))
|
||||
return -EBUSY;
|
||||
|
||||
if (nr == q->nr_requests)
|
||||
goto unlock;
|
||||
|
||||
@@ -807,7 +807,16 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
|
||||
elv_iosched_load_module(ctx.name);
|
||||
ctx.type = elevator_find_get(ctx.name);
|
||||
|
||||
down_read(&set->update_nr_hwq_lock);
|
||||
/*
|
||||
* Use trylock to avoid circular lock dependency with kernfs active
|
||||
* reference during concurrent disk deletion:
|
||||
* update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del)
|
||||
* kn->active -> update_nr_hwq_lock (via this sysfs write path)
|
||||
*/
|
||||
if (!down_read_trylock(&set->update_nr_hwq_lock)) {
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
if (!blk_queue_no_elv_switch(q)) {
|
||||
ret = elevator_change(q, &ctx);
|
||||
if (!ret)
|
||||
@@ -817,6 +826,7 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
|
||||
}
|
||||
up_read(&set->update_nr_hwq_lock);
|
||||
|
||||
out:
|
||||
if (ctx.type)
|
||||
elevator_put(ctx.type);
|
||||
return ret;
|
||||
|
||||
Reference in New Issue
Block a user