Merge tag 'block-7.0-20260312' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
      - Fix nvme-pci IRQ race and slab-out-of-bounds access
      - Fix recursive workqueue locking for target async events
      - Various cleanups

 - Fix a potential NULL pointer dereference in ublk on size setting

 - ublk automatic partition scanning fix

 - Two s390 dasd fixes

* tag 'block-7.0-20260312' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  nvme: Annotate struct nvme_dhchap_key with __counted_by
  nvme-core: do not pass empty queue_limits to blk_mq_alloc_queue()
  nvme-pci: Fix race bug in nvme_poll_irqdisable()
  nvmet: move async event work off nvmet-wq
  nvme-pci: Fix slab-out-of-bounds in nvme_dbbuf_set
  s390/dasd: Copy detected format information to secondary device
  s390/dasd: Move quiesce state with pprc swap
  ublk: don't clear GD_SUPPRESS_PART_SCAN for unprivileged daemons
  ublk: fix NULL pointer dereference in ublk_ctrl_set_size()
This commit is contained in:
Linus Torvalds
2026-03-13 10:13:06 -07:00
9 changed files with 50 additions and 13 deletions

View File

@@ -4443,7 +4443,9 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
/* Skip partition scan if disabled by user */
if (ub->dev_info.flags & UBLK_F_NO_AUTO_PART_SCAN) {
clear_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
/* Don't clear it for unprivileged daemons, see comment above */
if (!ub->unprivileged_daemons)
clear_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
} else {
/* Schedule async partition scan for trusted daemons */
if (!ub->unprivileged_daemons)
@@ -5006,15 +5008,22 @@ static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
return 0;
}
static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
static int ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
{
struct ublk_param_basic *p = &ub->params.basic;
u64 new_size = header->data[0];
int ret = 0;
mutex_lock(&ub->mutex);
if (!ub->ub_disk) {
ret = -ENODEV;
goto out;
}
p->dev_sectors = new_size;
set_capacity_and_notify(ub->ub_disk, p->dev_sectors);
out:
mutex_unlock(&ub->mutex);
return ret;
}
struct count_busy {
@@ -5335,8 +5344,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
ret = ublk_ctrl_end_recovery(ub, &header);
break;
case UBLK_CMD_UPDATE_SIZE:
ublk_ctrl_set_size(ub, &header);
ret = 0;
ret = ublk_ctrl_set_size(ub, &header);
break;
case UBLK_CMD_QUIESCE_DEV:
ret = ublk_ctrl_quiesce_dev(ub, &header);

View File

@@ -4834,7 +4834,6 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int cmd_size)
{
struct queue_limits lim = {};
int ret;
memset(set, 0, sizeof(*set));
@@ -4861,7 +4860,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ctrl->admin_q)
blk_put_queue(ctrl->admin_q);
ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
ctrl->admin_q = blk_mq_alloc_queue(set, NULL, NULL);
if (IS_ERR(ctrl->admin_q)) {
ret = PTR_ERR(ctrl->admin_q);
goto out_free_tagset;

View File

@@ -544,7 +544,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
/* Free memory and continue on */
nvme_dbbuf_dma_free(dev);
for (i = 1; i <= dev->online_queues; i++)
for (i = 1; i < dev->online_queues; i++)
nvme_dbbuf_free(&dev->queues[i]);
}
}
@@ -1625,14 +1625,16 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
int irq;
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
irq = pci_irq_vector(pdev, nvmeq->cq_vector);
disable_irq(irq);
spin_lock(&nvmeq->cq_poll_lock);
nvme_poll_cq(nvmeq, NULL);
spin_unlock(&nvmeq->cq_poll_lock);
enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
enable_irq(irq);
}
static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)

View File

@@ -1585,7 +1585,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
mutex_unlock(&ctrl->lock);
queue_work(nvmet_wq, &ctrl->async_event_work);
queue_work(nvmet_aen_wq, &ctrl->async_event_work);
}
void nvmet_execute_keep_alive(struct nvmet_req *req)

View File

@@ -27,6 +27,8 @@ static DEFINE_IDA(cntlid_ida);
struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);
struct workqueue_struct *nvmet_aen_wq;
EXPORT_SYMBOL_GPL(nvmet_aen_wq);
/*
* This read/write semaphore is used to synchronize access to configuration
@@ -206,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
list_add_tail(&aen->entry, &ctrl->async_events);
mutex_unlock(&ctrl->lock);
queue_work(nvmet_wq, &ctrl->async_event_work);
queue_work(nvmet_aen_wq, &ctrl->async_event_work);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -1956,9 +1958,14 @@ static int __init nvmet_init(void)
if (!nvmet_wq)
goto out_free_buffered_work_queue;
nvmet_aen_wq = alloc_workqueue("nvmet-aen-wq",
WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!nvmet_aen_wq)
goto out_free_nvmet_work_queue;
error = nvmet_init_debugfs();
if (error)
goto out_free_nvmet_work_queue;
goto out_free_nvmet_aen_work_queue;
error = nvmet_init_discovery();
if (error)
@@ -1974,6 +1981,8 @@ out_exit_discovery:
nvmet_exit_discovery();
out_exit_debugfs:
nvmet_exit_debugfs();
out_free_nvmet_aen_work_queue:
destroy_workqueue(nvmet_aen_wq);
out_free_nvmet_work_queue:
destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
@@ -1991,6 +2000,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_discovery();
nvmet_exit_debugfs();
ida_destroy(&cntlid_ida);
destroy_workqueue(nvmet_aen_wq);
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);

View File

@@ -501,6 +501,7 @@ extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;
extern struct workqueue_struct *nvmet_aen_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{

View File

@@ -2087,6 +2087,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
mutex_unlock(&nvmet_rdma_queue_mutex);
flush_workqueue(nvmet_wq);
flush_workqueue(nvmet_aen_wq);
}
static struct ib_client nvmet_rdma_ib_client = {

View File

@@ -6135,6 +6135,7 @@ static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busi
static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
char *sec_busid)
{
struct dasd_eckd_private *prim_priv, *sec_priv;
struct dasd_device *primary, *secondary;
struct dasd_copy_relation *copy;
struct dasd_block *block;
@@ -6155,6 +6156,9 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid
if (!secondary)
return DASD_COPYPAIRSWAP_SECONDARY;
prim_priv = primary->private;
sec_priv = secondary->private;
/*
* usually the device should be quiesced for swap
* for paranoia stop device and requeue requests again
@@ -6182,6 +6186,18 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid
dev_name(&secondary->cdev->dev), rc);
}
if (primary->stopped & DASD_STOPPED_QUIESCE) {
dasd_device_set_stop_bits(secondary, DASD_STOPPED_QUIESCE);
dasd_device_remove_stop_bits(primary, DASD_STOPPED_QUIESCE);
}
/*
* The secondary device never got through format detection, but since it
* is a copy of the primary device, the format is exactly the same;
* therefore, the detected layout can simply be copied.
*/
sec_priv->uses_cdl = prim_priv->uses_cdl;
/* re-enable device */
dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);

View File

@@ -11,7 +11,7 @@
struct nvme_dhchap_key {
size_t len;
u8 hash;
u8 key[];
u8 key[] __counted_by(len);
};
u32 nvme_auth_get_seqnum(void);