
nvme: fix multiple spelling and grammar issues in host drivers

This commit fixes several typos and grammatical issues across various
nvme host driver files:

 - correct "glace" to "glance" in a comment in apple.c
 - fix "Idependent" to "Independent" in core.c
 - change "unsucceesful" to "unsuccessful", "they blk-mq" to "the blk-mq",
 - fix "terminaed" to "terminated" and other grammar in fc.c
 - update "O's" to "0's" to clarify meaning in nvme.h
 - fix a function name reference in a comment in zns.c:
   *_transter_len() -> *_transfer_len().
 - fix sysfs_emit() output format in pci.c (replace x%08x with 0x%08x)

These changes improve code readability and documentation consistency
across the NVMe drivers.

Signed-off-by: Alok Tiwari <alok.a.tiwari@oracle.com>
Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 164c187d25 (parent ab17ead0e0)
Author:    Alok Tiwari, 2025-06-24 21:16:30 -07:00
Committer: Christoph Hellwig
8 changed files with 14 additions and 14 deletions

drivers/nvme/host/apple.c

@@ -301,8 +301,8 @@ static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
 	memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
 
 	/*
-	 * This lock here doesn't make much sense at a first glace but
-	 * removing it will result in occasional missed completetion
+	 * This lock here doesn't make much sense at a first glance but
+	 * removing it will result in occasional missed completion
 	 * interrupts even though the commands still appear on the CQ.
 	 * It's unclear why this happens but our best guess is that
 	 * there is a bug in the firmware triggered when a new command
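For context, the pattern that comment protects looks roughly like the
following. This is a hedged sketch of the submission path, not the exact
apple.c code; the lock and doorbell field names are illustrative:

	unsigned long flags;

	/* Illustrative: publish the SQE and ring the doorbell under the
	 * same lock the completion path takes, which avoids the missed
	 * completion interrupts described in the comment above.
	 */
	spin_lock_irqsave(&q->lock, flags);
	memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
	writel(tag, q->sq_db);
	spin_unlock_irqrestore(&q->lock, flags);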

drivers/nvme/host/core.c

@@ -4286,7 +4286,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	}
 
 	/*
-	 * If available try to use the Command Set Idependent Identify Namespace
+	 * If available try to use the Command Set Independent Identify Namespace
 	 * data structure to find all the generic information that is needed to
 	 * set up a namespace. If not fall back to the legacy version.
 	 */

drivers/nvme/host/fc.c

@@ -899,7 +899,7 @@ EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
  * may crash.
  *
  * As such:
- * Wrapper all the dma routines and check the dev pointer.
+ * Wrap all the dma routines and check the dev pointer.
  *
  * If simple mappings (return just a dma address, we'll noop them,
  * returning a dma address of 0.
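The wrappers in question are small inline helpers; one representative
example, reproduced from memory and therefore best read as a sketch:

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		  enum dma_data_direction dir)
{
	/* no dma-capable device: noop the mapping, return address 0 */
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}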
@@ -1955,8 +1955,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	}
 
 	/*
-	 * For the linux implementation, if we have an unsucceesful
-	 * status, they blk-mq layer can typically be called with the
+	 * For the linux implementation, if we have an unsuccessful
+	 * status, the blk-mq layer can typically be called with the
 	 * non-zero status and the content of the cqe isn't important.
 	 */
 	if (status)
@@ -2429,7 +2429,7 @@ static bool nvme_fc_terminate_exchange(struct request *req, void *data)
 
 /*
  * This routine runs through all outstanding commands on the association
- * and aborts them. This routine is typically be called by the
+ * and aborts them. This routine is typically called by the
  * delete_association routine. It is also called due to an error during
  * reconnect. In that scenario, it is most likely a command that initializes
  * the controller, including fabric Connect commands on io queues, that
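The walk over outstanding commands is the standard blk-mq tagset
iteration; a hedged sketch of how the delete_association path drives the
callback named in the hunk header above:

	/* abort every busy request on the I/O tagset, then wait for
	 * all of them to be completed
	 */
	blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->tag_set);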
@@ -2622,7 +2622,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
  * as part of the exchange. The CQE is the last thing for the io,
  * which is transferred (explicitly or implicitly) with the RSP IU
  * sent on the exchange. After the CQE is received, the FC exchange is
- * terminaed and the Exchange may be used on a different io.
+ * terminated and the Exchange may be used on a different io.
  *
  * The transport to LLDD api has the transport making a request for a
  * new fcp io request to the LLDD. The LLDD then allocates a FC exchange

drivers/nvme/host/nvme.h

@@ -69,7 +69,7 @@ enum nvme_quirks {
 	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),
 
 	/*
-	 * The controller deterministically returns O's on reads to
+	 * The controller deterministically returns 0's on reads to
 	 * logical blocks that deallocate was called on.
 	 */
 	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),
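Call sites consume these quirks as simple bit tests against
ctrl->quirks; a minimal hedged example (the helper name is made up for
illustration):

static bool nvme_deallocate_reads_zeroes(struct nvme_ctrl *ctrl)
{
	/* true when reads of deallocated LBAs are guaranteed to be 0's */
	return ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES;
}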

drivers/nvme/host/pci.c

@@ -2439,7 +2439,7 @@ static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
 {
 	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
 
-	return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n",
+	return sysfs_emit(buf, "cmbloc : 0x%08x\ncmbsz : 0x%08x\n",
 		       ndev->cmbloc, ndev->cmbsz);
 }
 static DEVICE_ATTR_RO(cmb);
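The effect of the format-string fix is easy to demonstrate outside the
kernel; a standalone userspace snippet with an illustrative register
value:

#include <stdio.h>

int main(void)
{
	unsigned int cmbsz = 0x3ff;

	printf("cmbsz : x%08x\n", cmbsz);	/* old: "x000003ff", ambiguous */
	printf("cmbsz : 0x%08x\n", cmbsz);	/* new: "0x000003ff", clearly hex */
	return 0;
}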

drivers/nvme/host/rdma.c

@@ -877,7 +877,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 
 	/*
 	 * Only start IO queues for which we have allocated the tagset
-	 * and limitted it to the available queues. On reconnects, the
+	 * and limited it to the available queues. On reconnects, the
 	 * queue number might have changed.
 	 */
 	nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
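A worked reading of that min(), with assumed numbers:
ctrl->ctrl.queue_count includes the admin queue at index 0, while
tag_set.nr_hw_queues counts I/O queues only, hence the +1.

	/* Example: tagset sized for 4 I/O queues, but after reconnect the
	 * controller grants only 2, so queue_count == 3 (admin + 2 I/O):
	 *
	 *	nr_queues = min(4 + 1, 3) == 3	-> start I/O queues 1..2 only
	 */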

drivers/nvme/target/passthru.c

@@ -106,7 +106,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 			  pctrl->max_hw_sectors);
 
 	/*
-	 * nvmet_passthru_map_sg is limitted to using a single bio so limit
+	 * nvmet_passthru_map_sg is limited to using a single bio so limit
 	 * the mdts based on BIO_MAX_VECS as well
 	 */
 	max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,
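To make the cap concrete, under the common 4 KiB page assumption
(PAGE_SECTORS_SHIFT = PAGE_SHIFT - 9 = 3) and BIO_MAX_VECS = 256:

	/*
	 *	256 << 3 = 2048 sectors, and 2048 * 512 B = 1 MiB
	 *
	 * i.e. the advertised mdts is clamped to what one bio can carry.
	 */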
@@ -147,7 +147,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 	 * When passthru controller is setup using nvme-loop transport it will
 	 * export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in
 	 * the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl()
-	 * code path with duplicate ctr subsynqn. In order to prevent that we
+	 * code path with duplicate ctrl subsysnqn. In order to prevent that we
 	 * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
 	 */
 	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

drivers/nvme/target/zns.c

@@ -541,7 +541,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 	struct bio *bio;
 	int sg_cnt;
 
-	/* Request is completed on len mismatch in nvmet_check_transter_len() */
+	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
 	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
 		return;
 
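The guard works because the helper completes the request itself on a
length mismatch, so the handler only has to return. A sketch of its
shape, from memory of target/core.c, so details may differ:

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		/* complete here with an error; the caller just returns */
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}
	return true;
}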