From f9ef8dedee34e2d7828d5a6a0643cd969aaa8437 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 8 Dec 2025 03:07:30 +0100 Subject: [PATCH 01/64] dmaengine: dw-edma: Fix confusing cleanup.h syntax Initializing automatic __free variables to NULL without need (e.g. branches with different allocations), followed by actual allocation is in contrary to explicit coding rules guiding cleanup.h: "Given that the "__free(...) = NULL" pattern for variables defined at the top of the function poses this potential interdependency problem the recommendation is to always define and assign variables in one statement and not group variable definitions at the top of the function when __free() is used." Code does not have a bug, but is less readable and uses discouraged coding practice, so fix that by moving declaration to the place of assignment. Signed-off-by: Krzysztof Kozlowski Reviewed-by: Manivannan Sadhasivam Link: https://patch.msgid.link/20251208020729.4654-2-krzysztof.kozlowski@oss.qualcomm.com Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/dw-edma-pcie.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c index 3371e0a76d3c..e9caf8adca1f 100644 --- a/drivers/dma/dw-edma/dw-edma-pcie.c +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -161,13 +161,13 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; - struct dw_edma_pcie_data *vsec_data __free(kfree) = NULL; struct device *dev = &pdev->dev; struct dw_edma_chip *chip; int err, nr_irqs; int i, mask; - vsec_data = kmalloc(sizeof(*vsec_data), GFP_KERNEL); + struct dw_edma_pcie_data *vsec_data __free(kfree) = + kmalloc(sizeof(*vsec_data), GFP_KERNEL); if (!vsec_data) return -ENOMEM; From 892f2bb487916cb15432912cd6aae445ab2f48f0 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Thu, 6 Nov 2025 16:44:50 +0100 Subject: [PATCH 
02/64] dmaengine: qcom: bam_dma: order includes alphabetically For easier maintenance and better readability order all includes alphabetically. Signed-off-by: Bartosz Golaszewski Reviewed-by: Dmitry Baryshkov Link: https://patch.msgid.link/20251106-qcom-bam-dma-refactor-v1-1-0e2baaf3d81a@linaro.org Signed-off-by: Vinod Koul --- drivers/dma/qcom/bam_dma.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 2cf060174795..2f1f295d3e1f 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -23,24 +23,24 @@ * indication of where the hardware is currently working. */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include "../dmaengine.h" #include "../virt-dma.h" From 20f581834aacd743b3d95bbbb37a802d14cf3690 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Thu, 6 Nov 2025 16:44:51 +0100 Subject: [PATCH 03/64] dmaengine: qcom: bam_dma: use lock guards Simplify locking across the driver with lock guards from cleanup.h. 
Signed-off-by: Bartosz Golaszewski Link: https://patch.msgid.link/20251106-qcom-bam-dma-refactor-v1-2-0e2baaf3d81a@linaro.org Signed-off-by: Vinod Koul --- drivers/dma/qcom/bam_dma.c | 124 ++++++++++++++++--------------------- 1 file changed, 55 insertions(+), 69 deletions(-) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 2f1f295d3e1f..bcd8de9a9a12 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -24,6 +24,7 @@ */ #include +#include #include #include #include @@ -570,7 +571,6 @@ static void bam_free_chan(struct dma_chan *chan) struct bam_chan *bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; u32 val; - unsigned long flags; int ret; ret = pm_runtime_get_sync(bdev->dev); @@ -584,9 +584,8 @@ static void bam_free_chan(struct dma_chan *chan) goto err; } - spin_lock_irqsave(&bchan->vc.lock, flags); - bam_reset_channel(bchan); - spin_unlock_irqrestore(&bchan->vc.lock, flags); + scoped_guard(spinlock_irqsave, &bchan->vc.lock) + bam_reset_channel(bchan); dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt, bchan->fifo_phys); @@ -624,12 +623,11 @@ static int bam_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct bam_chan *bchan = to_bam_chan(chan); - unsigned long flag; - spin_lock_irqsave(&bchan->vc.lock, flag); + guard(spinlock_irqsave)(&bchan->vc.lock); + memcpy(&bchan->slave, cfg, sizeof(*cfg)); bchan->reconfigure = 1; - spin_unlock_irqrestore(&bchan->vc.lock, flag); return 0; } @@ -726,38 +724,37 @@ static int bam_dma_terminate_all(struct dma_chan *chan) { struct bam_chan *bchan = to_bam_chan(chan); struct bam_async_desc *async_desc, *tmp; - unsigned long flag; LIST_HEAD(head); /* remove all transactions, including active transaction */ - spin_lock_irqsave(&bchan->vc.lock, flag); - /* - * If we have transactions queued, then some might be committed to the - * hardware in the desc fifo. 
The only way to reset the desc fifo is - * to do a hardware reset (either by pipe or the entire block). - * bam_chan_init_hw() will trigger a pipe reset, and also reinit the - * pipe. If the pipe is left disabled (default state after pipe reset) - * and is accessed by a connected hardware engine, a fatal error in - * the BAM will occur. There is a small window where this could happen - * with bam_chan_init_hw(), but it is assumed that the caller has - * stopped activity on any attached hardware engine. Make sure to do - * this first so that the BAM hardware doesn't cause memory corruption - * by accessing freed resources. - */ - if (!list_empty(&bchan->desc_list)) { - async_desc = list_first_entry(&bchan->desc_list, - struct bam_async_desc, desc_node); - bam_chan_init_hw(bchan, async_desc->dir); - } + scoped_guard(spinlock_irqsave, &bchan->vc.lock) { + /* + * If we have transactions queued, then some might be committed to the + * hardware in the desc fifo. The only way to reset the desc fifo is + * to do a hardware reset (either by pipe or the entire block). + * bam_chan_init_hw() will trigger a pipe reset, and also reinit the + * pipe. If the pipe is left disabled (default state after pipe reset) + * and is accessed by a connected hardware engine, a fatal error in + * the BAM will occur. There is a small window where this could happen + * with bam_chan_init_hw(), but it is assumed that the caller has + * stopped activity on any attached hardware engine. Make sure to do + * this first so that the BAM hardware doesn't cause memory corruption + * by accessing freed resources. 
+ */ + if (!list_empty(&bchan->desc_list)) { + async_desc = list_first_entry(&bchan->desc_list, + struct bam_async_desc, desc_node); + bam_chan_init_hw(bchan, async_desc->dir); + } - list_for_each_entry_safe(async_desc, tmp, - &bchan->desc_list, desc_node) { - list_add(&async_desc->vd.node, &bchan->vc.desc_issued); - list_del(&async_desc->desc_node); - } + list_for_each_entry_safe(async_desc, tmp, + &bchan->desc_list, desc_node) { + list_add(&async_desc->vd.node, &bchan->vc.desc_issued); + list_del(&async_desc->desc_node); + } - vchan_get_all_descriptors(&bchan->vc, &head); - spin_unlock_irqrestore(&bchan->vc.lock, flag); + vchan_get_all_descriptors(&bchan->vc, &head); + } vchan_dma_desc_free_list(&bchan->vc, &head); @@ -773,17 +770,16 @@ static int bam_pause(struct dma_chan *chan) { struct bam_chan *bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; - unsigned long flag; int ret; ret = pm_runtime_get_sync(bdev->dev); if (ret < 0) return ret; - spin_lock_irqsave(&bchan->vc.lock, flag); - writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); - bchan->paused = 1; - spin_unlock_irqrestore(&bchan->vc.lock, flag); + scoped_guard(spinlock_irqsave, &bchan->vc.lock) { + writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); + bchan->paused = 1; + } pm_runtime_mark_last_busy(bdev->dev); pm_runtime_put_autosuspend(bdev->dev); @@ -799,17 +795,16 @@ static int bam_resume(struct dma_chan *chan) { struct bam_chan *bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; - unsigned long flag; int ret; ret = pm_runtime_get_sync(bdev->dev); if (ret < 0) return ret; - spin_lock_irqsave(&bchan->vc.lock, flag); - writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); - bchan->paused = 0; - spin_unlock_irqrestore(&bchan->vc.lock, flag); + scoped_guard(spinlock_irqsave, &bchan->vc.lock) { + writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); + bchan->paused = 0; + } pm_runtime_mark_last_busy(bdev->dev); pm_runtime_put_autosuspend(bdev->dev); @@ 
-826,7 +821,6 @@ static int bam_resume(struct dma_chan *chan) static u32 process_channel_irqs(struct bam_device *bdev) { u32 i, srcs, pipe_stts, offset, avail; - unsigned long flags; struct bam_async_desc *async_desc, *tmp; srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE)); @@ -846,7 +840,7 @@ static u32 process_channel_irqs(struct bam_device *bdev) writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR)); - spin_lock_irqsave(&bchan->vc.lock, flags); + guard(spinlock_irqsave)(&bchan->vc.lock); offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) & P_SW_OFSTS_MASK; @@ -885,8 +879,6 @@ static u32 process_channel_irqs(struct bam_device *bdev) } list_del(&async_desc->desc_node); } - - spin_unlock_irqrestore(&bchan->vc.lock, flags); } return srcs; @@ -950,7 +942,6 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie, int ret; size_t residue = 0; unsigned int i; - unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) @@ -959,23 +950,22 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie, if (!txstate) return bchan->paused ? 
DMA_PAUSED : ret; - spin_lock_irqsave(&bchan->vc.lock, flags); - vd = vchan_find_desc(&bchan->vc, cookie); - if (vd) { - residue = container_of(vd, struct bam_async_desc, vd)->length; - } else { - list_for_each_entry(async_desc, &bchan->desc_list, desc_node) { - if (async_desc->vd.tx.cookie != cookie) - continue; + scoped_guard(spinlock_irqsave, &bchan->vc.lock) { + vd = vchan_find_desc(&bchan->vc, cookie); + if (vd) { + residue = container_of(vd, struct bam_async_desc, vd)->length; + } else { + list_for_each_entry(async_desc, &bchan->desc_list, desc_node) { + if (async_desc->vd.tx.cookie != cookie) + continue; - for (i = 0; i < async_desc->num_desc; i++) - residue += le16_to_cpu( - async_desc->curr_desc[i].size); + for (i = 0; i < async_desc->num_desc; i++) + residue += le16_to_cpu( + async_desc->curr_desc[i].size); + } } } - spin_unlock_irqrestore(&bchan->vc.lock, flags); - dma_set_residue(txstate, residue); if (ret == DMA_IN_PROGRESS && bchan->paused) @@ -1116,17 +1106,16 @@ static void dma_tasklet(struct tasklet_struct *t) { struct bam_device *bdev = from_tasklet(bdev, t, task); struct bam_chan *bchan; - unsigned long flags; unsigned int i; /* go through the channels and kick off transactions */ for (i = 0; i < bdev->num_channels; i++) { bchan = &bdev->channels[i]; - spin_lock_irqsave(&bchan->vc.lock, flags); + + guard(spinlock_irqsave)(&bchan->vc.lock); if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan)) bam_start_dma(bchan); - spin_unlock_irqrestore(&bchan->vc.lock, flags); } } @@ -1140,15 +1129,12 @@ static void dma_tasklet(struct tasklet_struct *t) static void bam_issue_pending(struct dma_chan *chan) { struct bam_chan *bchan = to_bam_chan(chan); - unsigned long flags; - spin_lock_irqsave(&bchan->vc.lock, flags); + guard(spinlock_irqsave)(&bchan->vc.lock); /* if work pending and idle, start a transaction */ if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan)) bam_start_dma(bchan); - - spin_unlock_irqrestore(&bchan->vc.lock, flags); } /** From 
f94163e950c9568fe2d2d88317d9602ce021e646 Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Tue, 25 Nov 2025 21:26:21 +0000 Subject: [PATCH 04/64] dt-bindings: dma: rz-dmac: Document RZ/V2N SoC support Document the DMA controller on the Renesas RZ/V2N SoC, which is architecturally identical to the DMAC found on the RZ/V2H(P) SoC. Signed-off-by: Lad Prabhakar Acked-by: Krzysztof Kozlowski Reviewed-by: Geert Uytterhoeven Link: https://patch.msgid.link/20251125212621.267397-1-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml index f891cfcc48c7..d137b9cbaee9 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml +++ b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml @@ -24,6 +24,7 @@ properties: - items: - enum: - renesas,r9a09g047-dmac # RZ/G3E + - renesas,r9a09g056-dmac # RZ/V2N - const: renesas,r9a09g057-dmac - const: renesas,r9a09g057-dmac # RZ/V2H(P) From 08be54a9e56f9523b50d1923a94a48ef5890c0bc Mon Sep 17 00:00:00 2001 From: Bhanu Seshu Kumar Valluri Date: Thu, 13 Nov 2025 12:19:37 +0530 Subject: [PATCH 05/64] docs: dmaengine: add explanation for phys field in dma_async_tx_descriptor structure Describe the need to initialize the phys field in the dma_async_tx_descriptor structure during its initialization. 
Signed-off-by: Bhanu Seshu Kumar Valluri Reviewed-by: Bagas Sanjaya Link: https://patch.msgid.link/20251113064937.8735-1-bhanuseshukumar@gmail.com Signed-off-by: Vinod Koul --- Documentation/driver-api/dmaengine/provider.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst index 1594598b3317..f4ed98f701c9 100644 --- a/Documentation/driver-api/dmaengine/provider.rst +++ b/Documentation/driver-api/dmaengine/provider.rst @@ -411,7 +411,7 @@ supported. - This structure can be initialized using the function ``dma_async_tx_descriptor_init``. - - You'll also need to set two fields in this structure: + - You'll also need to set following fields in this structure: - flags: TODO: Can it be modified by the driver itself, or @@ -421,6 +421,9 @@ supported. that is supposed to push the current transaction descriptor to a pending queue, waiting for issue_pending to be called. + - phys: Physical address of the descriptor which is used later by + the dma engine to read the descriptor and initiate transfer. + - In this structure the function pointer callback_result can be initialized in order for the submitter to be notified that a transaction has completed. In the earlier code the function pointer From bbfb8677d31a78a898c8d02e3ca58790b89a6dda Mon Sep 17 00:00:00 2001 From: Chu Guangqing Date: Tue, 25 Nov 2025 09:57:34 +0800 Subject: [PATCH 06/64] dmaengine: pl08x: Fix a spelling mistake "Accound" is an archaic spelling variant of "account". It had completely fallen out of formal use as early as the 17th century, when the spelling of Modern English became standardized. Here, it is corrected to "account" to enhance comprehension. 
Signed-off-by: Chu Guangqing Link: https://patch.msgid.link/20251125015734.1572-1-chuguangqing@inspur.com Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 38cdbca59485..3bfb3b312027 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1010,7 +1010,7 @@ static inline u32 pl08x_lli_control_bits(struct pl08x_driver_data *pl08x, /* * Remove all src, dst and transfer size bits, then set the * width and size according to the parameters. The bit offsets - * are different in the FTDMAC020 so we need to accound for this. + * are different in the FTDMAC020 so we need to account for this. */ if (pl08x->vd->ftdmac020) { retbits &= ~FTDMAC020_LLI_DST_WIDTH_MSK; From 0d41ed4ea496fabbb4dc21171e32d9a924c2a661 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Fri, 21 Nov 2025 14:36:56 +0100 Subject: [PATCH 07/64] dmaengine: stm32-dma3: use module_platform_driver Without module_platform_driver(), stm32-dma3 doesn't have a module_exit procedure. Once stm32-dma3 module is inserted, it can't be removed, marked busy. Use module_platform_driver() instead of subsys_initcall() to register (insmod) and unregister (rmmod) stm32-dma3 driver. 
Reviewed-by: Eugen Hristev Signed-off-by: Amelie Delaunay Link: https://patch.msgid.link/20251121-dma3_improv-v2-1-76a207b13ea6@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32/stm32-dma3.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c index 50e7106c5cb7..9500164c8f68 100644 --- a/drivers/dma/stm32/stm32-dma3.c +++ b/drivers/dma/stm32/stm32-dma3.c @@ -1914,12 +1914,7 @@ static struct platform_driver stm32_dma3_driver = { }, }; -static int __init stm32_dma3_init(void) -{ - return platform_driver_register(&stm32_dma3_driver); -} - -subsys_initcall(stm32_dma3_init); +module_platform_driver(stm32_dma3_driver); MODULE_DESCRIPTION("STM32 DMA3 controller driver"); MODULE_AUTHOR("Amelie Delaunay "); From d26eb4a75a4a2bbf27305e62ad82cedf5f8c577c Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Fri, 21 Nov 2025 14:36:57 +0100 Subject: [PATCH 08/64] dmaengine: stm32-dma3: introduce channel semaphore helpers Before restoring semaphore status after suspend, introduce new functions to handle semaphore operations : - stm32_dma3_get_chan_sem() to take the semaphore - stm32_dma3_put_chan_sem() to release the semaphore Also, use a new boolean variable semaphore_taken, which is true when the semaphore has been taken and false when it has been released. 
Signed-off-by: Amelie Delaunay Link: https://patch.msgid.link/20251121-dma3_improv-v2-2-76a207b13ea6@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32/stm32-dma3.c | 55 +++++++++++++++++++++++++++------- 1 file changed, 44 insertions(+), 11 deletions(-) diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c index 9500164c8f68..a1583face7ec 100644 --- a/drivers/dma/stm32/stm32-dma3.c +++ b/drivers/dma/stm32/stm32-dma3.c @@ -288,6 +288,7 @@ struct stm32_dma3_chan { u32 fifo_size; u32 max_burst; bool semaphore_mode; + bool semaphore_taken; struct stm32_dma3_dt_conf dt_config; struct dma_slave_config dma_config; u8 config_set; @@ -1063,11 +1064,50 @@ static irqreturn_t stm32_dma3_chan_irq(int irq, void *devid) return IRQ_HANDLED; } +static int stm32_dma3_get_chan_sem(struct stm32_dma3_chan *chan) +{ + struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); + u32 csemcr, ccid; + + csemcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id)); + /* Make an attempt to take the channel semaphore if not already taken */ + if (!(csemcr & CSEMCR_SEM_MUTEX)) { + writel_relaxed(CSEMCR_SEM_MUTEX, ddata->base + STM32_DMA3_CSEMCR(chan->id)); + csemcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id)); + } + + /* Check if channel is under CID1 control */ + ccid = FIELD_GET(CSEMCR_SEM_CCID, csemcr); + if (!(csemcr & CSEMCR_SEM_MUTEX) || ccid != CCIDCFGR_CID1) + goto bad_cid; + + chan->semaphore_taken = true; + dev_dbg(chan2dev(chan), "under CID1 control (semcr=0x%08x)\n", csemcr); + + return 0; + +bad_cid: + chan->semaphore_taken = false; + dev_err(chan2dev(chan), "not under CID1 control (in-use by CID%d)\n", ccid); + + return -EACCES; +} + +static void stm32_dma3_put_chan_sem(struct stm32_dma3_chan *chan) +{ + struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); + + if (chan->semaphore_taken) { + writel_relaxed(0, ddata->base + STM32_DMA3_CSEMCR(chan->id)); + chan->semaphore_taken = false; + dev_dbg(chan2dev(chan), "no more 
under CID1 control\n"); + } +} + static int stm32_dma3_alloc_chan_resources(struct dma_chan *c) { struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); - u32 id = chan->id, csemcr, ccid; int ret; ret = pm_runtime_resume_and_get(ddata->dma_dev.dev); @@ -1092,16 +1132,9 @@ static int stm32_dma3_alloc_chan_resources(struct dma_chan *c) /* Take the channel semaphore */ if (chan->semaphore_mode) { - writel_relaxed(CSEMCR_SEM_MUTEX, ddata->base + STM32_DMA3_CSEMCR(id)); - csemcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(id)); - ccid = FIELD_GET(CSEMCR_SEM_CCID, csemcr); - /* Check that the channel is well taken */ - if (ccid != CCIDCFGR_CID1) { - dev_err(chan2dev(chan), "Not under CID1 control (in-use by CID%d)\n", ccid); - ret = -EPERM; + ret = stm32_dma3_get_chan_sem(chan); + if (ret) goto err_pool_destroy; - } - dev_dbg(chan2dev(chan), "Under CID1 control (semcr=0x%08x)\n", csemcr); } return 0; @@ -1135,7 +1168,7 @@ static void stm32_dma3_free_chan_resources(struct dma_chan *c) /* Release the channel semaphore */ if (chan->semaphore_mode) - writel_relaxed(0, ddata->base + STM32_DMA3_CSEMCR(chan->id)); + stm32_dma3_put_chan_sem(chan); pm_runtime_put_sync(ddata->dma_dev.dev); From dea737e31c2c62df5a45871bfb4ceb90a112dbd8 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Fri, 21 Nov 2025 14:36:58 +0100 Subject: [PATCH 09/64] dmaengine: stm32-dma3: restore channel semaphore status after suspend Depending on the power state reached during suspend, the CxSEMCR register could have been reset, and the semaphore released. On resume, try to take the semaphore again. If the semaphore cannot be taken, an error log displaying the channel number and channel user is generated. 
This requires introducing two new functions: stm32_dma3_pm_suspend(), where the status of each channel is checked because suspension is not allowed if a channel is still running; stm32_dma3_pm_resume(), where the channel semaphore is restored if it was taken before suspend. Signed-off-by: Amelie Delaunay Link: https://patch.msgid.link/20251121-dma3_improv-v2-3-76a207b13ea6@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32/stm32-dma3.c | 75 +++++++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c index a1583face7ec..29ea510fa539 100644 --- a/drivers/dma/stm32/stm32-dma3.c +++ b/drivers/dma/stm32/stm32-dma3.c @@ -1237,6 +1237,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_cha bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) || !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf); + /* Semaphore could be lost during suspend/resume */ + if (chan->semaphore_mode && !chan->semaphore_taken) + return NULL; + count = stm32_dma3_get_ll_count(chan, len, prevent_refactor); swdesc = stm32_dma3_chan_desc_alloc(chan, count); @@ -1297,6 +1301,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf); int ret; + /* Semaphore could be lost during suspend/resume */ + if (chan->semaphore_mode && !chan->semaphore_taken) + return NULL; + count = 0; for_each_sg(sgl, sg, sg_len, i) count += stm32_dma3_get_ll_count(chan, sg_dma_len(sg), prevent_refactor); @@ -1383,6 +1391,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_cyclic(struct dma_cha u32 count, i, ctr1, ctr2; int ret; + /* Semaphore could be lost during suspend/resume */ + if (chan->semaphore_mode && !chan->semaphore_taken) + return NULL; + if (!buf_len || !period_len || period_len > STM32_DMA3_MAX_BLOCK_SIZE) { dev_err(chan2dev(chan), 
"Invalid buffer/period length\n"); return NULL; @@ -1932,8 +1944,69 @@ static int stm32_dma3_runtime_resume(struct device *dev) return ret; } +static int stm32_dma3_pm_suspend(struct device *dev) +{ + struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev); + struct dma_device *dma_dev = &ddata->dma_dev; + struct dma_chan *c; + int ccr, ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + list_for_each_entry(c, &dma_dev->channels, device_node) { + struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); + + ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)); + if (ccr & CCR_EN) { + dev_warn(dev, "Suspend is prevented: %s still in use by %s\n", + dma_chan_name(c), dev_name(c->slave)); + pm_runtime_put_sync(dev); + return -EBUSY; + } + } + + pm_runtime_put_sync(dev); + + pm_runtime_force_suspend(dev); + + return 0; +} + +static int stm32_dma3_pm_resume(struct device *dev) +{ + struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev); + struct dma_device *dma_dev = &ddata->dma_dev; + struct dma_chan *c; + int ret; + + ret = pm_runtime_force_resume(dev); + if (ret < 0) + return ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + /* + * Channel semaphores need to be restored in case of registers reset during low power. + * stm32_dma3_get_chan_sem() will prior check the semaphore status. 
+ */ + list_for_each_entry(c, &dma_dev->channels, device_node) { + struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); + + if (chan->semaphore_mode && chan->semaphore_taken) + stm32_dma3_get_chan_sem(chan); + } + + pm_runtime_put_sync(dev); + + return 0; +} + static const struct dev_pm_ops stm32_dma3_pm_ops = { - SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) + SYSTEM_SLEEP_PM_OPS(stm32_dma3_pm_suspend, stm32_dma3_pm_resume) RUNTIME_PM_OPS(stm32_dma3_runtime_suspend, stm32_dma3_runtime_resume, NULL) }; From 8be4f3cbe263d22053d7afea4efee2e7178eee21 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Fri, 21 Nov 2025 14:36:59 +0100 Subject: [PATCH 10/64] dmaengine: stm32-dma3: introduce ddata2dev helper The purpose of this helper is to 'standardize' device pointer retrieval, similar to the chan2dev() helper. ddata2dev() helper returns the device pointer from struct dma_device stored in stm32_dma3_ddata structure. Device pointer from struct dma_device has been initialized with &pdev->dev, so the ddata2dev helper returns &pdev->dev. 
Signed-off-by: Amelie Delaunay Link: https://patch.msgid.link/20251121-dma3_improv-v2-4-76a207b13ea6@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32/stm32-dma3.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c index 29ea510fa539..84b00c436134 100644 --- a/drivers/dma/stm32/stm32-dma3.c +++ b/drivers/dma/stm32/stm32-dma3.c @@ -333,6 +333,11 @@ static struct device *chan2dev(struct stm32_dma3_chan *chan) return &chan->vchan.chan.dev->device; } +static struct device *ddata2dev(struct stm32_dma3_ddata *ddata) +{ + return ddata->dma_dev.dev; +} + static void stm32_dma3_chan_dump_reg(struct stm32_dma3_chan *chan) { struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); @@ -1110,7 +1115,7 @@ static int stm32_dma3_alloc_chan_resources(struct dma_chan *c) struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); int ret; - ret = pm_runtime_resume_and_get(ddata->dma_dev.dev); + ret = pm_runtime_resume_and_get(ddata2dev(ddata)); if (ret < 0) return ret; @@ -1144,7 +1149,7 @@ err_pool_destroy: chan->lli_pool = NULL; err_put_sync: - pm_runtime_put_sync(ddata->dma_dev.dev); + pm_runtime_put_sync(ddata2dev(ddata)); return ret; } @@ -1170,7 +1175,7 @@ static void stm32_dma3_free_chan_resources(struct dma_chan *c) if (chan->semaphore_mode) stm32_dma3_put_chan_sem(chan); - pm_runtime_put_sync(ddata->dma_dev.dev); + pm_runtime_put_sync(ddata2dev(ddata)); /* Reset configuration */ memset(&chan->dt_config, 0, sizeof(chan->dt_config)); @@ -1610,11 +1615,11 @@ static bool stm32_dma3_filter_fn(struct dma_chan *c, void *fn_param) if (!(mask & BIT(chan->id))) return false; - ret = pm_runtime_resume_and_get(ddata->dma_dev.dev); + ret = pm_runtime_resume_and_get(ddata2dev(ddata)); if (ret < 0) return false; semcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id)); - pm_runtime_put_sync(ddata->dma_dev.dev); + pm_runtime_put_sync(ddata2dev(ddata)); /* Check 
if chan is free */ if (semcr & CSEMCR_SEM_MUTEX) @@ -1636,7 +1641,7 @@ static struct dma_chan *stm32_dma3_of_xlate(struct of_phandle_args *dma_spec, st struct dma_chan *c; if (dma_spec->args_count < 3) { - dev_err(ddata->dma_dev.dev, "Invalid args count\n"); + dev_err(ddata2dev(ddata), "Invalid args count\n"); return NULL; } @@ -1645,14 +1650,14 @@ static struct dma_chan *stm32_dma3_of_xlate(struct of_phandle_args *dma_spec, st conf.tr_conf = dma_spec->args[2]; if (conf.req_line >= ddata->dma_requests) { - dev_err(ddata->dma_dev.dev, "Invalid request line\n"); + dev_err(ddata2dev(ddata), "Invalid request line\n"); return NULL; } /* Request dma channel among the generic dma controller list */ c = dma_request_channel(mask, stm32_dma3_filter_fn, &conf); if (!c) { - dev_err(ddata->dma_dev.dev, "No suitable channel found\n"); + dev_err(ddata2dev(ddata), "No suitable channel found\n"); return NULL; } @@ -1665,6 +1670,7 @@ static struct dma_chan *stm32_dma3_of_xlate(struct of_phandle_args *dma_spec, st static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata) { + struct device *dev = ddata2dev(ddata); u32 chan_reserved, mask = 0, i, ccidcfgr, invalid_cid = 0; /* Reserve Secure channels */ @@ -1676,7 +1682,7 @@ static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata) * In case CID filtering is not configured, dma-channel-mask property can be used to * specify available DMA channels to the kernel. */ - of_property_read_u32(ddata->dma_dev.dev->of_node, "dma-channel-mask", &mask); + of_property_read_u32(dev->of_node, "dma-channel-mask", &mask); /* Reserve !CID-filtered not in dma-channel-mask, static CID != CID1, CID1 not allowed */ for (i = 0; i < ddata->dma_channels; i++) { @@ -1696,7 +1702,7 @@ static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata) ddata->chans[i].semaphore_mode = true; } } - dev_dbg(ddata->dma_dev.dev, "chan%d: %s mode, %s\n", i, + dev_dbg(dev, "chan%d: %s mode, %s\n", i, !(ccidcfgr & CCIDCFGR_CFEN) ? 
"!CID-filtered" : ddata->chans[i].semaphore_mode ? "Semaphore" : "Static CID", (chan_reserved & BIT(i)) ? "denied" : @@ -1704,7 +1710,7 @@ static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata) } if (invalid_cid) - dev_warn(ddata->dma_dev.dev, "chan%*pbl have invalid CID configuration\n", + dev_warn(dev, "chan%*pbl have invalid CID configuration\n", ddata->dma_channels, &invalid_cid); return chan_reserved; From c381f1a38a4c7542cc6ec049d4dcff90a9423e89 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 17 Nov 2025 17:18:51 +0100 Subject: [PATCH 11/64] dmaengine: ti: k3-udma: enable compile testing There does not seem to be anything preventing the K3 UDMA drivers from being compile tested (on arm64 as one dependency depends on ARM64) so enable compile testing for wider build coverage. Note that the ring accelerator dependency can only be selected when "TI SOC drivers support" (SOC_TI) is enabled so select that option too. Signed-off-by: Johan Hovold Link: https://patch.msgid.link/20251117161851.11242-1-johan@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/ti/Kconfig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig index dbf168146d35..cbc30ab62783 100644 --- a/drivers/dma/ti/Kconfig +++ b/drivers/dma/ti/Kconfig @@ -36,11 +36,12 @@ config DMA_OMAP config TI_K3_UDMA tristate "Texas Instruments UDMA support" - depends on ARCH_K3 + depends on ARCH_K3 || COMPILE_TEST depends on TI_SCI_PROTOCOL depends on TI_SCI_INTA_IRQCHIP select DMA_ENGINE select DMA_VIRTUAL_CHANNELS + select SOC_TI select TI_K3_RINGACC select TI_K3_PSIL help @@ -49,7 +50,7 @@ config TI_K3_UDMA config TI_K3_UDMA_GLUE_LAYER tristate "Texas Instruments UDMA Glue layer for non DMAengine users" - depends on ARCH_K3 + depends on ARCH_K3 || COMPILE_TEST depends on TI_K3_UDMA help Say y here to support the K3 NAVSS DMA glue interface From bce33c132a2061c9a7958474c3e2d030c22664de Mon Sep 17 00:00:00 2001 From: Sakari Ailus 
Date: Mon, 27 Oct 2025 15:32:27 +0200 Subject: [PATCH 12/64] dmaengine: at_xdmac: Remove redundant pm_runtime_mark_last_busy() calls pm_runtime_put_autosuspend(), pm_runtime_put_sync_autosuspend(), pm_runtime_autosuspend() and pm_request_autosuspend() now include a call to pm_runtime_mark_last_busy(). Remove the now-redundant explicit call to pm_runtime_mark_last_busy(). Signed-off-by: Sakari Ailus Link: https://patch.msgid.link/20251027133232.392898-1-sakari.ailus@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 3fbc74710a13..ada96d490847 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -379,7 +379,6 @@ static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan) if (!desc->active_xfer) continue; - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); } } @@ -413,7 +412,6 @@ static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan) ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask); - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); return ret; @@ -446,7 +444,6 @@ static void at_xdmac_off(struct at_xdmac *atxdmac, bool suspend_descriptors) } } - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); } @@ -1676,7 +1673,6 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, spin_unlock: spin_unlock_irqrestore(&atchan->lock, flags); - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); return ret; } @@ -1758,7 +1754,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da, bad_desc->lld.mbr_ubc); - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); /* Then continue with usual descriptor management */ @@ -1822,7 +1817,6 @@ static void 
at_xdmac_tasklet(struct tasklet_struct *t) * Decrement runtime PM ref counter incremented in * at_xdmac_start_xfer(). */ - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); } @@ -1954,7 +1948,6 @@ static int at_xdmac_device_pause(struct dma_chan *chan) spin_unlock_irqrestore(&atchan->lock, flags); - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); return 0; @@ -1998,7 +1991,6 @@ static int at_xdmac_device_resume(struct dma_chan *chan) unlock: spin_unlock_irqrestore(&atchan->lock, flags); - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); return ret; @@ -2041,7 +2033,6 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); spin_unlock_irqrestore(&atchan->lock, flags); - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); return 0; @@ -2235,7 +2226,6 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev) } } - pm_runtime_mark_last_busy(atxdmac->dev); pm_runtime_put_autosuspend(atxdmac->dev); return 0; @@ -2412,7 +2402,6 @@ static int at_xdmac_probe(struct platform_device *pdev) at_xdmac_axi_config(pdev); - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; From 5f6f0cad6d2d599b765d572216a290e48bfdcb5f Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Mon, 27 Oct 2025 15:32:28 +0200 Subject: [PATCH 13/64] dmaengine: pl330: Remove redundant pm_runtime_mark_last_busy() calls pm_runtime_put_autosuspend(), pm_runtime_put_sync_autosuspend(), pm_runtime_autosuspend() and pm_request_autosuspend() now include a call to pm_runtime_mark_last_busy(). Remove the now-redundant explicit call to pm_runtime_mark_last_busy(). 
Signed-off-by: Sakari Ailus Link: https://patch.msgid.link/20251027133232.392898-2-sakari.ailus@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 82a9fe88ad54..72f260328ae9 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2133,10 +2133,8 @@ static void pl330_tasklet(struct tasklet_struct *t) spin_unlock_irqrestore(&pch->lock, flags); /* If work list empty, power down */ - if (power_down) { - pm_runtime_mark_last_busy(pch->dmac->ddma.dev); + if (power_down) pm_runtime_put_autosuspend(pch->dmac->ddma.dev); - } } static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, @@ -2313,7 +2311,6 @@ static int pl330_terminate_all(struct dma_chan *chan) list_splice_tail_init(&pch->work_list, &pl330->desc_pool); list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); - pm_runtime_mark_last_busy(pl330->ddma.dev); if (power_down) pm_runtime_put_autosuspend(pl330->ddma.dev); pm_runtime_put_autosuspend(pl330->ddma.dev); @@ -2347,7 +2344,6 @@ static int pl330_pause(struct dma_chan *chan) desc->status = PAUSED; } spin_unlock_irqrestore(&pch->lock, flags); - pm_runtime_mark_last_busy(pl330->ddma.dev); pm_runtime_put_autosuspend(pl330->ddma.dev); return 0; @@ -2371,7 +2367,6 @@ static void pl330_free_chan_resources(struct dma_chan *chan) list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); spin_unlock_irqrestore(&pl330->lock, flags); - pm_runtime_mark_last_busy(pch->dmac->ddma.dev); pm_runtime_put_autosuspend(pch->dmac->ddma.dev); pl330_unprep_slave_fifo(pch); } @@ -3176,7 +3171,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pm_runtime_irq_safe(&adev->dev); pm_runtime_use_autosuspend(&adev->dev); pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY); - pm_runtime_mark_last_busy(&adev->dev); 
pm_runtime_put_autosuspend(&adev->dev); return 0; From 01f2bcf06d7e0e3c4badd03c030cf634ca10a172 Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Mon, 27 Oct 2025 15:32:30 +0200 Subject: [PATCH 14/64] dmaengine: ste_dma40: Remove redundant pm_runtime_mark_last_busy() calls pm_runtime_put_autosuspend(), pm_runtime_put_sync_autosuspend(), pm_runtime_autosuspend() and pm_request_autosuspend() now include a call to pm_runtime_mark_last_busy(). Remove the now-redundant explicit call to pm_runtime_mark_last_busy(). Signed-off-by: Sakari Ailus Reviewed-by: Linus Walleij Link: https://patch.msgid.link/20251027133232.392898-4-sakari.ailus@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index d52e1685aed5..e67e0d66e6e8 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1452,7 +1452,6 @@ static int d40_pause(struct dma_chan *chan) res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); spin_unlock_irqrestore(&d40c->lock, flags); return res; @@ -1479,7 +1478,6 @@ static int d40_resume(struct dma_chan *chan) if (d40_residue(d40c) || d40_tx_is_linked(d40c)) res = d40_channel_execute_command(d40c, D40_DMA_RUN); - pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); spin_unlock_irqrestore(&d40c->lock, flags); return res; @@ -1581,7 +1579,6 @@ static void dma_tc_handle(struct d40_chan *d40c) if (d40_queue_start(d40c) == NULL) { d40c->busy = false; - pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); } @@ -2054,16 +2051,13 @@ static int d40_free_dma(struct d40_chan *d40c) else d40c->base->lookup_phy_chans[phy->num] = NULL; - if (d40c->busy) { - pm_runtime_mark_last_busy(d40c->base->dev); + if (d40c->busy) 
pm_runtime_put_autosuspend(d40c->base->dev); - } d40c->busy = false; d40c->phy_chan = NULL; d40c->configured = false; mark_last_busy: - pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); return res; } @@ -2466,7 +2460,6 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) if (is_free_phy) d40_config_write(d40c); mark_last_busy: - pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); spin_unlock_irqrestore(&d40c->lock, flags); return err; @@ -2618,12 +2611,9 @@ static int d40_terminate_all(struct dma_chan *chan) chan_err(d40c, "Failed to stop channel\n"); d40_term_all(d40c); - pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); - if (d40c->busy) { - pm_runtime_mark_last_busy(d40c->base->dev); + if (d40c->busy) pm_runtime_put_autosuspend(d40c->base->dev); - } d40c->busy = false; spin_unlock_irqrestore(&d40c->lock, flags); From 35d522a9612f5ba83192416521725acede02c28f Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Mon, 27 Oct 2025 15:32:31 +0200 Subject: [PATCH 15/64] dmaengine: ti: Remove redundant pm_runtime_mark_last_busy() calls pm_runtime_put_autosuspend(), pm_runtime_put_sync_autosuspend(), pm_runtime_autosuspend() and pm_request_autosuspend() now include a call to pm_runtime_mark_last_busy(). Remove the now-redundant explicit call to pm_runtime_mark_last_busy(). 
Signed-off-by: Sakari Ailus Link: https://patch.msgid.link/20251027133232.392898-5-sakari.ailus@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/ti/cppi41.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index 8d8c3d6038fc..88756dccd62c 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -390,7 +390,6 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan) if (!c->is_tx) cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); - pm_runtime_mark_last_busy(cdd->ddev.dev); pm_runtime_put_autosuspend(cdd->ddev.dev); return 0; @@ -411,7 +410,6 @@ static void cppi41_dma_free_chan_resources(struct dma_chan *chan) WARN_ON(!list_empty(&cdd->pending)); - pm_runtime_mark_last_busy(cdd->ddev.dev); pm_runtime_put_autosuspend(cdd->ddev.dev); } @@ -509,7 +507,6 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan) cppi41_run_queue(cdd); spin_unlock_irqrestore(&cdd->lock, flags); - pm_runtime_mark_last_busy(cdd->ddev.dev); pm_runtime_put_autosuspend(cdd->ddev.dev); } @@ -627,7 +624,6 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( txd = &c->txd; err_out_not_ready: - pm_runtime_mark_last_busy(cdd->ddev.dev); pm_runtime_put_autosuspend(cdd->ddev.dev); return txd; @@ -1139,7 +1135,6 @@ static int cppi41_dma_probe(struct platform_device *pdev) if (ret) goto err_of; - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; From 3b81235280026c551660c6374ede9599fc82f617 Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Mon, 27 Oct 2025 15:32:32 +0200 Subject: [PATCH 16/64] dmaengine: zynqmp_dma: Remove redundant pm_runtime_mark_last_busy() calls pm_runtime_put_autosuspend(), pm_runtime_put_sync_autosuspend(), pm_runtime_autosuspend() and pm_request_autosuspend() now include a call to pm_runtime_mark_last_busy(). Remove the now-redundant explicit call to pm_runtime_mark_last_busy(). 
Signed-off-by: Sakari Ailus Reviewed-by: Abin Joseph Link: https://patch.msgid.link/20251027133232.392898-6-sakari.ailus@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index f7e584de4335..7bb3716e60da 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -695,7 +695,6 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), chan->desc_pool_v, chan->desc_pool_p); kfree(chan->sw_desc_pool); - pm_runtime_mark_last_busy(chan->dev); pm_runtime_put_autosuspend(chan->dev); } @@ -1145,7 +1144,6 @@ static int zynqmp_dma_probe(struct platform_device *pdev) goto free_chan_resources; } - pm_runtime_mark_last_busy(zdev->dev); pm_runtime_put_sync_autosuspend(zdev->dev); return 0; From f5a4aa643ee968137eea902aa321c58c14c256c7 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 1 Nov 2025 12:15:24 -0700 Subject: [PATCH 17/64] dmaengine: dw_edma: correct kernel-doc warnings in Use the correct enum name in its kernel-doc heading. Add ending ':' to struct member names. Drop the @id: kernel-doc entry since there is no struct member named 'id'. edma.h:46: warning: expecting prototype for struct dw_edma_core_ops. 
Prototype was for struct dw_edma_plat_ops instead Warning: edma.h:101 struct member 'ops' not described in 'dw_edma_chip' Warning: edma.h:101 struct member 'flags' not described in 'dw_edma_chip' Warning: edma.h:101 struct member 'reg_base' not described in 'dw_edma_chip' Warning: edma.h:101 struct member 'll_wr_cnt' not described in 'dw_edma_chip' Warning: edma.h:101 struct member 'll_rd_cnt' not described in 'dw_edma_chip' Warning: edma.h:101 struct member 'll_region_wr' not described in 'dw_edma_chip' Warning: edma.h:101 struct member 'll_region_rd' not described in 'dw_edma_chip' Warning: edma.h:101 struct member 'dt_region_wr' not described in 'dw_edma_chip' Warning: edma.h:101 struct member 'dt_region_rd' not described in 'dw_edma_chip' Warning: edma.h:101 struct member 'mf' not described in 'dw_edma_chip' Signed-off-by: Randy Dunlap Link: https://patch.msgid.link/20251101191524.1991135-1-rdunlap@infradead.org Signed-off-by: Vinod Koul --- include/linux/dma/edma.h | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h index 3080747689f6..270b5458aecf 100644 --- a/include/linux/dma/edma.h +++ b/include/linux/dma/edma.h @@ -27,7 +27,7 @@ struct dw_edma_region { }; /** - * struct dw_edma_core_ops - platform-specific eDMA methods + * struct dw_edma_plat_ops - platform-specific eDMA methods * @irq_vector: Get IRQ number of the passed eDMA channel. 
Note the * method accepts the channel id in the end-to-end * numbering with the eDMA write channels being placed @@ -63,19 +63,17 @@ enum dw_edma_chip_flags { /** * struct dw_edma_chip - representation of DesignWare eDMA controller hardware * @dev: struct device of the eDMA controller - * @id: instance ID * @nr_irqs: total number of DMA IRQs - * @ops DMA channel to IRQ number mapping - * @flags dw_edma_chip_flags - * @reg_base DMA register base address - * @ll_wr_cnt DMA write link list count - * @ll_rd_cnt DMA read link list count - * @rg_region DMA register region - * @ll_region_wr DMA descriptor link list memory for write channel - * @ll_region_rd DMA descriptor link list memory for read channel - * @dt_region_wr DMA data memory for write channel - * @dt_region_rd DMA data memory for read channel - * @mf DMA register map format + * @ops: DMA channel to IRQ number mapping + * @flags: dw_edma_chip_flags + * @reg_base: DMA register base address + * @ll_wr_cnt: DMA write link list count + * @ll_rd_cnt: DMA read link list count + * @ll_region_wr: DMA descriptor link list memory for write channel + * @ll_region_rd: DMA descriptor link list memory for read channel + * @dt_region_wr: DMA data memory for write channel + * @dt_region_rd: DMA data memory for read channel + * @mf: DMA register map format * @dw: struct dw_edma that is filled by dw_edma_probe() */ struct dw_edma_chip { From de4761fb57f6a71eeb5a4c1167ae3606b08d8f59 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 Nov 2025 16:20:01 -0800 Subject: [PATCH 18/64] dmaengine: shdma: correct most kernel-doc issues in shdma-base.h Fix kernel-doc comments in include/linux/shdma-base.h to avoid most warnings: - prefix an enum name with "enum" - prefix enum values with '@' - prefix struct member names with '@' shdma-base.h:28: warning: cannot understand function prototype: 'enum shdma_pm_state ' Warning: shdma-base.h:103 struct member 'desc_completed' not described in 'shdma_ops' Warning: shdma-base.h:103 struct 
member 'halt_channel' not described in 'shdma_ops' Warning: shdma-base.h:103 struct member 'channel_busy' not described in 'shdma_ops' Warning: shdma-base.h:103 struct member 'slave_addr' not described in 'shdma_ops' Warning: shdma-base.h:103 struct member 'desc_setup' not described in 'shdma_ops' Warning: shdma-base.h:103 struct member 'set_slave' not described in 'shdma_ops' Warning: shdma-base.h:103 struct member 'setup_xfer' not described in 'shdma_ops' Warning: shdma-base.h:103 struct member 'start_xfer' not described in 'shdma_ops' Warning: shdma-base.h:103 struct member 'embedded_desc' not described in 'shdma_ops' Warning: shdma-base.h:103 struct member 'chan_irq' not described in 'shdma_ops' This one is not fixed: from 4f46f8ac80416: Warning: shdma-base.h:103 struct member 'get_partial' not described in 'shdma_ops' Signed-off-by: Randy Dunlap Link: https://patch.msgid.link/20251104002001.445297-1-rdunlap@infradead.org Signed-off-by: Vinod Koul --- include/linux/shdma-base.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index 03ba4dab2ef7..b6827c06d332 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h @@ -19,11 +19,11 @@ #include /** - * shdma_pm_state - DMA channel PM state - * SHDMA_PM_ESTABLISHED: either idle or during data transfer - * SHDMA_PM_BUSY: during the transfer preparation, when we have to + * enum shdma_pm_state - DMA channel PM state + * @SHDMA_PM_ESTABLISHED: either idle or during data transfer + * @SHDMA_PM_BUSY: during the transfer preparation, when we have to * drop the lock temporarily - * SHDMA_PM_PENDING: transfers pending + * @SHDMA_PM_PENDING: transfers pending */ enum shdma_pm_state { SHDMA_PM_ESTABLISHED, @@ -74,18 +74,18 @@ struct shdma_chan { /** * struct shdma_ops - simple DMA driver operations - * desc_completed: return true, if this is the descriptor, that just has + * @desc_completed: return true, 
if this is the descriptor, that just has * completed (atomic) - * halt_channel: stop DMA channel operation (atomic) - * channel_busy: return true, if the channel is busy (atomic) - * slave_addr: return slave DMA address - * desc_setup: set up the hardware specific descriptor portion (atomic) - * set_slave: bind channel to a slave - * setup_xfer: configure channel hardware for operation (atomic) - * start_xfer: start the DMA transfer (atomic) - * embedded_desc: return Nth struct shdma_desc pointer from the + * @halt_channel: stop DMA channel operation (atomic) + * @channel_busy: return true, if the channel is busy (atomic) + * @slave_addr: return slave DMA address + * @desc_setup: set up the hardware specific descriptor portion (atomic) + * @set_slave: bind channel to a slave + * @setup_xfer: configure channel hardware for operation (atomic) + * @start_xfer: start the DMA transfer (atomic) + * @embedded_desc: return Nth struct shdma_desc pointer from the * descriptor array - * chan_irq: process channel IRQ, return true if a transfer has + * @chan_irq: process channel IRQ, return true if a transfer has * completed (atomic) */ struct shdma_ops { From b442377c0ea2044a8f50ffa3fe59448f9ed922c9 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 22 Dec 2025 21:33:25 +0100 Subject: [PATCH 19/64] dmaengine: sh: Discard pm_runtime_put() return value Clobbering an error value to be returned from shdma_tx_submit() with a pm_runtime_put() return value is not particularly useful, especially if the latter is 0, so stop doing that. This will facilitate a planned change of the pm_runtime_put() return type to void in the future. Signed-off-by: Rafael J. 
Wysocki Link: https://patch.msgid.link/9626129.rMLUfLXkoz@rafael.j.wysocki Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 834741adadaa..1e4b4d6069c0 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -143,7 +143,7 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) } schan->pm_state = SHDMA_PM_ESTABLISHED; - ret = pm_runtime_put(schan->dev); + pm_runtime_put(schan->dev); spin_unlock_irq(&schan->chan_lock); return ret; From 98b9f207afa53aff2edb0e52910c4348b456b37d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= Date: Mon, 22 Dec 2025 09:04:13 +0100 Subject: [PATCH 20/64] dmaengine: idxd: uapi: use UAPI types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using libc types and headers from the UAPI headers is problematic as it introduces a dependency on a full C toolchain. Use the fixed-width integer types provided by the UAPI headers instead. 
Signed-off-by: Thomas Weißschuh Acked-by: Arnd Bergmann Link: https://patch.msgid.link/20251222-uapi-idxd-v1-1-baa183adb20d@linutronix.de Signed-off-by: Vinod Koul --- include/uapi/linux/idxd.h | 270 +++++++++++++++++++------------------- 1 file changed, 133 insertions(+), 137 deletions(-) diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 3d1987e1bb2d..fdcc8eefb925 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -3,11 +3,7 @@ #ifndef _USR_IDXD_H_ #define _USR_IDXD_H_ -#ifdef __KERNEL__ #include -#else -#include -#endif /* Driver command error status */ enum idxd_scmd_stat { @@ -176,132 +172,132 @@ enum iax_completion_status { #define DSA_COMP_STATUS(status) ((status) & DSA_COMP_STATUS_MASK) struct dsa_hw_desc { - uint32_t pasid:20; - uint32_t rsvd:11; - uint32_t priv:1; - uint32_t flags:24; - uint32_t opcode:8; - uint64_t completion_addr; + __u32 pasid:20; + __u32 rsvd:11; + __u32 priv:1; + __u32 flags:24; + __u32 opcode:8; + __u64 completion_addr; union { - uint64_t src_addr; - uint64_t rdback_addr; - uint64_t pattern; - uint64_t desc_list_addr; - uint64_t pattern_lower; - uint64_t transl_fetch_addr; + __u64 src_addr; + __u64 rdback_addr; + __u64 pattern; + __u64 desc_list_addr; + __u64 pattern_lower; + __u64 transl_fetch_addr; }; union { - uint64_t dst_addr; - uint64_t rdback_addr2; - uint64_t src2_addr; - uint64_t comp_pattern; + __u64 dst_addr; + __u64 rdback_addr2; + __u64 src2_addr; + __u64 comp_pattern; }; union { - uint32_t xfer_size; - uint32_t desc_count; - uint32_t region_size; + __u32 xfer_size; + __u32 desc_count; + __u32 region_size; }; - uint16_t int_handle; - uint16_t rsvd1; + __u16 int_handle; + __u16 rsvd1; union { - uint8_t expected_res; + __u8 expected_res; /* create delta record */ struct { - uint64_t delta_addr; - uint32_t max_delta_size; - uint32_t delt_rsvd; - uint8_t expected_res_mask; + __u64 delta_addr; + __u32 max_delta_size; + __u32 delt_rsvd; + __u8 expected_res_mask; }; - uint32_t 
delta_rec_size; - uint64_t dest2; + __u32 delta_rec_size; + __u64 dest2; /* CRC */ struct { - uint32_t crc_seed; - uint32_t crc_rsvd; - uint64_t seed_addr; + __u32 crc_seed; + __u32 crc_rsvd; + __u64 seed_addr; }; /* DIF check or strip */ struct { - uint8_t src_dif_flags; - uint8_t dif_chk_res; - uint8_t dif_chk_flags; - uint8_t dif_chk_res2[5]; - uint32_t chk_ref_tag_seed; - uint16_t chk_app_tag_mask; - uint16_t chk_app_tag_seed; + __u8 src_dif_flags; + __u8 dif_chk_res; + __u8 dif_chk_flags; + __u8 dif_chk_res2[5]; + __u32 chk_ref_tag_seed; + __u16 chk_app_tag_mask; + __u16 chk_app_tag_seed; }; /* DIF insert */ struct { - uint8_t dif_ins_res; - uint8_t dest_dif_flag; - uint8_t dif_ins_flags; - uint8_t dif_ins_res2[13]; - uint32_t ins_ref_tag_seed; - uint16_t ins_app_tag_mask; - uint16_t ins_app_tag_seed; + __u8 dif_ins_res; + __u8 dest_dif_flag; + __u8 dif_ins_flags; + __u8 dif_ins_res2[13]; + __u32 ins_ref_tag_seed; + __u16 ins_app_tag_mask; + __u16 ins_app_tag_seed; }; /* DIF update */ struct { - uint8_t src_upd_flags; - uint8_t upd_dest_flags; - uint8_t dif_upd_flags; - uint8_t dif_upd_res[5]; - uint32_t src_ref_tag_seed; - uint16_t src_app_tag_mask; - uint16_t src_app_tag_seed; - uint32_t dest_ref_tag_seed; - uint16_t dest_app_tag_mask; - uint16_t dest_app_tag_seed; + __u8 src_upd_flags; + __u8 upd_dest_flags; + __u8 dif_upd_flags; + __u8 dif_upd_res[5]; + __u32 src_ref_tag_seed; + __u16 src_app_tag_mask; + __u16 src_app_tag_seed; + __u32 dest_ref_tag_seed; + __u16 dest_app_tag_mask; + __u16 dest_app_tag_seed; }; /* Fill */ - uint64_t pattern_upper; + __u64 pattern_upper; /* Translation fetch */ struct { - uint64_t transl_fetch_res; - uint32_t region_stride; + __u64 transl_fetch_res; + __u32 region_stride; }; /* DIX generate */ struct { - uint8_t dix_gen_res; - uint8_t dest_dif_flags; - uint8_t dif_flags; - uint8_t dix_gen_res2[13]; - uint32_t ref_tag_seed; - uint16_t app_tag_mask; - uint16_t app_tag_seed; + __u8 dix_gen_res; + __u8 dest_dif_flags; + __u8 
dif_flags; + __u8 dix_gen_res2[13]; + __u32 ref_tag_seed; + __u16 app_tag_mask; + __u16 app_tag_seed; }; - uint8_t op_specific[24]; + __u8 op_specific[24]; }; } __attribute__((packed)); struct iax_hw_desc { - uint32_t pasid:20; - uint32_t rsvd:11; - uint32_t priv:1; - uint32_t flags:24; - uint32_t opcode:8; - uint64_t completion_addr; - uint64_t src1_addr; - uint64_t dst_addr; - uint32_t src1_size; - uint16_t int_handle; + __u32 pasid:20; + __u32 rsvd:11; + __u32 priv:1; + __u32 flags:24; + __u32 opcode:8; + __u64 completion_addr; + __u64 src1_addr; + __u64 dst_addr; + __u32 src1_size; + __u16 int_handle; union { - uint16_t compr_flags; - uint16_t decompr_flags; + __u16 compr_flags; + __u16 decompr_flags; }; - uint64_t src2_addr; - uint32_t max_dst_size; - uint32_t src2_size; - uint32_t filter_flags; - uint32_t num_inputs; + __u64 src2_addr; + __u32 max_dst_size; + __u32 src2_size; + __u32 filter_flags; + __u32 num_inputs; } __attribute__((packed)); struct dsa_raw_desc { - uint64_t field[8]; + __u64 field[8]; } __attribute__((packed)); /* @@ -309,91 +305,91 @@ struct dsa_raw_desc { * volatile and prevent the compiler from optimize the read. 
*/ struct dsa_completion_record { - volatile uint8_t status; + volatile __u8 status; union { - uint8_t result; - uint8_t dif_status; + __u8 result; + __u8 dif_status; }; - uint8_t fault_info; - uint8_t rsvd; + __u8 fault_info; + __u8 rsvd; union { - uint32_t bytes_completed; - uint32_t descs_completed; + __u32 bytes_completed; + __u32 descs_completed; }; - uint64_t fault_addr; + __u64 fault_addr; union { /* common record */ struct { - uint32_t invalid_flags:24; - uint32_t rsvd2:8; + __u32 invalid_flags:24; + __u32 rsvd2:8; }; - uint32_t delta_rec_size; - uint64_t crc_val; + __u32 delta_rec_size; + __u64 crc_val; /* DIF check & strip */ struct { - uint32_t dif_chk_ref_tag; - uint16_t dif_chk_app_tag_mask; - uint16_t dif_chk_app_tag; + __u32 dif_chk_ref_tag; + __u16 dif_chk_app_tag_mask; + __u16 dif_chk_app_tag; }; /* DIF insert */ struct { - uint64_t dif_ins_res; - uint32_t dif_ins_ref_tag; - uint16_t dif_ins_app_tag_mask; - uint16_t dif_ins_app_tag; + __u64 dif_ins_res; + __u32 dif_ins_ref_tag; + __u16 dif_ins_app_tag_mask; + __u16 dif_ins_app_tag; }; /* DIF update */ struct { - uint32_t dif_upd_src_ref_tag; - uint16_t dif_upd_src_app_tag_mask; - uint16_t dif_upd_src_app_tag; - uint32_t dif_upd_dest_ref_tag; - uint16_t dif_upd_dest_app_tag_mask; - uint16_t dif_upd_dest_app_tag; + __u32 dif_upd_src_ref_tag; + __u16 dif_upd_src_app_tag_mask; + __u16 dif_upd_src_app_tag; + __u32 dif_upd_dest_ref_tag; + __u16 dif_upd_dest_app_tag_mask; + __u16 dif_upd_dest_app_tag; }; /* DIX generate */ struct { - uint64_t dix_gen_res; - uint32_t dix_ref_tag; - uint16_t dix_app_tag_mask; - uint16_t dix_app_tag; + __u64 dix_gen_res; + __u32 dix_ref_tag; + __u16 dix_app_tag_mask; + __u16 dix_app_tag; }; - uint8_t op_specific[16]; + __u8 op_specific[16]; }; } __attribute__((packed)); struct dsa_raw_completion_record { - uint64_t field[4]; + __u64 field[4]; } __attribute__((packed)); struct iax_completion_record { - volatile uint8_t status; - uint8_t error_code; - uint8_t fault_info; - 
uint8_t rsvd; - uint32_t bytes_completed; - uint64_t fault_addr; - uint32_t invalid_flags; - uint32_t rsvd2; - uint32_t output_size; - uint8_t output_bits; - uint8_t rsvd3; - uint16_t xor_csum; - uint32_t crc; - uint32_t min; - uint32_t max; - uint32_t sum; - uint64_t rsvd4[2]; + volatile __u8 status; + __u8 error_code; + __u8 fault_info; + __u8 rsvd; + __u32 bytes_completed; + __u64 fault_addr; + __u32 invalid_flags; + __u32 rsvd2; + __u32 output_size; + __u8 output_bits; + __u8 rsvd3; + __u16 xor_csum; + __u32 crc; + __u32 min; + __u32 max; + __u32 sum; + __u64 rsvd4[2]; } __attribute__((packed)); struct iax_raw_completion_record { - uint64_t field[8]; + __u64 field[8]; } __attribute__((packed)); #endif From 7178c3586ab42693b28bb81014320a7783e5c435 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Sun, 21 Dec 2025 16:04:48 +0800 Subject: [PATCH 21/64] dmaengine: sun6i: Choose appropriate burst length under maxburst maxburst, as provided by the client, specifies the largest amount of data that is allowed to be transferred in one burst. This limit is normally provided to avoid a data burst overflowing the target FIFO. It does not mean that the DMA engine can only do bursts in that size. Let the driver pick the largest supported burst length within the given limit. This lets the driver work correctly with some clients that give a large maxburst value. In particular, the 8250_dw driver will give a quarter of the UART's FIFO size as maxburst. On some systems the FIFO size is 256 bytes, giving a maxburst of 64 bytes, while the hardware only supports bursts of up to 16 bytes. 
Signed-off-by: Chen-Yu Tsai Reviewed-by: Jernej Skrabec Link: https://patch.msgid.link/20251221080450.1813479-1-wens@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 2215ff877bf7..f9d876deb1f0 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -583,6 +583,22 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) return ret; } +static u32 find_burst_size(const u32 burst_lengths, u32 maxburst) +{ + if (!maxburst) + return 1; + + if (BIT(maxburst) & burst_lengths) + return maxburst; + + /* Hardware only does power-of-two bursts. */ + for (u32 burst = rounddown_pow_of_two(maxburst); burst > 0; burst /= 2) + if (BIT(burst) & burst_lengths) + return burst; + + return 1; +} + static int set_config(struct sun6i_dma_dev *sdev, struct dma_slave_config *sconfig, enum dma_transfer_direction direction, @@ -616,15 +632,13 @@ static int set_config(struct sun6i_dma_dev *sdev, return -EINVAL; if (!(BIT(dst_addr_width) & sdev->slave.dst_addr_widths)) return -EINVAL; - if (!(BIT(src_maxburst) & sdev->cfg->src_burst_lengths)) - return -EINVAL; - if (!(BIT(dst_maxburst) & sdev->cfg->dst_burst_lengths)) - return -EINVAL; src_width = convert_buswidth(src_addr_width); dst_width = convert_buswidth(dst_addr_width); - dst_burst = convert_burst(dst_maxburst); - src_burst = convert_burst(src_maxburst); + src_burst = find_burst_size(sdev->cfg->src_burst_lengths, src_maxburst); + dst_burst = find_burst_size(sdev->cfg->dst_burst_lengths, dst_maxburst); + dst_burst = convert_burst(dst_burst); + src_burst = convert_burst(src_burst); *p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) | DMA_CHAN_CFG_DST_WIDTH(dst_width); From 7105e968d1f6f6753f8fc3c47b8a705b6dad36d4 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Sun, 21 Dec 2025 14:47:52 +0800 Subject: [PATCH 22/64] dmaengine: sun6i: Add debug messages for cyclic 
DMA prepare The driver already has debug messages for memcpy and linear transfers, but is missing them for cyclic transfers. Cyclic transfers are one of the main uses of the DMA controller, used for audio data transfers. And since these are likely the first DMA peripherals to be enabled, it helps to have these debug messages. Acked-by: Jernej Skrabec Signed-off-by: Chen-Yu Tsai Link: https://patch.msgid.link/20251221064754.1783369-1-wens@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index f9d876deb1f0..c33f151953eb 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -840,6 +840,11 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); + dev_dbg(chan2dev(chan), + "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", + __func__, vchan->vc.chan.chan_id, + &sconfig->dst_addr, &buf_addr, + buf_len, flags); } else { sun6i_dma_set_addr(sdev, v_lli, sconfig->src_addr, @@ -847,6 +852,11 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); + dev_dbg(chan2dev(chan), + "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", + __func__, vchan->vc.chan.chan_id, + &buf_addr, &sconfig->src_addr, + buf_len, flags); } prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); From 5c9142a8063f71233b25d94ae0d73e7dcf9d2a1d Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Thu, 18 Dec 2025 10:39:37 +0200 Subject: [PATCH 23/64] dmaengine: xilinx_dma: Add support for residue on direct AXIDMA S2MM AXIDMA IP supports reporting the amount of bytes transferred on the S2MM channel in direct mode (i.e. non-SG), but the driver does not. 
Thus the driver always reports that all of the buffer was filled. Add xilinx_dma_get_residue_axidma_direct_s2mm() which gets the residue amount for direct AXIDMA for S2MM direction. Signed-off-by: Tomi Valkeinen Reviewed-by: Suraj Gupta Link: https://patch.msgid.link/20251218-xilinx-dma-residue-fix-v1-1-7cd221d69d6b@ideasonboard.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index fabff602065f..64b3fba4e44f 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -1021,6 +1021,24 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan, return residue; } +static u32 +xilinx_dma_get_residue_axidma_direct_s2mm(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc) +{ + struct xilinx_axidma_tx_segment *seg; + struct xilinx_axidma_desc_hw *hw; + u32 finished_len; + + finished_len = dma_ctrl_read(chan, XILINX_DMA_REG_BTT); + + seg = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, + node); + + hw = &seg->hw; + + return hw->control - finished_len; +} + /** * xilinx_dma_chan_handle_cyclic - Cyclic dma callback * @chan: Driver specific dma channel @@ -1732,6 +1750,9 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) desc->residue = xilinx_dma_get_residue(chan, desc); + else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA && + chan->direction == DMA_DEV_TO_MEM && !chan->has_sg) + desc->residue = xilinx_dma_get_residue_axidma_direct_s2mm(chan, desc); else desc->residue = 0; desc->err = chan->err; From aaf3bc0265744adbc2d364964ef409cf118d193d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Le=20Goffic?= Date: Wed, 17 Dec 2025 09:15:03 +0100 Subject: [PATCH 24/64] dmaengine: stm32-mdma: initialize m2m_hw_period and ccr to fix warnings 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit m2m_hw_period is initialized only when chan_config->m2m_hw is true. This triggers a warning: ‘m2m_hw_period’ may be used uninitialized [-Wmaybe-uninitialized] Although m2m_hw_period is only used when chan_config->m2m_hw is true and ignored otherwise, initialize it unconditionally to 0. ccr is initialized by stm32_mdma_set_xfer_param() when the sg list is not empty. This triggers a warning: ‘ccr’ may be used uninitialized [-Wmaybe-uninitialized] Indeed, it could be used uninitialized if the sg list is empty. Initialize it to 0. Signed-off-by: Clément Le Goffic Reviewed-by: Clément Le Goffic Signed-off-by: Amelie Delaunay Link: https://patch.msgid.link/20251217-mdma_warnings_fix-v2-1-340200e0bb55@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32/stm32-mdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/stm32/stm32-mdma.c b/drivers/dma/stm32/stm32-mdma.c index 080c1c725216..b87d41b234df 100644 --- a/drivers/dma/stm32/stm32-mdma.c +++ b/drivers/dma/stm32/stm32-mdma.c @@ -731,7 +731,7 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan, struct stm32_mdma_chan_config *chan_config = &chan->chan_config; struct scatterlist *sg; dma_addr_t src_addr, dst_addr; - u32 m2m_hw_period, ccr, ctcr, ctbr; + u32 m2m_hw_period = 0, ccr = 0, ctcr, ctbr; int i, ret = 0; if (chan_config->m2m_hw) From 4b9ce35ca5924c195df1a6bbccdc9aae4f5cb422 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 13 Nov 2025 13:22:22 +0100 Subject: [PATCH 25/64] dt-bindings: dma: mediatek,uart-dma: Allow MT6795 single compatible While it is true that this SoC is compatible with the MT6577 APDMA IP, that is valid only when the IP is used in 32-bits addressing mode, and, by the way there is no good reason to do so. 
Since the APDMA IP in MT6795 supports 33 bits addressing, this means that it is a newer revision compared to the one found in MT6577, hence only partially compatible with it. Allow nodes to specify "mediatek,mt6795-uart-dma" as their only compatible in the case of MT6795; this is done in lieu of the fact that there are other SoCs integrating the same version of this IP as MT6795, and those will eventually get their own compatible that expresses full compatibility with this SoC. Signed-off-by: AngeloGioacchino Del Regno Acked-by: Conor Dooley Link: https://patch.msgid.link/20251113122229.23998-2-angelogioacchino.delregno@collabora.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml b/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml index dab468a88942..10fc92b60de5 100644 --- a/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml +++ b/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml @@ -28,6 +28,7 @@ properties: - const: mediatek,mt6577-uart-dma - enum: - mediatek,mt6577-uart-dma + - mediatek,mt6795-uart-dma reg: minItems: 1 From ebc5e9176e0f9b7effc259b58a7387019ac8811d Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 13 Nov 2025 13:22:23 +0100 Subject: [PATCH 26/64] dt-bindings: dma: mediatek,uart-dma: Deprecate mediatek,dma-33bits While this property wants to express a capability of the hardware, this is only used by the driver itself to vary the DMA bits during probe. Different hardware shall instead have different compatible strings. Following the driver cleanup and the introduction of a specific compatible string for the APDMA IP version found in MT6795, set the "mediatek,dma-33bits" vendor property as deprecated. 
Signed-off-by: AngeloGioacchino Del Regno Acked-by: Conor Dooley Link: https://patch.msgid.link/20251113122229.23998-3-angelogioacchino.delregno@collabora.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml b/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml index 10fc92b60de5..4d927726df93 100644 --- a/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml +++ b/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml @@ -59,6 +59,7 @@ properties: mediatek,dma-33bits: type: boolean + deprecated: true description: Enable 33-bits UART APDMA support required: From fd7843f0da58b37072c1dafa779d128bb36912bf Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 13 Nov 2025 13:22:24 +0100 Subject: [PATCH 27/64] dt-bindings: dma: mediatek,uart-dma: Support all SoC generations Add support for the APDMA IP found in all of the SoC generations that are currently supported upstream; this includes: - MT8173, MT8183, fully compatible with MT6577 (32-bits) - MT7988, MT8186, MT8188, MT8192, MT8195 and MT6835 (34-bits) - MT6991, MT8196 and MT6985 (35-bits) ...where: - MT6835 is the first SoC where the AP_DMA IP supports 34-bits addressing; and - MT6985 is the first SoC where the AP_DMA IP supports 35-bits addressing. While at it, also add myself in the maintainers list. 
Signed-off-by: AngeloGioacchino Del Regno Acked-by: Conor Dooley Link: https://patch.msgid.link/20251113122229.23998-4-angelogioacchino.delregno@collabora.com Signed-off-by: Vinod Koul --- .../bindings/dma/mediatek,uart-dma.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml b/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml index 4d927726df93..3708518fe7fc 100644 --- a/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml +++ b/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml @@ -7,6 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: MediaTek UART APDMA controller maintainers: + - AngeloGioacchino Del Regno - Long Cheng description: | @@ -23,12 +24,29 @@ properties: - enum: - mediatek,mt2712-uart-dma - mediatek,mt6795-uart-dma + - mediatek,mt8173-uart-dma + - mediatek,mt8183-uart-dma - mediatek,mt8365-uart-dma - mediatek,mt8516-uart-dma - const: mediatek,mt6577-uart-dma + - items: + - enum: + - mediatek,mt7988-uart-dma + - mediatek,mt8186-uart-dma + - mediatek,mt8188-uart-dma + - mediatek,mt8192-uart-dma + - mediatek,mt8195-uart-dma + - const: mediatek,mt6835-uart-dma + - items: + - enum: + - mediatek,mt6991-uart-dma + - mediatek,mt8196-uart-dma + - const: mediatek,mt6985-uart-dma - enum: - mediatek,mt6577-uart-dma - mediatek,mt6795-uart-dma + - mediatek,mt6835-uart-dma + - mediatek,mt6985-uart-dma reg: minItems: 1 From ff81a68a87b1dbf5c1b819f240f83715c701ef0d Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 13 Nov 2025 13:22:25 +0100 Subject: [PATCH 28/64] dmaengine: mediatek: uart-apdma: Get addressing bits from match data The only SoC that declares mediatek,dma-33bits in its devicetree currently is MT6795, which obviously also declares a SoC-specific compatible string: in preparation for adding new SoCs with 34 bits addressing, replace the parsing of said vendor property with logic to get the number of 
addressing bits from platform data associated to compatible strings. While at it, also make the bit_mask variable unsigned and move the `int rc` declaration as last to beautify the code. Thanks to the correct declaration of the APDMA node is in all of the MediaTek device trees that are currently upstream, this commit brings no functional differences. Signed-off-by: AngeloGioacchino Del Regno Acked-by: Conor Dooley Link: https://patch.msgid.link/20251113122229.23998-5-angelogioacchino.delregno@collabora.com Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-uart-apdma.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index 08e15177427b..bbacaa89eb92 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -468,7 +468,8 @@ static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd) } static const struct of_device_id mtk_uart_apdma_match[] = { - { .compatible = "mediatek,mt6577-uart-dma", }, + { .compatible = "mediatek,mt6577-uart-dma", .data = (void *)32 }, + { .compatible = "mediatek,mt6795-uart-dma", .data = (void *)33 }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match); @@ -477,9 +478,9 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mtk_uart_apdmadev *mtkd; - int bit_mask = 32, rc; struct mtk_chan *c; - unsigned int i; + unsigned int bit_mask, i; + int rc; mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL); if (!mtkd) @@ -492,12 +493,10 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev) return rc; } - if (of_property_read_bool(np, "mediatek,dma-33bits")) + bit_mask = (unsigned int)(uintptr_t)of_device_get_match_data(&pdev->dev); + if (bit_mask > 32) mtkd->support_33bits = true; - if (mtkd->support_33bits) - bit_mask = 33; - rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask)); if 
(rc) return rc; From 58ab9d7b6651d21e1cff1777529f2d3dd0b4e851 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 13 Nov 2025 13:22:26 +0100 Subject: [PATCH 29/64] dmaengine: mediatek: uart-apdma: Fix above 4G addressing TX/RX The VFF_4G_SUPPORT register is named differently in datasheets, and its name is "VFF_ADDR2"; was this named correctly from the beginning it would've been clearer that there was a mistake in the programming sequence. This register is supposed to hold the high bits to support the DMA addressing above 4G (so, more than 32 bits) and not a bit to "enable" the support for VFF 4G. Fix the name of this register, and also fix its usage by writing the upper 32 bits of the dma_addr_t on it when the SoC supports such feature. Fixes: 9135408c3ace ("dmaengine: mediatek: Add MediaTek UART APDMA support") Signed-off-by: AngeloGioacchino Del Regno Link: https://patch.msgid.link/20251113122229.23998-6-angelogioacchino.delregno@collabora.com Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-uart-apdma.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index bbacaa89eb92..820422cd6942 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -41,7 +41,7 @@ #define VFF_STOP_CLR_B 0 #define VFF_EN_CLR_B 0 #define VFF_INT_EN_CLR_B 0 -#define VFF_4G_SUPPORT_CLR_B 0 +#define VFF_ADDR2_CLR_B 0 /* * interrupt trigger level for tx @@ -72,7 +72,7 @@ /* TX: the buffer size SW can write. RX: the buffer size HW can write. 
*/ #define VFF_LEFT_SIZE 0x40 #define VFF_DEBUG_STATUS 0x50 -#define VFF_4G_SUPPORT 0x54 +#define VFF_ADDR2 0x54 struct mtk_uart_apdmadev { struct dma_device ddev; @@ -149,7 +149,7 @@ static void mtk_uart_apdma_start_tx(struct mtk_chan *c) mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); if (mtkd->support_33bits) - mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); + mtk_uart_apdma_write(c, VFF_ADDR2, upper_32_bits(d->addr)); } mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); @@ -192,7 +192,7 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c) mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); if (mtkd->support_33bits) - mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); + mtk_uart_apdma_write(c, VFF_ADDR2, upper_32_bits(d->addr)); } mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B); @@ -298,7 +298,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) } if (mtkd->support_33bits) - mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); + mtk_uart_apdma_write(c, VFF_ADDR2, VFF_ADDR2_CLR_B); err_pm: pm_runtime_put_noidle(mtkd->ddev.dev); From 7cb173936858f2278d9cf8b2f5d7d52fd000e54e Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 13 Nov 2025 13:22:27 +0100 Subject: [PATCH 30/64] dmaengine: mediatek: mtk-uart-apdma: Rename support_33bits to support_ext_addr In preparation for adding support for SoCs with APDMA IP versions supporting more than 33 bits addressing, rename the support_33bits variable to support_ext_addr to signal support for extended, above 4GB, addressing. This change is cosmetic only, and brings no functional differences. 
Signed-off-by: AngeloGioacchino Del Regno Link: https://patch.msgid.link/20251113122229.23998-7-angelogioacchino.delregno@collabora.com Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-uart-apdma.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index 820422cd6942..3829c05a12fd 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -77,7 +77,7 @@ struct mtk_uart_apdmadev { struct dma_device ddev; struct clk *clk; - bool support_33bits; + bool support_ext_addr; unsigned int dma_requests; }; @@ -148,7 +148,7 @@ static void mtk_uart_apdma_start_tx(struct mtk_chan *c) mtk_uart_apdma_write(c, VFF_WPT, 0); mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); - if (mtkd->support_33bits) + if (mtkd->support_ext_addr) mtk_uart_apdma_write(c, VFF_ADDR2, upper_32_bits(d->addr)); } @@ -191,7 +191,7 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c) mtk_uart_apdma_write(c, VFF_RPT, 0); mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); - if (mtkd->support_33bits) + if (mtkd->support_ext_addr) mtk_uart_apdma_write(c, VFF_ADDR2, upper_32_bits(d->addr)); } @@ -297,7 +297,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) goto err_pm; } - if (mtkd->support_33bits) + if (mtkd->support_ext_addr) mtk_uart_apdma_write(c, VFF_ADDR2, VFF_ADDR2_CLR_B); err_pm: @@ -495,7 +495,7 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev) bit_mask = (unsigned int)(uintptr_t)of_device_get_match_data(&pdev->dev); if (bit_mask > 32) - mtkd->support_33bits = true; + mtkd->support_ext_addr = true; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask)); if (rc) From 391e20f21cfdee2f55f2274e83b37c03199062ea Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 13 Nov 2025 13:22:28 +0100 Subject: [PATCH 31/64] dmaengine: mediatek: mtk-uart-apdma: Add support for Dimensity 
6300 Add a compatible string and match data for the APDMA IP version found in the MediaTek Dimensity 6300 MT6835 SoC; this supports extended addressing with up to 34 bits. Signed-off-by: AngeloGioacchino Del Regno Link: https://patch.msgid.link/20251113122229.23998-8-angelogioacchino.delregno@collabora.com Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-uart-apdma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index 3829c05a12fd..8ca850356ef1 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -470,6 +470,7 @@ static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd) static const struct of_device_id mtk_uart_apdma_match[] = { { .compatible = "mediatek,mt6577-uart-dma", .data = (void *)32 }, { .compatible = "mediatek,mt6795-uart-dma", .data = (void *)33 }, + { .compatible = "mediatek,mt6835-uart-dma", .data = (void *)34 }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match); From 3587b2b6bf7681835c7c366c6083e2cd9e4b519d Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 13 Nov 2025 13:22:29 +0100 Subject: [PATCH 32/64] dmaengine: mediatek: mtk-uart-apdma: Add support for Dimensity 9200 Add a compatible string and match data for the APDMA IP version found in the MediaTek Dimensity 9200 MT6985 SoC; this supports extended addressing with up to 35 bits. Other SoCs with this IP version also include the Dimensity 9400 MT6991 and Kompanio Ultra MT8196 (which don't need a specific compatible in this driver and can reuse the mt6985 one). 
Signed-off-by: AngeloGioacchino Del Regno Link: https://patch.msgid.link/20251113122229.23998-9-angelogioacchino.delregno@collabora.com Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-uart-apdma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index 8ca850356ef1..3b9761f4e8a1 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -471,6 +471,7 @@ static const struct of_device_id mtk_uart_apdma_match[] = { { .compatible = "mediatek,mt6577-uart-dma", .data = (void *)32 }, { .compatible = "mediatek,mt6795-uart-dma", .data = (void *)33 }, { .compatible = "mediatek,mt6835-uart-dma", .data = (void *)34 }, + { .compatible = "mediatek,mt6985-uart-dma", .data = (void *)35 }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match); From b729eed5b74eeda36d51d6499f1a06ecc974f31a Mon Sep 17 00:00:00 2001 From: Jyothi Kumar Seerapu Date: Wed, 5 Nov 2025 19:00:42 -0800 Subject: [PATCH 33/64] dt-bindings: dma: qcom,gpi: Document GPI DMA engine for Kaanapali and Glymur SoCs Document the GPI DMA engine on the Kaanapali and Glymur platforms. 
Signed-off-by: Jyothi Kumar Seerapu Signed-off-by: Pankaj Patil Signed-off-by: Jingyi Wang Reviewed-by: Krzysztof Kozlowski Link: https://patch.msgid.link/20251105-knp-bus-v2-1-ed3095c7013a@oss.qualcomm.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/qcom,gpi.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml index bbe4da2a1105..4cd867854a5f 100644 --- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml +++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml @@ -24,6 +24,8 @@ properties: - qcom,sm6350-gpi-dma - items: - enum: + - qcom,glymur-gpi-dma + - qcom,kaanapali-gpi-dma - qcom,milos-gpi-dma - qcom,qcm2290-gpi-dma - qcom,qcs8300-gpi-dma From 19fed6ca15c4c41c28059c25f9cc85c0058cc4fd Mon Sep 17 00:00:00 2001 From: Rosen Penev Date: Wed, 5 Nov 2025 18:20:14 -0800 Subject: [PATCH 34/64] dmaengine: st_fdma: change dreg_line to long The code is encoding a pointer into an int which works fine with a 32-bit build. Not with a 64-bit one. 
Signed-off-by: Rosen Penev Link: https://patch.msgid.link/20251106022015.84970-2-rosenp@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/st_fdma.c | 2 +- drivers/dma/st_fdma.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c index dc2ab7d16cf2..0f42a3c30bdb 100644 --- a/drivers/dma/st_fdma.c +++ b/drivers/dma/st_fdma.c @@ -68,7 +68,7 @@ static void st_fdma_dreq_put(struct st_fdma_chan *fchan) { struct st_fdma_dev *fdev = fchan->fdev; - dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line); + dev_dbg(fdev->dev, "put dreq_line:%#lx\n", fchan->dreq_line); clear_bit(fchan->dreq_line, &fdev->dreq_mask); } diff --git a/drivers/dma/st_fdma.h b/drivers/dma/st_fdma.h index f296412e96b6..f1e746f7bc7d 100644 --- a/drivers/dma/st_fdma.h +++ b/drivers/dma/st_fdma.h @@ -120,7 +120,7 @@ struct st_fdma_chan { struct dma_slave_config scfg; struct st_fdma_cfg cfg; - int dreq_line; + long dreq_line; struct virt_dma_chan vchan; struct st_fdma_desc *fdesc; From c3af05623e079c2a9a9363386796fdea20defa18 Mon Sep 17 00:00:00 2001 From: Rosen Penev Date: Wed, 5 Nov 2025 18:20:15 -0800 Subject: [PATCH 35/64] dmaengine: st_fdma: add COMPILE_TEST support Add COMPILE_TEST as an option to allow test building the driver. 
Signed-off-by: Rosen Penev Link: https://patch.msgid.link/20251106022015.84970-3-rosenp@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 8bb0a119ecd4..66cda7cc9f7a 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -590,7 +590,7 @@ config STE_DMA40 config ST_FDMA tristate "ST FDMA dmaengine support" - depends on ARCH_STI + depends on ARCH_STI || COMPILE_TEST depends on REMOTEPROC select ST_SLIM_REMOTEPROC select DMA_ENGINE From 9bd257181fd5c996d922e9991500ad27987cfbf4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nuno=20S=C3=A1?= Date: Tue, 4 Nov 2025 16:22:25 +0000 Subject: [PATCH 36/64] dma: dma-axi-dmac: fix SW cyclic transfers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If 'hw_cyclic' is false we should still be able to do cyclic transfers in "software". That was not working for the case where 'desc->num_sgs' is 1 because 'chan->next_desc' is never set with the current desc which means that the cyclic transfer only runs once and in the next SOT interrupt we do nothing since vchan_next_desc() will return NULL. Fix it by setting 'chan->next_desc' as soon as we get a new desc via vchan_next_desc(). 
Fixes: 0e3b67b348b8 ("dmaengine: Add support for the Analog Devices AXI-DMAC DMA controller") Signed-off-by: Nuno Sá base-commit: 398035178503bf662281bbffb4bebce1460a4bc5 change-id: 20251104-axi-dmac-fixes-and-improvs-e3ad512a329c Acked-by: Michael Hennerich Link: https://patch.msgid.link/20251104-axi-dmac-fixes-and-improvs-v1-1-3e6fd9328f72@analog.com Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 5b06b0dc67ee..e22639822045 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -247,6 +247,7 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) return; list_move_tail(&vdesc->node, &chan->active_descs); desc = to_axi_dmac_desc(vdesc); + chan->next_desc = desc; } sg = &desc->sg[desc->num_submitted]; @@ -265,8 +266,6 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) else chan->next_desc = NULL; flags |= AXI_DMAC_FLAG_LAST; - } else { - chan->next_desc = desc; } sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID); From bbcbafb99df41a1d81403eb4f5bb443b38228b57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nuno=20S=C3=A1?= Date: Tue, 4 Nov 2025 16:22:26 +0000 Subject: [PATCH 37/64] dma: dma-axi-dmac: fix HW scatter-gather not looking at the queue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For HW scatter gather transfers we still need to look for the queue. The HW is capable of queueing 3 concurrent transfers and if we try more than that we'll get the submit queue full and should return. Otherwise, if we go ahead and program the new transfer, we end up discarding it. 
Fixes: e97dc7435972 ("dmaengine: axi-dmac: Add support for scatter-gather transfers") Signed-off-by: Nuno Sá base-commit: 398035178503bf662281bbffb4bebce1460a4bc5 change-id: 20251104-axi-dmac-fixes-and-improvs-e3ad512a329c Acked-by: Michael Hennerich Link: https://patch.msgid.link/20251104-axi-dmac-fixes-and-improvs-v1-2-3e6fd9328f72@analog.com Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index e22639822045..0f25f6d8ae71 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -233,11 +233,9 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) unsigned int flags = 0; unsigned int val; - if (!chan->hw_sg) { - val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER); - if (val) /* Queue is full, wait for the next SOT IRQ */ - return; - } + val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER); + if (val) /* Queue is full, wait for the next SOT IRQ */ + return; desc = chan->next_desc; From b2440442ccb68479fa6d307917419983f3c87e83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nuno=20S=C3=A1?= Date: Tue, 4 Nov 2025 16:22:27 +0000 Subject: [PATCH 38/64] dma: dma-axi-dmac: support bigger than 32bits addresses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some supported platforms as ARCH_ZYNQMP, part of the memory is mapped above 32bit addresses and since the DMA mask, by default, is set to 32bits, we would need to rely on swiotlb (which incurs a performance penalty) for the DMA mappings. Thus, we can write either the SRC or DEST high addresses with 1's and read them back. The last bit set on the return value will reflect the IP address bus width and so we can update the device DMA mask accordingly. While at it, support bigger that 32 bits transfers in IP without HW scatter gather support. 
Signed-off-by: Nuno Sá base-commit: 398035178503bf662281bbffb4bebce1460a4bc5 change-id: 20251104-axi-dmac-fixes-and-improvs-e3ad512a329c Acked-by: Michael Hennerich Link: https://patch.msgid.link/20251104-axi-dmac-fixes-and-improvs-v1-3-3e6fd9328f72@analog.com Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 0f25f6d8ae71..15c569449a28 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -69,7 +69,9 @@ #define AXI_DMAC_REG_START_TRANSFER 0x408 #define AXI_DMAC_REG_FLAGS 0x40c #define AXI_DMAC_REG_DEST_ADDRESS 0x410 +#define AXI_DMAC_REG_DEST_ADDRESS_HIGH 0x490 #define AXI_DMAC_REG_SRC_ADDRESS 0x414 +#define AXI_DMAC_REG_SRC_ADDRESS_HIGH 0x494 #define AXI_DMAC_REG_X_LENGTH 0x418 #define AXI_DMAC_REG_Y_LENGTH 0x41c #define AXI_DMAC_REG_DEST_STRIDE 0x420 @@ -271,11 +273,14 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) if (!chan->hw_sg) { if (axi_dmac_dest_is_mem(chan)) { axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr); + axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS_HIGH, + sg->hw->dest_addr >> 32); axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride); } if (axi_dmac_src_is_mem(chan)) { axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr); + axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS_HIGH, sg->hw->src_addr >> 32); axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride); } } @@ -990,6 +995,9 @@ static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac) static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version) { struct axi_dmac_chan *chan = &dmac->chan; + struct device *dev = dmac->dma_dev.dev; + u32 mask; + int ret; axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC); if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC) @@ -1024,6 +1032,22 @@ static int 
axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version) return -ENODEV; } + if (axi_dmac_dest_is_mem(chan)) { + axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS_HIGH, 0xffffffff); + mask = axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS_HIGH); + } else { + axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS_HIGH, 0xffffffff); + mask = axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS_HIGH); + } + + mask = 32 + fls(mask); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(mask)); + if (ret) { + dev_err(dev, "DMA mask set error %d\n", ret); + return ret; + } + if (version >= ADI_AXI_PCORE_VER(4, 2, 'a')) chan->hw_partial_xfer = true; From c23918bedc74a7809e6fa2fd1d09b860625a90b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nuno=20S=C3=A1?= Date: Tue, 4 Nov 2025 16:22:28 +0000 Subject: [PATCH 39/64] dma: dma-axi-dmac: simplify axi_dmac_parse_dt() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simplify axi_dmac_parse_dt() by using the cleanup device_node class for automatically releasing the of_node reference when going out of scope. 
Signed-off-by: Nuno Sá base-commit: 398035178503bf662281bbffb4bebce1460a4bc5 change-id: 20251104-axi-dmac-fixes-and-improvs-e3ad512a329c Acked-by: Michael Hennerich Link: https://patch.msgid.link/20251104-axi-dmac-fixes-and-improvs-v1-4-3e6fd9328f72@analog.com Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 15c569449a28..045e9b9a90df 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -927,22 +928,18 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac) { - struct device_node *of_channels, *of_chan; int ret; - of_channels = of_get_child_by_name(dev->of_node, "adi,channels"); + struct device_node *of_channels __free(device_node) = of_get_child_by_name(dev->of_node, + "adi,channels"); if (of_channels == NULL) return -ENODEV; - for_each_child_of_node(of_channels, of_chan) { + for_each_child_of_node_scoped(of_channels, of_chan) { ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan); - if (ret) { - of_node_put(of_chan); - of_node_put(of_channels); + if (ret) return -EINVAL; - } } - of_node_put(of_channels); return 0; } From 0b4f3aeee766fd3cc3bf254a26b9761d9b53818b Mon Sep 17 00:00:00 2001 From: sheetal Date: Mon, 29 Sep 2025 16:29:27 +0530 Subject: [PATCH 40/64] dt-bindings: dma: Update ADMA bindings for tegra264 - Update ADMA device tree bindings for tegra264 to support up to 64 interrupt channels by setting 'interrupts' property maxItems to 64. - Also, update the 'allOf' conditional schema to ensure correct maxItems for 'interrupts' based on compatible string, including tegra210 (22) and tegra186 (32) ADMA controllers. 
Signed-off-by: sheetal Reviewed-by: Rob Herring (Arm) Acked-by: Thierry Reding Link: https://patch.msgid.link/20250929105930.1767294-2-sheetal@nvidia.com Signed-off-by: Vinod Koul --- .../bindings/dma/nvidia,tegra210-adma.yaml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml index da0235e451d6..269a1f7ebdbb 100644 --- a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml +++ b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml @@ -46,7 +46,7 @@ properties: Should contain all of the per-channel DMA interrupts in ascending order with respect to the DMA channel index. minItems: 1 - maxItems: 32 + maxItems: 64 clocks: description: Must contain one entry for the ADMA module clock @@ -86,6 +86,19 @@ allOf: reg: items: - description: Full address space range of DMA registers. + interrupts: + maxItems: 22 + + - if: + properties: + compatible: + contains: + enum: + - nvidia,tegra186-adma + then: + properties: + interrupts: + maxItems: 32 - if: properties: From 99e0728b38da1ee343bd3b57bda72c404c693c45 Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Thu, 25 Dec 2025 20:15:19 +0200 Subject: [PATCH 41/64] dt-bindings: dma: pl08x: Do not use plural form of a proper noun PrimeCell As a proper noun PrimeCell is a single entity and it can not have a plural form, fix the typo. 
Signed-off-by: Vladimir Zapolskiy Acked-by: Rob Herring (Arm) Link: https://patch.msgid.link/20251225181519.1401953-1-vz@mleia.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/arm-pl08x.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/dma/arm-pl08x.yaml b/Documentation/devicetree/bindings/dma/arm-pl08x.yaml index ab25ae63d2c3..beab36ac583f 100644 --- a/Documentation/devicetree/bindings/dma/arm-pl08x.yaml +++ b/Documentation/devicetree/bindings/dma/arm-pl08x.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/dma/arm-pl08x.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: ARM PrimeCells PL080 and PL081 and derivatives DMA controller +title: ARM PrimeCell PL080 and PL081 and derivatives DMA controller maintainers: - Vinod Koul From 0a6946644f0d1151d31212820497e1a49fe1a0a6 Mon Sep 17 00:00:00 2001 From: Khairul Anuar Romli Date: Mon, 29 Dec 2025 11:49:01 +0800 Subject: [PATCH 42/64] dt-bindings: dma: snps,dw-axi-dmac: Add compatible string for Agilex5 The address bus on Agilex5 is limited to 40 bits. When SMMU is enable this will cause address truncation and translation faults. Hence introducing "altr,agilex5-axi-dma" to enable platform specific configuration to configure the dma addressable bit mask. Add a fallback capability for the compatible property to allow driver to probe and initialize with a newly added compatible string without requiring additional entry in the driver. 
Signed-off-by: Khairul Anuar Romli Reviewed-by: Rob Herring (Arm) Link: https://patch.msgid.link/dbc775f114445c06c6e4ce424333e1f3cbb92583.1766966955.git.khairul.anuar.romli@altera.com Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/snps,dw-axi-dmac.yaml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml index a393a33c8908..216cda21c538 100644 --- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml +++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml @@ -17,11 +17,15 @@ allOf: properties: compatible: - enum: - - snps,axi-dma-1.01a - - intel,kmb-axi-dma - - starfive,jh7110-axi-dma - - starfive,jh8100-axi-dma + oneOf: + - enum: + - snps,axi-dma-1.01a + - intel,kmb-axi-dma + - starfive,jh7110-axi-dma + - starfive,jh8100-axi-dma + - items: + - const: altr,agilex5-axi-dma + - const: snps,axi-dma-1.01a reg: minItems: 1 From c47422f4d0a26b25ff59709921eaaf8f916eec7d Mon Sep 17 00:00:00 2001 From: Robert Marko Date: Mon, 29 Dec 2025 19:37:50 +0100 Subject: [PATCH 43/64] dt-bindings: dma: atmel: add microchip,lan9691-dma Document Microchip LAN969x DMA compatible which is compatible to SAMA7G5. 
Signed-off-by: Robert Marko Acked-by: Rob Herring (Arm) Link: https://patch.msgid.link/20251229184004.571837-10-robert.marko@sartura.hr Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml b/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml index 73fc13b902b3..197efb19b07a 100644 --- a/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml +++ b/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml @@ -33,7 +33,9 @@ properties: - microchip,sam9x7-dma - const: atmel,sama5d4-dma - items: - - const: microchip,sama7d65-dma + - enum: + - microchip,lan9691-dma + - microchip,sama7d65-dma - const: microchip,sama7g5-dma "#dma-cells": From d3824968dbd9056844bbd5041020a3e28c748558 Mon Sep 17 00:00:00 2001 From: Tony Han Date: Wed, 3 Dec 2025 13:11:43 +0100 Subject: [PATCH 44/64] dmaengine: at_xdmac: get the number of DMA channels from device tree In case of kernel runs in non-secure mode, the number of DMA channels can be got from device tree since the value read from GTYPE register is "0" as it's always secured. As the number of channels can never be negative, update them to the type "unsigned". This is required for LAN969x. 
Signed-off-by: Tony Han Signed-off-by: Robert Marko Link: https://patch.msgid.link/20251203121208.1269487-1-robert.marko@sartura.hr Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index ada96d490847..901971e8bae6 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -2247,12 +2247,29 @@ static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev) return clk_enable(atxdmac->clk); } +static inline int at_xdmac_get_channel_number(struct platform_device *pdev, + u32 reg, u32 *pchannels) +{ + int ret; + + if (reg) { + *pchannels = AT_XDMAC_NB_CH(reg); + return 0; + } + + ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", pchannels); + if (ret) + dev_err(&pdev->dev, "can't get number of channels\n"); + + return ret; +} + static int at_xdmac_probe(struct platform_device *pdev) { struct at_xdmac *atxdmac; - int irq, nr_channels, i, ret; + int irq, ret; void __iomem *base; - u32 reg; + u32 nr_channels, i, reg; irq = platform_get_irq(pdev, 0); if (irq < 0) @@ -2268,7 +2285,10 @@ static int at_xdmac_probe(struct platform_device *pdev) * of channels to do the allocation. */ reg = readl_relaxed(base + AT_XDMAC_GTYPE); - nr_channels = AT_XDMAC_NB_CH(reg); + ret = at_xdmac_get_channel_number(pdev, reg, &nr_channels); + if (ret) + return ret; + if (nr_channels > AT_XDMAC_MAX_CHAN) { dev_err(&pdev->dev, "invalid number of channels (%u)\n", nr_channels); From 8049f77fd820f47a2727c805de629a7433538eab Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Thu, 25 Dec 2025 19:38:47 +0200 Subject: [PATCH 45/64] dmaengine: pl08x: Fix comment stating the difference between PL080 and PL081 Fix a trivial typo in the comment, otherwise it takes an effort to understand what it actually means to say. 
Signed-off-by: Vladimir Zapolskiy Link: https://patch.msgid.link/20251225173847.1395928-1-vz@mleia.com Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 3bfb3b312027..f16a70937200 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2978,7 +2978,7 @@ out_no_pl08x: return ret; } -/* PL080 has 8 channels and the PL080 have just 2 */ +/* PL080 has 8 channels and the PL081 have just 2 */ static struct vendor_data vendor_pl080 = { .config_offset = PL080_CH_CONFIG, .channels = 8, From e0c51fd02f9cfab341907f6764d2f15c134eea55 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 6 Jan 2026 17:59:25 +0100 Subject: [PATCH 46/64] dmaengine: sh: rz-dmac: Make channel irq local The channel IRQ is only used inside the function rz_dmac_chan_probe(), so there is no need to store it in the rz_dmac_chan structure for later use. Signed-off-by: Geert Uytterhoeven Reviewed-by: Biju Das Link: https://patch.msgid.link/312c2e3349f4747e0bca861632bfc3592224b012.1767718556.git.geert+renesas@glider.be Signed-off-by: Vinod Koul --- drivers/dma/sh/rz-dmac.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c index 1f687b08d6b8..3e8d85c6639d 100644 --- a/drivers/dma/sh/rz-dmac.c +++ b/drivers/dma/sh/rz-dmac.c @@ -65,7 +65,6 @@ struct rz_dmac_chan { void __iomem *ch_base; void __iomem *ch_cmn_base; unsigned int index; - int irq; struct rz_dmac_desc *desc; int descs_allocated; @@ -795,29 +794,27 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac, struct rz_lmdesc *lmdesc; char pdev_irqname[6]; char *irqname; - int ret; + int irq, ret; channel->index = index; channel->mid_rid = -EINVAL; /* Request the channel interrupt. 
*/ scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index); - channel->irq = platform_get_irq_byname(pdev, pdev_irqname); - if (channel->irq < 0) - return channel->irq; + irq = platform_get_irq_byname(pdev, pdev_irqname); + if (irq < 0) + return irq; irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", dev_name(dmac->dev), index); if (!irqname) return -ENOMEM; - ret = devm_request_threaded_irq(dmac->dev, channel->irq, - rz_dmac_irq_handler, + ret = devm_request_threaded_irq(dmac->dev, irq, rz_dmac_irq_handler, rz_dmac_irq_handler_thread, 0, irqname, channel); if (ret) { - dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", - channel->irq, ret); + dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret); return ret; } From 8308510b93650dcd83a7c6b9753dec1f90ca3e0c Mon Sep 17 00:00:00 2001 From: Yi Sun Date: Wed, 7 Jan 2026 16:02:22 -0800 Subject: [PATCH 47/64] dmaengine: idxd: Expose DSA3.0 capabilities through sysfs Introduce sysfs interfaces for 3 new Data Streaming Accelerator (DSA) capability registers (dsacap0-2) to enable userspace awareness of hardware features in DSA version 3 and later devices. Userspace components (e.g. configure libraries, workload Apps) require this information to: 1. Select optimal data transfer strategies based on SGL capabilities 2. Enable hardware-specific optimizations for floating-point operations 3. Configure memory operations with proper numerical handling 4. Verify compute operation compatibility before submitting jobs The output format is ,,, where each DSA capability value is a 64-bit hexadecimal number, separated by commas. The ordering follows the DSA 3.0 specification layout: Offset: 0x190 0x188 0x180 Reg: dsacap2 dsacap1 dsacap0 Example: cat /sys/bus/dsa/devices/dsa0/dsacaps 000000000000f18d,0014000e000007aa,00fa01ff01ff03ff According to the DSA 3.0 specification, there are 15 fields defined for the three dsacap registers. 
However, there's no need to define all register structures unless a use case requires them. At this point, support for the Scatter-Gather List (SGL) located in dsacap0 is necessary, so only dsacap0 is defined accordingly. For reference, the DSA 3.0 specification is available at: Link: https://software.intel.com/content/www/us/en/develop/articles/intel-data-streaming-accelerator-architecture-specification.html Signed-off-by: Yi Sun Co-developed-by: Anil S Keshavamurthy Signed-off-by: Anil S Keshavamurthy Reviewed-by: Dave Jiang Tested-by: Yi Lai Acked-by: Vinicius Costa Gomes Link: https://patch.msgid.link/20260107-idxd-yi-sun-dsa3-sgl-size-v2-1-dbef8f559e48@intel.com Signed-off-by: Vinod Koul --- .../ABI/stable/sysfs-driver-dma-idxd | 15 +++++++++++ drivers/dma/idxd/idxd.h | 3 +++ drivers/dma/idxd/init.c | 6 +++++ drivers/dma/idxd/registers.h | 25 +++++++++++++++++++ drivers/dma/idxd/sysfs.c | 24 ++++++++++++++++++ 5 files changed, 73 insertions(+) diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd index 4a355e6747ae..08d030159f09 100644 --- a/Documentation/ABI/stable/sysfs-driver-dma-idxd +++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd @@ -136,6 +136,21 @@ Description: The last executed device administrative command's status/error. Also last configuration error overloaded. Writing to it will clear the status. +What: /sys/bus/dsa/devices/dsa/dsacaps +Date: April 5, 2026 +KernelVersion: 6.20.0 +Contact: dmaengine@vger.kernel.org +Description: The DSA3 specification introduces three new capability + registers: dsacap[0-2]. User components (e.g., configuration + libraries and workload applications) require this information + to properly utilize the DSA3 features. + This includes SGL capability support, Enabling hardware-specific + optimizations, Configuring memory, etc. + The output format is ',,' where each + DSA cap value is a 64 bit hex value. 
+ This attribute should only be visible on DSA devices of version + 3 or later. + What: /sys/bus/dsa/devices/dsa/iaa_cap Date: Sept 14, 2022 KernelVersion: 6.0.0 diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 74e6695881e6..cc0a3fe1c957 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -252,6 +252,9 @@ struct idxd_hw { struct opcap opcap; u32 cmd_cap; union iaa_cap_reg iaa_cap; + union dsacap0_reg dsacap0; + union dsacap1_reg dsacap1; + union dsacap2_reg dsacap2; }; enum idxd_device_state { diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 2acc34b3daff..2bdd1b34d50a 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -585,6 +585,12 @@ static void idxd_read_caps(struct idxd_device *idxd) } multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4); + if (idxd->hw.version >= DEVICE_VERSION_3) { + idxd->hw.dsacap0.bits = ioread64(idxd->reg_base + IDXD_DSACAP0_OFFSET); + idxd->hw.dsacap1.bits = ioread64(idxd->reg_base + IDXD_DSACAP1_OFFSET); + idxd->hw.dsacap2.bits = ioread64(idxd->reg_base + IDXD_DSACAP2_OFFSET); + } + /* read iaa cap */ if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2) idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index 8dc2e8bca779..85e83a61a50b 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -18,6 +18,7 @@ #define DEVICE_VERSION_1 0x100 #define DEVICE_VERSION_2 0x200 +#define DEVICE_VERSION_3 0x300 #define IDXD_MMIO_BAR 0 #define IDXD_WQ_BAR 2 @@ -587,6 +588,30 @@ union evl_status_reg { u64 bits; }; +#define IDXD_DSACAP0_OFFSET 0x180 +union dsacap0_reg { + u64 bits; + struct { + u64 max_sgl_shift:4; + u64 max_gr_block_shift:4; + u64 ops_inter_domain:7; + u64 rsvd1:17; + u64 sgl_formats:16; + u64 max_sg_process:8; + u64 rsvd2:8; + }; +}; + +#define IDXD_DSACAP1_OFFSET 0x188 +union dsacap1_reg { + u64 bits; +}; + 
+#define IDXD_DSACAP2_OFFSET 0x190 +union dsacap2_reg { + u64 bits; +}; + #define IDXD_MAX_BATCH_IDENT 256 struct __evl_entry { diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 9f0701021af0..cc2c83d7f710 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -1713,6 +1713,18 @@ static ssize_t event_log_size_store(struct device *dev, } static DEVICE_ATTR_RW(event_log_size); +static ssize_t dsacaps_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = confdev_to_idxd(dev); + + return sysfs_emit(buf, "%016llx,%016llx,%016llx\n", + (u64)idxd->hw.dsacap2.bits, + (u64)idxd->hw.dsacap1.bits, + (u64)idxd->hw.dsacap0.bits); +} +static DEVICE_ATTR_RO(dsacaps); + static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr, struct idxd_device *idxd) { @@ -1750,6 +1762,14 @@ static bool idxd_device_attr_event_log_size_invisible(struct attribute *attr, !idxd->hw.gen_cap.evl_support); } +static bool idxd_device_attr_dsacaps_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + return attr == &dev_attr_dsacaps.attr && + (idxd->data->type != IDXD_TYPE_DSA || + idxd->hw.version < DEVICE_VERSION_3); +} + static umode_t idxd_device_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { @@ -1768,6 +1788,9 @@ static umode_t idxd_device_attr_visible(struct kobject *kobj, if (idxd_device_attr_event_log_size_invisible(attr, idxd)) return 0; + if (idxd_device_attr_dsacaps_invisible(attr, idxd)) + return 0; + return attr->mode; } @@ -1795,6 +1818,7 @@ static struct attribute *idxd_device_attributes[] = { &dev_attr_cmd_status.attr, &dev_attr_iaa_cap.attr, &dev_attr_event_log_size.attr, + &dev_attr_dsacaps.attr, NULL, }; From fe7b87d908da33326fbf6fe2b3830426432ec66c Mon Sep 17 00:00:00 2001 From: Yi Sun Date: Wed, 7 Jan 2026 16:02:23 -0800 Subject: [PATCH 48/64] dmaengine: idxd: Add Max SGL Size Support for DSA3.0 Certain DSA 3.0 opcodes, such as Gather copy and 
Gather reduce, require max SGL configured for workqueues prior to supporting these opcodes. Configure the maximum scatter-gather list (SGL) size for workqueues during setup on the supported HW. Application can then properly handle the SGL size without explicitly setting it. Signed-off-by: Yi Sun Co-developed-by: Anil S Keshavamurthy Signed-off-by: Anil S Keshavamurthy Reviewed-by: Dave Jiang Tested-by: Yi Lai Acked-by: Vinicius Costa Gomes Link: https://patch.msgid.link/20260107-idxd-yi-sun-dsa3-sgl-size-v2-2-dbef8f559e48@intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 5 +++++ drivers/dma/idxd/idxd.h | 16 ++++++++++++++++ drivers/dma/idxd/init.c | 5 +++++ drivers/dma/idxd/registers.h | 3 ++- 4 files changed, 28 insertions(+), 1 deletion(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index c2cdf41b6e57..c26128529ff4 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -390,6 +390,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq) memset(wq->name, 0, WQ_NAME_SIZE); wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); + idxd_wq_set_init_max_sgl_size(idxd, wq); if (wq->opcap_bmap) bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); } @@ -989,6 +990,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq) /* bytes 12-15 */ wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes); idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size)); + if (idxd_sgl_supported(idxd)) + wq->wqcfg->max_sgl_shift = ilog2(wq->max_sgl_size); /* bytes 32-63 */ if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) { @@ -1167,6 +1170,8 @@ static int idxd_wq_load_config(struct idxd_wq *wq) wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift; idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift); + if (idxd_sgl_supported(idxd)) + wq->max_sgl_size = 1U << wq->wqcfg->max_sgl_shift; for (i = 0; i 
< WQCFG_STRIDES(idxd); i++) { wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i); diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index cc0a3fe1c957..ea8c4daed38d 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -227,6 +227,7 @@ struct idxd_wq { char name[WQ_NAME_SIZE + 1]; u64 max_xfer_bytes; u32 max_batch_size; + u32 max_sgl_size; /* Lock to protect upasid_xa access. */ struct mutex uc_lock; @@ -348,6 +349,7 @@ struct idxd_device { u64 max_xfer_bytes; u32 max_batch_size; + u32 max_sgl_size; int max_groups; int max_engines; int max_rdbufs; @@ -692,6 +694,20 @@ static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq, wq->max_batch_size = max_batch_size; } +static bool idxd_sgl_supported(struct idxd_device *idxd) +{ + return idxd->data->type == IDXD_TYPE_DSA && + idxd->hw.version >= DEVICE_VERSION_3 && + idxd->hw.dsacap0.sgl_formats; +} + +static inline void idxd_wq_set_init_max_sgl_size(struct idxd_device *idxd, + struct idxd_wq *wq) +{ + if (idxd_sgl_supported(idxd)) + wq->max_sgl_size = 1U << idxd->hw.dsacap0.max_sgl_shift; +} + static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg, u32 max_batch_shift) { diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 2bdd1b34d50a..fb80803d5b57 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -222,6 +222,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd) init_completion(&wq->wq_resurrect); wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); + idxd_wq_set_init_max_sgl_size(idxd, wq); wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); if (!wq->wqcfg) { @@ -590,6 +591,10 @@ static void idxd_read_caps(struct idxd_device *idxd) idxd->hw.dsacap1.bits = ioread64(idxd->reg_base + IDXD_DSACAP1_OFFSET); idxd->hw.dsacap2.bits = ioread64(idxd->reg_base + IDXD_DSACAP2_OFFSET); } + if 
(idxd_sgl_supported(idxd)) { + idxd->max_sgl_size = 1U << idxd->hw.dsacap0.max_sgl_shift; + dev_dbg(dev, "max sgl size: %u\n", idxd->max_sgl_size); + } /* read iaa cap */ if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2) diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index 85e83a61a50b..f95411363ea9 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -390,7 +390,8 @@ union wqcfg { /* bytes 12-15 */ u32 max_xfer_shift:5; u32 max_batch_shift:4; - u32 rsvd4:23; + u32 max_sgl_shift:4; + u32 rsvd4:19; /* bytes 16-19 */ u16 occupancy_inth; From 80c70bfb95cdbe0c644070f4ca4754a60f0a4830 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:12 +0100 Subject: [PATCH 49/64] scatterlist: introduce sg_nents_for_dma() helper Sometimes the user needs to split each entry on the mapped scatter list due to DMA length constrains. This helper returns a number of entities assuming that each of them is not bigger than supplied maximum length. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-2-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/scatterlist.h | 2 ++ lib/scatterlist.c | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 29f6ceb98d74..6de1a2434299 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -441,6 +441,8 @@ static inline void sg_init_marker(struct scatterlist *sgl, int sg_nents(struct scatterlist *sg); int sg_nents_for_len(struct scatterlist *sg, u64 len); +int sg_nents_for_dma(struct scatterlist *sgl, unsigned int sglen, size_t len); + struct scatterlist *sg_last(struct scatterlist *s, unsigned int); void sg_init_table(struct scatterlist *, unsigned int); void sg_init_one(struct scatterlist *, const void *, unsigned int); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 4af1c8b0775a..21bc9c1f7c06 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -64,6 +64,32 @@ int sg_nents_for_len(struct scatterlist *sg, u64 len) } EXPORT_SYMBOL(sg_nents_for_len); +/** + * sg_nents_for_dma - return the count of DMA-capable entries in scatterlist + * @sgl: The scatterlist + * @sglen: The current number of entries + * @len: The maximum length of DMA-capable block + * + * Description: + * Determines the number of entries in @sgl which would be permitted in + * DMA-capable transfer if list had been split accordingly, taking into + * account chaining as well. 
+ * + * Returns: + * the number of sgl entries needed + * + **/ +int sg_nents_for_dma(struct scatterlist *sgl, unsigned int sglen, size_t len) +{ + struct scatterlist *sg; + int i, nents = 0; + + for_each_sg(sgl, sg, sglen, i) + nents += DIV_ROUND_UP(sg_dma_len(sg), len); + return nents; +} +EXPORT_SYMBOL(sg_nents_for_dma); + /** * sg_last - return the last scatterlist entry in a list * @sgl: First entry in the scatterlist From 47f5cb7878cc62ed95981c5d02862b253eddb590 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:13 +0100 Subject: [PATCH 50/64] dmaengine: altera-msgdma: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-3-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/altera-msgdma.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index a203fdd84950..50534a6045a0 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c @@ -396,13 +396,11 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, void *desc = NULL; size_t len, avail; dma_addr_t dma_dst, dma_src; - u32 desc_cnt = 0, i; - struct scatterlist *sg; + u32 desc_cnt; u32 stride; unsigned long irqflags; - for_each_sg(sgl, sg, sg_len, i) - desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); + desc_cnt = sg_nents_for_dma(sgl, sg_len, MSGDMA_MAX_TRANS_LEN); spin_lock_irqsave(&mdev->lock, irqflags); if (desc_cnt > mdev->desc_free_cnt) { From 024ae9d3092c425f3ea6eae92086a2001ca2e0c7 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:14 +0100 Subject: [PATCH 51/64] dmaengine: axi-dmac: use sg_nents_for_dma() helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of open coded variant let's use recently 
introduced helper. Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Reviewed-by: Nuno Sá Link: https://patch.msgid.link/20260108105619.3513561-4-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 045e9b9a90df..f5caf75dc0e7 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -677,10 +677,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg( if (direction != chan->direction) return NULL; - num_sgs = 0; - for_each_sg(sgl, sg, sg_len, i) - num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length); - + num_sgs = sg_nents_for_dma(sgl, sg_len, chan->max_length); desc = axi_dmac_alloc_desc(chan, num_sgs); if (!desc) return NULL; From 39110c68500a149664bafb0c174baef0d42e2129 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:15 +0100 Subject: [PATCH 52/64] dmaengine: bcm2835-dma: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-5-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/bcm2835-dma.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 321748e2983e..3f638c3e81cd 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -260,23 +260,6 @@ static void bcm2835_dma_create_cb_set_length( control_block->info |= finalextrainfo; } -static inline size_t bcm2835_dma_count_frames_for_sg( - struct bcm2835_chan *c, - struct scatterlist *sgl, - unsigned int sg_len) -{ - size_t frames = 0; - struct scatterlist *sgent; - unsigned int i; - size_t plength = bcm2835_dma_max_frame_length(c); - - for_each_sg(sgl, sgent, sg_len, i) - frames += bcm2835_dma_frames_for_length( - sg_dma_len(sgent), plength); - - return frames; -} - /** * bcm2835_dma_create_cb_chain - create a control block and fills data in * @@ -672,7 +655,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg( } /* count frames in sg list */ - frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len); + frames = sg_nents_for_dma(sgl, sg_len, bcm2835_dma_max_frame_length(c)); /* allocate the CB chain */ d = bcm2835_dma_create_cb_chain(chan, direction, false, From 5d6ceb254fa9ac1f50932c42a0a9a8bedaa3190d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:16 +0100 Subject: [PATCH 53/64] dmaengine: dw-axi-dmac: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-6-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index b23536645ff7..493c2a32b0fe 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -850,7 +850,7 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, unsigned int loop = 0; struct scatterlist *sg; size_t axi_block_len; - u32 len, num_sgs = 0; + u32 len, num_sgs; unsigned int i; dma_addr_t mem; int status; @@ -867,9 +867,7 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, if (axi_block_len == 0) return NULL; - for_each_sg(sgl, sg, sg_len, i) - num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len); - + num_sgs = sg_nents_for_dma(sgl, sg_len, axi_block_len); desc = axi_desc_alloc(num_sgs); if (unlikely(!desc)) goto err_desc_get; From 3fc49d21f3a46866724ff8ef8a79c6e2cd9d7676 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:17 +0100 Subject: [PATCH 54/64] dmaengine: k3dma: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-7-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 0f9cd7815f88..63677c0b6f18 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -536,19 +536,14 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( size_t len, avail, total = 0; struct scatterlist *sg; dma_addr_t addr, src = 0, dst = 0; - int num = sglen, i; + int num, i; if (sgl == NULL) return NULL; c->cyclic = 0; - for_each_sg(sgl, sg, sglen, i) { - avail = sg_dma_len(sg); - if (avail > DMA_MAX_SIZE) - num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; - } - + num = sg_nents_for_dma(sgl, sglen, DMA_MAX_SIZE); ds = k3_dma_alloc_desc_resource(num, chan); if (!ds) return NULL; From f9b0274f53a2d464e71f37e8f4d0d0dc41321259 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:18 +0100 Subject: [PATCH 55/64] dmaengine: lgm: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-8-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/lgm/lgm-dma.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c index 8173c3f1075a..a7b9cf30f6ad 100644 --- a/drivers/dma/lgm/lgm-dma.c +++ b/drivers/dma/lgm/lgm-dma.c @@ -1164,8 +1164,8 @@ ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct dw2_desc *hw_ds; struct dw2_desc_sw *ds; struct scatterlist *sg; - int num = sglen, i; dma_addr_t addr; + int num, i; if (!sgl) return NULL; @@ -1173,12 +1173,7 @@ ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (d->ver > DMA_VER22) return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen); - for_each_sg(sgl, sg, sglen, i) { - avail = sg_dma_len(sg); - if (avail > DMA_MAX_SIZE) - num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; - } - + num = sg_nents_for_dma(sgl, sglen, DMA_MAX_SIZE); ds = dma_alloc_desc_resource(num, c); if (!ds) return NULL; From 068942eaa232ba752b744d98ff8ab22b26c8bff4 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:19 +0100 Subject: [PATCH 56/64] dmaengine: pxa-dma: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-9-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/pxa_dma.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 249296389771..b639c8b51e87 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -970,7 +970,7 @@ pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, struct scatterlist *sg; dma_addr_t dma; u32 dcmd, dsadr = 0, dtadr = 0; - unsigned int nb_desc = 0, i, j = 0; + unsigned int nb_desc, i, j = 0; if ((sgl == NULL) || (sg_len == 0)) return NULL; @@ -979,8 +979,7 @@ pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, dev_dbg(&chan->vc.chan.dev->device, "%s(): dir=%d flags=%lx\n", __func__, dir, flags); - for_each_sg(sgl, sg, sg_len, i) - nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES); + nb_desc = sg_nents_for_dma(sgl, sg_len, PDMA_MAX_DESC_BYTES); sw_desc = pxad_alloc_desc(chan, nb_desc + 1); if (!sw_desc) return NULL; From 425f871d7acdb521d67c2578e6ae688a751d1e80 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:20 +0100 Subject: [PATCH 57/64] dmaengine: qcom: adm: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-10-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/qcom/qcom_adm.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c index 6be54fddcee1..490edad20ae6 100644 --- a/drivers/dma/qcom/qcom_adm.c +++ b/drivers/dma/qcom/qcom_adm.c @@ -390,16 +390,15 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan, } /* iterate through sgs and compute allocation size of structures */ - for_each_sg(sgl, sg, sg_len, i) { - if (achan->slave.device_fc) { + if (achan->slave.device_fc) { + for_each_sg(sgl, sg, sg_len, i) { box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst, ADM_MAX_ROWS); if (sg_dma_len(sg) % burst) single_count++; - } else { - single_count += DIV_ROUND_UP(sg_dma_len(sg), - ADM_MAX_XFER); } + } else { + single_count = sg_nents_for_dma(sgl, sg_len, ADM_MAX_XFER); } async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT); From 107fdf0c4e944030bf544aea98e8ae8537914177 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:21 +0100 Subject: [PATCH 58/64] dmaengine: qcom: bam_dma: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-11-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/qcom/bam_dma.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index bcd8de9a9a12..e184cebbface 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -653,22 +653,17 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sg; u32 i; struct bam_desc_hw *desc; - unsigned int num_alloc = 0; - + unsigned int num_alloc; if (!is_slave_direction(direction)) { dev_err(bdev->dev, "invalid dma direction\n"); return NULL; } - /* calculate number of required entries */ - for_each_sg(sgl, sg, sg_len, i) - num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE); - /* allocate enough room to accommodate the number of entries */ + num_alloc = sg_nents_for_dma(sgl, sg_len, BAM_FIFO_SIZE); async_desc = kzalloc(struct_size(async_desc, desc, num_alloc), GFP_NOWAIT); - if (!async_desc) return NULL; From d7785661010e2fe113aec2500f988a8e73ac3e7b Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:22 +0100 Subject: [PATCH 59/64] dmaengine: sa11x0: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-12-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/sa11x0-dma.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index dc1a9a05252e..86f1d7461f56 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -526,7 +526,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_desc *txd; struct scatterlist *sgent; - unsigned i, j = sglen; + unsigned int i, j; size_t size = 0; /* SA11x0 channels can only operate in their native direction */ @@ -542,10 +542,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( for_each_sg(sg, sgent, sglen, i) { dma_addr_t addr = sg_dma_address(sgent); - unsigned int len = sg_dma_len(sgent); - if (len > DMA_MAX_SIZE) - j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; if (addr & DMA_ALIGN) { dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n", &c->vc, &addr); @@ -553,6 +550,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( } } + j = sg_nents_for_dma(sg, sglen, DMA_MAX_SIZE & ~DMA_ALIGN); txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC); if (!txd) { dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); From ac326dca6870f0d8e0787e94b1d9c2c91bb358d7 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:23 +0100 Subject: [PATCH 60/64] dmaengine: sh: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-13-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 1e4b4d6069c0..3ff2a8be8faa 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -577,12 +577,11 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, struct scatterlist *sg; struct shdma_desc *first = NULL, *new = NULL /* compiler... */; LIST_HEAD(tx_list); - int chunks = 0; + int chunks; unsigned long irq_flags; int i; - for_each_sg(sgl, sg, sg_len, i) - chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); + chunks = sg_nents_for_dma(sgl, sg_len, schan->max_xfer_len); /* Have to lock the whole loop to protect against concurrent release */ spin_lock_irqsave(&schan->chan_lock, irq_flags); From 3c8a86ed002ab8fb287ee4ec92f0fd6ac5b291d2 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 Jan 2026 11:50:24 +0100 Subject: [PATCH 61/64] dmaengine: xilinx: xdma: use sg_nents_for_dma() helper Instead of open coded variant let's use recently introduced helper. 
Reviewed-by: Bjorn Andersson Signed-off-by: Andy Shevchenko Link: https://patch.msgid.link/20260108105619.3513561-14-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xdma.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c index 0d88b1a670e1..be6c22e1020a 100644 --- a/drivers/dma/xilinx/xdma.c +++ b/drivers/dma/xilinx/xdma.c @@ -605,13 +605,11 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl, struct xdma_chan *xdma_chan = to_xdma_chan(chan); struct dma_async_tx_descriptor *tx_desc; struct xdma_desc *sw_desc; - u32 desc_num = 0, i; u64 addr, dev_addr, *src, *dst; + u32 desc_num, i; struct scatterlist *sg; - for_each_sg(sgl, sg, sg_len, i) - desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX); - + desc_num = sg_nents_for_dma(sgl, sg_len, XDMA_DESC_BLEN_MAX); sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false); if (!sw_desc) return NULL; From 666c53e94c1d0bf0bdf14c49505ece9ddbe725bc Mon Sep 17 00:00:00 2001 From: Jared Kangas Date: Tue, 13 Jan 2026 11:46:50 -0800 Subject: [PATCH 62/64] dmaengine: fsl-edma: don't explicitly disable clocks in .remove() The clocks in fsl_edma_engine::muxclk are allocated and enabled with devm_clk_get_enabled(), which automatically cleans these resources up, but these clocks are also manually disabled in fsl_edma_remove(). This causes warnings on driver removal for each clock: edma_module already disabled WARNING: CPU: 0 PID: 418 at drivers/clk/clk.c:1200 clk_core_disable+0x198/0x1c8 [...] Call trace: clk_core_disable+0x198/0x1c8 (P) clk_disable+0x34/0x58 fsl_edma_remove+0x74/0xe8 [fsl_edma] [...] ---[ end trace 0000000000000000 ]--- edma_module already unprepared WARNING: CPU: 0 PID: 418 at drivers/clk/clk.c:1059 clk_core_unprepare+0x1f8/0x220 [...] Call trace: clk_core_unprepare+0x1f8/0x220 (P) clk_unprepare+0x34/0x58 fsl_edma_remove+0x7c/0xe8 [fsl_edma] [...] 
---[ end trace 0000000000000000 ]--- Fix these warnings by removing the unnecessary fsl_disable_clocks() call in fsl_edma_remove(). Fixes: a9903de3aa16 ("dmaengine: fsl-edma: refactor using devm_clk_get_enabled") Signed-off-by: Jared Kangas Reviewed-by: Frank Li Link: https://patch.msgid.link/20260113-fsl-edma-clock-removal-v1-1-2025b49e7bcc@redhat.com Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma-main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index a753b7cbfa7a..dbcdd1e68319 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -915,7 +915,6 @@ static void fsl_edma_remove(struct platform_device *pdev) of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); - fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); } static int fsl_edma_suspend_late(struct device *dev) From 876cbb60227fcfbcfcabf458eee5bc52cf5fbac0 Mon Sep 17 00:00:00 2001 From: Pankaj Patil Date: Wed, 31 Dec 2025 19:01:14 +0530 Subject: [PATCH 63/64] dt-bindings: dma: qcom,gpi: Update max interrupts lines to 16 Update interrupt maxItems to 16 from 13 per GPI instance to support Glymur, Qualcomm's latest gen SoC Signed-off-by: Pankaj Patil Acked-by: Rob Herring (Arm) Link: https://patch.msgid.link/20251231133114.2752822-1-pankaj.patil@oss.qualcomm.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/qcom,gpi.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml index 4cd867854a5f..fde1df035ad1 100644 --- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml +++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml @@ -60,7 +60,7 @@ properties: description: Interrupt lines for each GPI instance minItems: 1 - maxItems: 13 + maxItems: 16 "#dma-cells": const: 3 From 
ab736ed52e3409b58a4888715e4425b6e8ac444f Mon Sep 17 00:00:00 2001 From: Frank Li Date: Thu, 29 Jan 2026 23:20:39 -0500 Subject: [PATCH 64/64] dmaengine: add Frank Li as reviewer Frank Li maintains the Freescale eDMA driver, has worked on DW eDMA improvements, and actively helps review DMA-related code. Signed-off-by: Frank Li Link: https://patch.msgid.link/20260130042039.1842939-1-Frank.Li@nxp.com Signed-off-by: Vinod Koul --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 5b11839cba9d..f630328ca6ae 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7424,6 +7424,7 @@ K: \bdma_(?:buf|fence|resv)\b DMA GENERIC OFFLOAD ENGINE SUBSYSTEM M: Vinod Koul +R: Frank Li L: dmaengine@vger.kernel.org S: Maintained Q: https://patchwork.kernel.org/project/linux-dmaengine/list/