2
0
mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git synced 2025-09-04 20:19:47 +08:00

io_uring/zcrx: introduce io_populate_area_dma

Add a helper that initialises page-pool dma addresses from a sg table.
It'll be reused in following patches.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: David Wei <dw@davidwei.uk>
Link: https://lore.kernel.org/r/a8972a77be9b5675abc585d6e2e6e30f9c7dbd85.1751466461.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2025-07-02 15:29:06 +01:00 committed by Jens Axboe
parent 06897ddfc5
commit 54e89a93ef

View File

@ -47,6 +47,35 @@ static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
return area->mem.pages[net_iov_idx(niov)]; return area->mem.pages[net_iov_idx(niov)];
} }
/*
 * Populate the area's net_iov DMA addresses from a DMA-mapped sg table,
 * starting @off bytes into the mapping. Each net_iov is assigned one
 * PAGE_SIZE-sized slice of the scatterlist. Returns 0 on success or
 * -EFAULT if net_mp_niov_set_dma_addr() rejects an address.
 */
static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
				struct io_zcrx_area *area,
				struct sg_table *sgt, unsigned long off)
{
	unsigned niov_idx = 0;
	struct scatterlist *sg;
	unsigned i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		unsigned long len = sg_dma_len(sg);
		/* consume the leading offset, possibly spanning entries */
		unsigned long skip = min(len, off);
		dma_addr_t addr = sg_dma_address(sg) + skip;

		off -= skip;
		len -= skip;

		/* hand out PAGE_SIZE chunks until this entry or the niovs run out */
		for (; len && niov_idx < area->nia.num_niovs; niov_idx++) {
			if (net_mp_niov_set_dma_addr(&area->nia.niovs[niov_idx], addr))
				return -EFAULT;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
	}
	return 0;
}
static void io_release_dmabuf(struct io_zcrx_mem *mem) static void io_release_dmabuf(struct io_zcrx_mem *mem)
{ {
if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER)) if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
@ -121,33 +150,10 @@ err:
static int io_zcrx_map_area_dmabuf(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) static int io_zcrx_map_area_dmabuf(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{ {
unsigned long off = area->mem.dmabuf_offset;
struct scatterlist *sg;
unsigned i, niov_idx = 0;
if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER)) if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
return -EINVAL; return -EINVAL;
return io_populate_area_dma(ifq, area, area->mem.sgt,
for_each_sgtable_dma_sg(area->mem.sgt, sg, i) { area->mem.dmabuf_offset);
dma_addr_t dma = sg_dma_address(sg);
unsigned long sg_len = sg_dma_len(sg);
unsigned long sg_off = min(sg_len, off);
off -= sg_off;
sg_len -= sg_off;
dma += sg_off;
while (sg_len && niov_idx < area->nia.num_niovs) {
struct net_iov *niov = &area->nia.niovs[niov_idx];
if (net_mp_niov_set_dma_addr(niov, dma))
return -EFAULT;
sg_len -= PAGE_SIZE;
dma += PAGE_SIZE;
niov_idx++;
}
}
return 0;
} }
static int io_import_umem(struct io_zcrx_ifq *ifq, static int io_import_umem(struct io_zcrx_ifq *ifq,