mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
dma-mapping: export new dma_*map_phys() interface
Introduce new DMA mapping functions dma_map_phys() and dma_unmap_phys() that operate directly on physical addresses instead of page+offset parameters. This provides a more efficient interface for drivers that already have physical addresses available. The new functions are implemented as the primary mapping layer, with the existing dma_map_page_attrs()/dma_map_resource() and dma_unmap_page_attrs()/dma_unmap_resource() functions converted to simple wrappers around the phys-based implementations. In the case of dma_map_page_attrs(), the struct page is converted to a physical address with the help of the page_to_phys() function, and dma_map_resource() provides the physical address as-is together with the addition of the DMA_ATTR_MMIO attribute. The old page-based API is preserved in mapping.c to ensure that existing code won't be affected by changing EXPORT_SYMBOL to the EXPORT_SYMBOL_GPL variant for dma_*map_phys(). Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Keith Busch <kbusch@kernel.org> Signed-off-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Link: https://lore.kernel.org/r/54cc52af91777906bbe4a386113437ba0bcfba9c.1757423202.git.leonro@nvidia.com
This commit is contained in:
committed by
Marek Szyprowski
parent
bf0ecb3c32
commit
f7326196a7
@@ -1556,20 +1556,6 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
__iommu_dma_unmap(dev, start, end - start);
|
||||
}
|
||||
|
||||
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
return __iommu_dma_map(dev, phys, size,
|
||||
dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
|
||||
dma_get_mask(dev));
|
||||
}
|
||||
|
||||
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
__iommu_dma_unmap(dev, handle, size);
|
||||
}
|
||||
|
||||
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
|
||||
{
|
||||
size_t alloc_size = PAGE_ALIGN(size);
|
||||
|
||||
@@ -149,7 +149,5 @@ void dma_direct_free_pages(struct device *dev, size_t size,
|
||||
struct page *page, dma_addr_t dma_addr,
|
||||
enum dma_data_direction dir);
|
||||
int dma_direct_supported(struct device *dev, u64 mask);
|
||||
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
|
||||
#endif /* _LINUX_DMA_DIRECT_H */
|
||||
|
||||
@@ -138,6 +138,10 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
|
||||
unsigned long attrs);
|
||||
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs);
|
||||
dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs);
|
||||
void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs);
|
||||
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs);
|
||||
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
|
||||
@@ -192,6 +196,15 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
}
|
||||
static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
}
|
||||
static inline unsigned int dma_map_sg_attrs(struct device *dev,
|
||||
struct scatterlist *sg, int nents, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
|
||||
@@ -42,10 +42,6 @@ size_t iommu_dma_opt_mapping_size(void);
|
||||
size_t iommu_dma_max_mapping_size(struct device *dev);
|
||||
void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, unsigned long attrs);
|
||||
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
|
||||
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
|
||||
void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
|
||||
|
||||
@@ -73,7 +73,6 @@ DEFINE_EVENT(dma_map, name, \
|
||||
TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs))
|
||||
|
||||
DEFINE_MAP_EVENT(dma_map_phys);
|
||||
DEFINE_MAP_EVENT(dma_map_resource);
|
||||
|
||||
DECLARE_EVENT_CLASS(dma_unmap,
|
||||
TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
|
||||
@@ -111,7 +110,6 @@ DEFINE_EVENT(dma_unmap, name, \
|
||||
TP_ARGS(dev, addr, size, dir, attrs))
|
||||
|
||||
DEFINE_UNMAP_EVENT(dma_unmap_phys);
|
||||
DEFINE_UNMAP_EVENT(dma_unmap_resource);
|
||||
|
||||
DECLARE_EVENT_CLASS(dma_alloc_class,
|
||||
TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
|
||||
|
||||
@@ -38,7 +38,6 @@ enum {
|
||||
dma_debug_single,
|
||||
dma_debug_sg,
|
||||
dma_debug_coherent,
|
||||
dma_debug_resource,
|
||||
dma_debug_noncoherent,
|
||||
dma_debug_phy,
|
||||
};
|
||||
@@ -142,7 +141,6 @@ static const char *type2name[] = {
|
||||
[dma_debug_single] = "single",
|
||||
[dma_debug_sg] = "scatter-gather",
|
||||
[dma_debug_coherent] = "coherent",
|
||||
[dma_debug_resource] = "resource",
|
||||
[dma_debug_noncoherent] = "noncoherent",
|
||||
[dma_debug_phy] = "phy",
|
||||
};
|
||||
@@ -1446,47 +1444,6 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
|
||||
check_unmap(&ref);
|
||||
}
|
||||
|
||||
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
|
||||
int direction, dma_addr_t dma_addr,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_debug_entry *entry;
|
||||
|
||||
if (unlikely(dma_debug_disabled()))
|
||||
return;
|
||||
|
||||
entry = dma_entry_alloc();
|
||||
if (!entry)
|
||||
return;
|
||||
|
||||
entry->type = dma_debug_resource;
|
||||
entry->dev = dev;
|
||||
entry->paddr = addr;
|
||||
entry->size = size;
|
||||
entry->dev_addr = dma_addr;
|
||||
entry->direction = direction;
|
||||
entry->map_err_type = MAP_ERR_NOT_CHECKED;
|
||||
|
||||
add_dma_entry(entry, attrs);
|
||||
}
|
||||
|
||||
void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
|
||||
size_t size, int direction)
|
||||
{
|
||||
struct dma_debug_entry ref = {
|
||||
.type = dma_debug_resource,
|
||||
.dev = dev,
|
||||
.dev_addr = dma_addr,
|
||||
.size = size,
|
||||
.direction = direction,
|
||||
};
|
||||
|
||||
if (unlikely(dma_debug_disabled()))
|
||||
return;
|
||||
|
||||
check_unmap(&ref);
|
||||
}
|
||||
|
||||
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
|
||||
size_t size, int direction)
|
||||
{
|
||||
|
||||
@@ -30,14 +30,6 @@ extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
|
||||
extern void debug_dma_free_coherent(struct device *dev, size_t size,
|
||||
void *virt, dma_addr_t addr);
|
||||
|
||||
extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
|
||||
size_t size, int direction,
|
||||
dma_addr_t dma_addr,
|
||||
unsigned long attrs);
|
||||
|
||||
extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
|
||||
size_t size, int direction);
|
||||
|
||||
extern void debug_dma_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t dma_handle, size_t size,
|
||||
int direction);
|
||||
@@ -95,19 +87,6 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
|
||||
{
|
||||
}
|
||||
|
||||
static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
|
||||
size_t size, int direction,
|
||||
dma_addr_t dma_addr,
|
||||
unsigned long attrs)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void debug_dma_unmap_resource(struct device *dev,
|
||||
dma_addr_t dma_addr, size_t size,
|
||||
int direction)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void debug_dma_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t dma_handle,
|
||||
size_t size, int direction)
|
||||
|
||||
@@ -497,22 +497,6 @@ out_unmap:
|
||||
return ret;
|
||||
}
|
||||
|
||||
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
dma_addr_t dma_addr = paddr;
|
||||
|
||||
if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
|
||||
dev_err_once(dev,
|
||||
"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
|
||||
&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
|
||||
WARN_ON_ONCE(1);
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
return dma_addr;
|
||||
}
|
||||
|
||||
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
|
||||
@@ -152,12 +152,10 @@ static inline bool dma_map_direct(struct device *dev,
|
||||
return dma_go_direct(dev, *dev->dma_mask, ops);
|
||||
}
|
||||
|
||||
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
|
||||
size_t offset, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
phys_addr_t phys = page_to_phys(page) + offset;
|
||||
bool is_mmio = attrs & DMA_ATTR_MMIO;
|
||||
dma_addr_t addr;
|
||||
|
||||
@@ -177,6 +175,9 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
|
||||
|
||||
addr = ops->map_resource(dev, phys, size, dir, attrs);
|
||||
} else {
|
||||
struct page *page = phys_to_page(phys);
|
||||
size_t offset = offset_in_page(phys);
|
||||
|
||||
/*
|
||||
* The dma_ops API contract for ops->map_page() requires
|
||||
* kmappable memory, while ops->map_resource() does not.
|
||||
@@ -191,9 +192,26 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
|
||||
|
||||
return addr;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dma_map_phys);
|
||||
|
||||
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
|
||||
size_t offset, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
phys_addr_t phys = page_to_phys(page) + offset;
|
||||
|
||||
if (unlikely(attrs & DMA_ATTR_MMIO))
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
|
||||
WARN_ON_ONCE(is_zone_device_page(page)))
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
return dma_map_phys(dev, phys, size, dir, attrs);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_map_page_attrs);
|
||||
|
||||
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
|
||||
void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
@@ -213,6 +231,16 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
|
||||
trace_dma_unmap_phys(dev, addr, size, dir, attrs);
|
||||
debug_dma_unmap_phys(dev, addr, size, dir);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dma_unmap_phys);
|
||||
|
||||
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
if (unlikely(attrs & DMA_ATTR_MMIO))
|
||||
return;
|
||||
|
||||
dma_unmap_phys(dev, addr, size, dir, attrs);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_unmap_page_attrs);
|
||||
|
||||
static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
|
||||
@@ -338,41 +366,18 @@ EXPORT_SYMBOL(dma_unmap_sg_attrs);
|
||||
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
dma_addr_t addr = DMA_MAPPING_ERROR;
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
|
||||
if (WARN_ON_ONCE(!dev->dma_mask))
|
||||
if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
|
||||
WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
if (dma_map_direct(dev, ops))
|
||||
addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
|
||||
else if (use_dma_iommu(dev))
|
||||
addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs);
|
||||
else if (ops->map_resource)
|
||||
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
|
||||
|
||||
trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs);
|
||||
debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
|
||||
return addr;
|
||||
return dma_map_phys(dev, phys_addr, size, dir, attrs | DMA_ATTR_MMIO);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_map_resource);
|
||||
|
||||
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (dma_map_direct(dev, ops))
|
||||
; /* nothing to do: uncached and no swiotlb */
|
||||
else if (use_dma_iommu(dev))
|
||||
iommu_dma_unmap_resource(dev, addr, size, dir, attrs);
|
||||
else if (ops->unmap_resource)
|
||||
ops->unmap_resource(dev, addr, size, dir, attrs);
|
||||
trace_dma_unmap_resource(dev, addr, size, dir, attrs);
|
||||
debug_dma_unmap_resource(dev, addr, size, dir);
|
||||
dma_unmap_phys(dev, addr, size, dir, attrs | DMA_ATTR_MMIO);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_unmap_resource);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user