iommu/tegra: Do not use struct page as the handle for as->pd memory
Instead use the virtual address. Change from dma_map_page() to
dma_map_single(), which works directly on a KVA. Add a type for the
pd table level for clarity.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 50568f87d1
parent 8ffd015db8
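The heart of the change is the DMA mapping call: dma_map_page() takes a
struct page plus an offset, while dma_map_single() takes the kernel
virtual address (KVA) directly, so once as->pd is stored as a pointer to
the table itself the struct page handle becomes unnecessary. A minimal
sketch of the two calling conventions (SMMU_SIZE_PD comes from the
driver; the helper names here are illustrative, not from the patch):

	#include <linux/dma-mapping.h>

	/* Old style: track a struct page, map it by page + offset. */
	static dma_addr_t map_pd_by_page(struct device *dev, struct page *pd_page)
	{
		return dma_map_page(dev, pd_page, 0, SMMU_SIZE_PD, DMA_TO_DEVICE);
	}

	/* New style: track the KVA, map it directly. */
	static dma_addr_t map_pd_by_kva(struct device *dev, void *pd)
	{
		return dma_map_single(dev, pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
	}

Either way the result must be checked with dma_mapping_error() before
use, as the prepare hunk below does. The diff against
drivers/iommu/tegra-smmu.c follows.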
@@ -51,6 +51,8 @@ struct tegra_smmu {
 	struct iommu_device iommu;	/* IOMMU Core code handle */
 };
 
+struct tegra_pd;
+
 struct tegra_smmu_as {
 	struct iommu_domain domain;
 	struct tegra_smmu *smmu;
@@ -58,7 +60,7 @@ struct tegra_smmu_as {
 	spinlock_t lock;
 	u32 *count;
 	struct page **pts;
-	struct page *pd;
+	struct tegra_pd *pd;
 	dma_addr_t pd_dma;
 	unsigned id;
 	u32 attr;
@@ -155,6 +157,10 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
 				 SMMU_PDE_NONSECURE)
 
+struct tegra_pd {
+	u32 val[SMMU_NUM_PDE];
+};
+
 static unsigned int iova_pd_index(unsigned long iova)
 {
 	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
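The new struct tegra_pd gives the page-directory level a concrete type,
so entries are reached through a named array instead of by re-deriving a
u32 pointer from the struct page. A sketch of the access pattern before
and after, using the identifiers from the hunks below:

	/* Before: as->pd was a struct page *, so the KVA had to be recovered. */
	{
		u32 *pd = page_address(as->pd);
		pd[pd_index] = value;
	}

	/* After: as->pd is a struct tegra_pd * and can be used directly. */
	{
		struct tegra_pd *pd = as->pd;
		pd->val[pd_index] = value;
	}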
@@ -284,7 +290,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 
 	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
 
-	as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
+	as->pd = iommu_alloc_page(GFP_KERNEL | __GFP_DMA);
 	if (!as->pd) {
 		kfree(as);
 		return NULL;
@@ -292,7 +298,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 
 	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
 	if (!as->count) {
-		__iommu_free_pages(as->pd, 0);
+		iommu_free_page(as->pd);
 		kfree(as);
 		return NULL;
 	}
@@ -300,7 +306,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
 	if (!as->pts) {
 		kfree(as->count);
-		__iommu_free_pages(as->pd, 0);
+		iommu_free_page(as->pd);
 		kfree(as);
 		return NULL;
 	}
@@ -417,8 +423,8 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
 		goto unlock;
 	}
 
-	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
-				  DMA_TO_DEVICE);
+	as->pd_dma =
+		dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
 	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
 		err = -ENOMEM;
 		goto unlock;
@@ -450,7 +456,7 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
 	return 0;
 
 err_unmap:
-	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
 unlock:
 	mutex_unlock(&smmu->lock);
 
@@ -469,7 +475,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
 
 	tegra_smmu_free_asid(smmu, as->id);
 
-	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
 
 	as->smmu = NULL;
 
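Note the DMA-API pairing rule behind the two hunks above: a buffer
mapped with dma_map_single() must be released by dma_unmap_single()
with the same size and direction, so both unmap sites are converted
together with the map site. The resulting lifecycle, condensed from the
surrounding hunks (error paths elided, sync size illustrative):

	as->pd_dma = dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD,
				    DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;
	/* ... CPU writes a PDE, then flushes that range to the device ... */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(u32), DMA_TO_DEVICE);
	/* ... teardown ... */
	dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);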
@@ -548,11 +554,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
 {
 	unsigned int pd_index = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
-	u32 *pd = page_address(as->pd);
+	struct tegra_pd *pd = as->pd;
 	unsigned long offset = pd_index * sizeof(*pd);
 
 	/* Set the page directory entry first */
-	pd[pd_index] = value;
+	pd->val[pd_index] = value;
 
 	/* The flush the page directory entry from caches */
 	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
@@ -577,14 +583,12 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
 	unsigned int pd_index = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
 	struct page *pt_page;
-	u32 *pd;
 
 	pt_page = as->pts[pd_index];
 	if (!pt_page)
 		return NULL;
 
-	pd = page_address(as->pd);
-	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
+	*dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);
 
 	return tegra_smmu_pte_offset(pt_page, iova);
 }
@@ -619,9 +623,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 
 		*dmap = dma;
 	} else {
-		u32 *pd = page_address(as->pd);
-
-		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
+		*dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
 	}
 
 	return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -645,8 +647,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 	 */
 	if (--as->count[pde] == 0) {
 		struct tegra_smmu *smmu = as->smmu;
-		u32 *pd = page_address(as->pd);
-		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
+		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);
 
 		tegra_smmu_set_pde(as, iova, 0);
 