Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2026-03-23 07:56:50 +08:00
iommu/vt-d: Create unique domain ops for each stage
Use the domain ops pointer to tell what kind of domain it is instead of
the internal use_first_level indication. This also protects against
wrongly using an SVA/nested/IDENTITY/BLOCKED domain in places where it
should not be used.

The only remaining uses of use_first_level outside the paging domain are
in paging_domain_compatible() and intel_iommu_enforce_cache_coherency().
Thus, remove the now-useless assignments of use_first_level in
intel_svm_domain_alloc() and intel_iommu_domain_alloc_nested(); none of
the unique ops for these domain types ever reference it on their call
chains.

Add a WARN_ON() check in domain_context_mapping_one() as it only works
with second-stage domains.

This is preparation for iommupt, which will have different ops for each
of the stages.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/5-v3-dbbe6f7e7ae3+124ffe-vtd_prep_jgg@nvidia.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20250714045028.958850-8-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>
Committed by: Will Deacon
Parent: b9434ba97c
Commit: b33125296b
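The core idiom: once each domain type owns a distinct, statically
allocated ops table, the ops pointer itself identifies the domain type,
so a type check reduces to a pointer comparison. A minimal,
self-contained C sketch of the pattern (illustrative names only, not
kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the per-type ops tables. */
struct domain_ops {
        const char *name;
};

static const struct domain_ops fs_paging_ops = { .name = "first-stage" };
static const struct domain_ops ss_paging_ops = { .name = "second-stage" };

struct domain {
        /* The ops pointer doubles as the type tag. */
        const struct domain_ops *ops;
};

/* Type checks are pointer comparisons, as in intel_domain_is_fs_paging(). */
static bool domain_is_fs(const struct domain *d) { return d->ops == &fs_paging_ops; }
static bool domain_is_ss(const struct domain *d) { return d->ops == &ss_paging_ops; }

int main(void)
{
        struct domain d = { .ops = &ss_paging_ops };

        if (domain_is_fs(&d))
                printf("first stage\n");
        else if (domain_is_ss(&d))
                printf("second stage\n");
        else
                printf("unexpected domain type\n"); /* the WARN_ON() case */
        return 0;
}

The helpers added to iommu.h in the diff below apply exactly this
comparison against intel_fs_paging_domain_ops and
intel_ss_paging_domain_ops.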
drivers/iommu/intel/cache.c
@@ -371,7 +371,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
         struct intel_iommu *iommu = tag->iommu;
         u64 type = DMA_TLB_PSI_FLUSH;
 
-        if (domain->use_first_level) {
+        if (intel_domain_is_fs_paging(domain)) {
                 qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
                                     pages, ih, domain->qi_batch);
                 return;
@@ -546,7 +546,8 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
                         qi_batch_flush_descs(iommu, domain->qi_batch);
                 iommu = tag->iommu;
 
-                if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
+                if (!cap_caching_mode(iommu->cap) ||
+                    intel_domain_is_fs_paging(domain)) {
                         iommu_flush_write_buffer(iommu);
                         continue;
                 }
drivers/iommu/intel/iommu.c
@@ -1462,6 +1462,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
         struct context_entry *context;
         int ret;
 
+        if (WARN_ON(!intel_domain_is_ss_paging(domain)))
+                return -EINVAL;
+
         pr_debug("Set context mapping for %02x:%02x.%d\n",
                  bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
@@ -1780,7 +1783,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
 static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
                                        struct intel_iommu *iommu)
 {
-        if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
+        if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
                 return true;
 
         if (rwbf_quirk || cap_rwbf(iommu->cap))
@@ -1812,12 +1815,14 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
 
         if (!sm_supported(iommu))
                 ret = domain_context_mapping(domain, dev);
-        else if (domain->use_first_level)
+        else if (intel_domain_is_fs_paging(domain))
                 ret = domain_setup_first_level(iommu, domain, dev,
                                                IOMMU_NO_PASID, NULL);
-        else
+        else if (intel_domain_is_ss_paging(domain))
                 ret = domain_setup_second_level(iommu, domain, dev,
                                                IOMMU_NO_PASID, NULL);
+        else if (WARN_ON(true))
+                ret = -EINVAL;
 
         if (ret)
                 goto out_block_translation;
@@ -3288,7 +3293,6 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
         domain->use_first_level = first_stage;
 
         domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
-        domain->domain.ops = intel_iommu_ops.default_domain_ops;
 
         /* calculate the address width */
         addr_width = agaw_to_width(iommu->agaw);
@@ -3346,6 +3350,8 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
         dmar_domain = paging_domain_alloc(dev, true);
         if (IS_ERR(dmar_domain))
                 return ERR_CAST(dmar_domain);
+
+        dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
         return &dmar_domain->domain;
 }
 
@@ -3374,6 +3380,7 @@ intel_iommu_domain_alloc_second_stage(struct device *dev,
         if (IS_ERR(dmar_domain))
                 return ERR_CAST(dmar_domain);
 
+        dmar_domain->domain.ops = &intel_ss_paging_domain_ops;
         dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
 
         if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
@@ -4098,12 +4105,15 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
         if (ret)
                 goto out_remove_dev_pasid;
 
-        if (dmar_domain->use_first_level)
+        if (intel_domain_is_fs_paging(dmar_domain))
                 ret = domain_setup_first_level(iommu, dmar_domain,
                                                dev, pasid, old);
-        else
+        else if (intel_domain_is_ss_paging(dmar_domain))
                 ret = domain_setup_second_level(iommu, dmar_domain,
                                                dev, pasid, old);
+        else if (WARN_ON(true))
+                ret = -EINVAL;
+
         if (ret)
                 goto out_unwind_iopf;
 
@@ -4378,6 +4388,32 @@ static struct iommu_domain identity_domain = {
         },
 };
 
+const struct iommu_domain_ops intel_fs_paging_domain_ops = {
+        .attach_dev = intel_iommu_attach_device,
+        .set_dev_pasid = intel_iommu_set_dev_pasid,
+        .map_pages = intel_iommu_map_pages,
+        .unmap_pages = intel_iommu_unmap_pages,
+        .iotlb_sync_map = intel_iommu_iotlb_sync_map,
+        .flush_iotlb_all = intel_flush_iotlb_all,
+        .iotlb_sync = intel_iommu_tlb_sync,
+        .iova_to_phys = intel_iommu_iova_to_phys,
+        .free = intel_iommu_domain_free,
+        .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+};
+
+const struct iommu_domain_ops intel_ss_paging_domain_ops = {
+        .attach_dev = intel_iommu_attach_device,
+        .set_dev_pasid = intel_iommu_set_dev_pasid,
+        .map_pages = intel_iommu_map_pages,
+        .unmap_pages = intel_iommu_unmap_pages,
+        .iotlb_sync_map = intel_iommu_iotlb_sync_map,
+        .flush_iotlb_all = intel_flush_iotlb_all,
+        .iotlb_sync = intel_iommu_tlb_sync,
+        .iova_to_phys = intel_iommu_iova_to_phys,
+        .free = intel_iommu_domain_free,
+        .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+};
+
 const struct iommu_ops intel_iommu_ops = {
         .blocked_domain = &blocking_domain,
         .release_domain = &blocking_domain,
@@ -4396,18 +4432,6 @@ const struct iommu_ops intel_iommu_ops = {
         .def_domain_type = device_def_domain_type,
         .pgsize_bitmap = SZ_4K,
         .page_response = intel_iommu_page_response,
-        .default_domain_ops = &(const struct iommu_domain_ops) {
-                .attach_dev = intel_iommu_attach_device,
-                .set_dev_pasid = intel_iommu_set_dev_pasid,
-                .map_pages = intel_iommu_map_pages,
-                .unmap_pages = intel_iommu_unmap_pages,
-                .iotlb_sync_map = intel_iommu_iotlb_sync_map,
-                .flush_iotlb_all = intel_flush_iotlb_all,
-                .iotlb_sync = intel_iommu_tlb_sync,
-                .iova_to_phys = intel_iommu_iova_to_phys,
-                .free = intel_iommu_domain_free,
-                .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
-        }
 };
 
 static void quirk_iommu_igfx(struct pci_dev *dev)
drivers/iommu/intel/iommu.h
@@ -1378,6 +1378,18 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
                                          u8 devfn, int alloc);
 
 extern const struct iommu_ops intel_iommu_ops;
+extern const struct iommu_domain_ops intel_fs_paging_domain_ops;
+extern const struct iommu_domain_ops intel_ss_paging_domain_ops;
+
+static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain)
+{
+        return domain->domain.ops == &intel_fs_paging_domain_ops;
+}
+
+static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain)
+{
+        return domain->domain.ops == &intel_ss_paging_domain_ops;
+}
 
 #ifdef CONFIG_INTEL_IOMMU
 extern int intel_iommu_sm;
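As a usage sketch (hypothetical function, not part of this commit), a
caller dispatches on the stage through these helpers; the WARN_ON()
fallback catches any SVA/nested/identity/blocked domain that reaches a
paging-only path:

/* Hypothetical example: explicit stage dispatch via the new helpers. */
static int example_setup_translation(struct intel_iommu *iommu,
                                     struct dmar_domain *domain,
                                     struct device *dev, u32 pasid)
{
        if (intel_domain_is_fs_paging(domain))
                return domain_setup_first_level(iommu, domain, dev,
                                                pasid, NULL);
        if (intel_domain_is_ss_paging(domain))
                return domain_setup_second_level(iommu, domain, dev,
                                                 pasid, NULL);
        /* SVA, nested, identity and blocked domains must never get here. */
        WARN_ON(true);
        return -EINVAL;
}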
drivers/iommu/intel/nested.c
@@ -216,8 +216,7 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
         /* Must be nested domain */
         if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
                 return ERR_PTR(-EOPNOTSUPP);
-        if (parent->ops != intel_iommu_ops.default_domain_ops ||
-            !s2_domain->nested_parent)
+        if (!intel_domain_is_ss_paging(s2_domain) || !s2_domain->nested_parent)
                 return ERR_PTR(-EINVAL);
 
         ret = iommu_copy_struct_from_user(&vtd, user_data,
@@ -229,7 +228,6 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
         if (!domain)
                 return ERR_PTR(-ENOMEM);
 
-        domain->use_first_level = true;
         domain->s2_domain = s2_domain;
         domain->s1_cfg = vtd;
         domain->domain.ops = &intel_nested_domain_ops;
drivers/iommu/intel/svm.c
@@ -214,7 +214,6 @@ struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
                 return ERR_PTR(-ENOMEM);
 
         domain->domain.ops = &intel_svm_domain_ops;
-        domain->use_first_level = true;
         INIT_LIST_HEAD(&domain->dev_pasids);
         INIT_LIST_HEAD(&domain->cache_tags);
         spin_lock_init(&domain->cache_lock);