
drm/xe/hmm: Don't dereference struct page pointers without notifier lock

The pfns that we obtain from hmm_range_fault() point to pages that
we don't have a reference on, and they are only guaranteed to remain
in the cpu page-tables while the notifier lock is held and the
notifier seqno is still valid.

So while building the sg table and marking the pages accessed / dirty
we need to hold this lock with a validated seqno.
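
A minimal sketch of that rule (illustrative only; the helper name
touch_range_pages() and the bare notifier_sem parameter are made up
here and are not the driver's symbols): any walk over
range->hmm_pfns[] that dereferences struct pages must sit between a
successful seqno check and the unlock.

  #include <linux/hmm.h>
  #include <linux/mmu_notifier.h>
  #include <linux/rwsem.h>
  #include <linux/swap.h>

  /* Hypothetical helper: only dereference hmm pfns under the lock. */
  static int touch_range_pages(struct hmm_range *range,
                               struct rw_semaphore *notifier_sem)
  {
          unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
          int ret;

          ret = down_read_interruptible(notifier_sem);
          if (ret)
                  return ret;

          /* The cpu page-tables may have changed since hmm_range_fault(). */
          if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
                  up_read(notifier_sem);
                  return -EAGAIN; /* caller re-faults and retries */
          }

          for (i = 0; i < npages; i++) {
                  /* Safe only here: lock held, seqno validated. */
                  struct page *page = hmm_pfn_to_page(range->hmm_pfns[i]);

                  mark_page_accessed(page);
          }

          up_read(notifier_sem);
          return 0;
  }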

However, the lock is reclaim tainted, which makes
sg_alloc_table_from_pages_segment() unusable since it internally
allocates memory.

Instead, build the sg-table manually. For the non-iommu case
this might lead to fewer coalesces, but if that's a problem it can
be fixed up later in the resource cursor code. For the iommu case,
the whole sg-table may still be coalesced to a single contiguous
device va region.
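
For illustration (the names mirror the new xe_alloc_sg() added below),
the counting pass that sizes the table works per high-order pfn and
clips the first chunk when the large page starts before the range;
the allocation itself then runs after the notifier lock is dropped:

  for (i = 0; i < npages;) {
          unsigned long hmm_pfn = range->hmm_pfns[i];
          /* Pages covered by this pfn, e.g. 512 for a 2 MiB mapping. */
          unsigned long len = 1UL << hmm_pfn_to_map_order(hmm_pfn);

          /*
           * A high-order page may begin before range->start; the pfn's
           * low bits are the offset into it, so only the remainder
           * belongs to this chunk (order 9, offset 3 -> 509 pages).
           */
          len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
          i += len;
          num_chunks++;           /* one scatterlist entry per chunk */
  }
  /* Reclaim-safe: sg_alloc_table(st, num_chunks, GFP_KERNEL) runs unlocked. */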

This avoids marking pages that we don't own dirty and accessed, and
it also avoids dereferencing struct pages that we don't own.

v2:
- Use assert to check whether hmm pfns are valid (Matthew Auld)
- Take into account that large pages may cross range boundaries
  (Matthew Auld)

v3:
- Don't unnecessarily check for a non-freed sg-table. (Matthew Auld)
- Add a missing up_read() in an error path. (Matthew Auld)

Fixes: 81e058a3e7 ("drm/xe: Introduce helper to populate userptr")
Cc: Oak Zeng <oak.zeng@intel.com>
Cc: <stable@vger.kernel.org> # v6.10+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-3-thomas.hellstrom@linux.intel.com
(cherry picked from commit ea3e66d280)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

@@ -42,6 +42,42 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
 	}
 }
 
+static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
+		       struct hmm_range *range, struct rw_semaphore *notifier_sem)
+{
+	unsigned long i, npages, hmm_pfn;
+	unsigned long num_chunks = 0;
+	int ret;
+
+	/* HMM docs says this is needed. */
+	ret = down_read_interruptible(notifier_sem);
+	if (ret)
+		return ret;
+
+	if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
+		up_read(notifier_sem);
+		return -EAGAIN;
+	}
+
+	npages = xe_npages_in_range(range->start, range->end);
+
+	for (i = 0; i < npages;) {
+		unsigned long len;
+
+		hmm_pfn = range->hmm_pfns[i];
+		xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
+
+		len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+
+		/* If order > 0 the page may extend beyond range->start */
+		len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
+		i += len;
+		num_chunks++;
+	}
+	up_read(notifier_sem);
+
+	return sg_alloc_table(st, num_chunks, GFP_KERNEL);
+}
+
 /**
  * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
  * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
@@ -50,6 +86,7 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
  * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
  * has the pfn numbers of pages that back up this hmm address range.
  * @st: pointer to the sg table.
+ * @notifier_sem: The xe notifier lock.
  * @write: whether we write to this range. This decides dma map direction
  * for system pages. If write we map it bi-diretional; otherwise
  * DMA_TO_DEVICE
@@ -76,38 +113,41 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
  * Returns 0 if successful; -ENOMEM if fails to allocate memory
  */
 static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
-		       struct sg_table *st, bool write)
+		       struct sg_table *st,
+		       struct rw_semaphore *notifier_sem,
+		       bool write)
 {
+	unsigned long npages = xe_npages_in_range(range->start, range->end);
 	struct device *dev = xe->drm.dev;
-	struct page **pages;
-	u64 i, npages;
-	int ret;
+	struct scatterlist *sgl;
+	struct page *page;
+	unsigned long i, j;
 
-	npages = xe_npages_in_range(range->start, range->end);
-	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
+	lockdep_assert_held(notifier_sem);
 
-	for (i = 0; i < npages; i++) {
-		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
-		xe_assert(xe, !is_device_private_page(pages[i]));
+	i = 0;
+	for_each_sg(st->sgl, sgl, st->nents, j) {
+		unsigned long hmm_pfn, size;
+
+		hmm_pfn = range->hmm_pfns[i];
+		page = hmm_pfn_to_page(hmm_pfn);
+		xe_assert(xe, !is_device_private_page(page));
+
+		size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+		size -= page_to_pfn(page) & (size - 1);
+		i += size;
+
+		if (unlikely(j == st->nents - 1)) {
+			if (i > npages)
+				size -= (i - npages);
+			sg_mark_end(sgl);
+		}
+		sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
 	}
+	xe_assert(xe, i == npages);
 
-	ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
-						xe_sg_segment_size(dev), GFP_KERNEL);
-	if (ret)
-		goto free_pages;
-
-	ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
-			      DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
-	if (ret) {
-		sg_free_table(st);
-		st = NULL;
-	}
-
-free_pages:
-	kvfree(pages);
-	return ret;
+	return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
 }
 
 /**
@@ -237,16 +277,36 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
 	if (ret)
 		goto free_pfns;
 
-	ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
+	ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
 	if (ret)
 		goto free_pfns;
 
+	ret = down_read_interruptible(&vm->userptr.notifier_lock);
+	if (ret)
+		goto free_st;
+
+	if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
+
+	ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
+			  &vm->userptr.notifier_lock, write);
+	if (ret)
+		goto out_unlock;
+
 	xe_mark_range_accessed(&hmm_range, write);
 	userptr->sg = &userptr->sgt;
 	userptr->notifier_seq = hmm_range.notifier_seq;
+	up_read(&vm->userptr.notifier_lock);
+	kvfree(pfns);
+	return 0;
 
+out_unlock:
+	up_read(&vm->userptr.notifier_lock);
+free_st:
+	sg_free_table(&userptr->sgt);
 free_pfns:
 	kvfree(pfns);
 	return ret;
 }