mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
drm/xe/userptr: replace xe_hmm with gpusvm
Goal here is to cut over to gpusvm and remove xe_hmm, relying instead on
common code. The core facilities we need are get_pages(), unmap_pages()
and free_pages() for a given userptr range, plus a vm level notifier
lock, which is now provided by gpusvm.
v2:
- Reuse the same SVM vm struct we use for full SVM, that way we can
use the same lock (Matt B & Himal)
v3:
- Re-use svm_init/fini for userptr.
v4:
- Allow building xe without userptr if we are missing DRM_GPUSVM
config. (Matt B)
- Always make .read_only match xe_vma_read_only() for the ctx. (Dafna)
v5:
- Fix missing conversion with CONFIG_DRM_XE_USERPTR_INVAL_INJECT
v6:
- Convert the new user in xe_vm_madvise.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Dafna Hirschfeld <dafna.hirschfeld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250828142430.615826-17-matthew.auld@intel.com
This commit is contained in:
@@ -761,8 +761,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
 	if (!xe_vma_is_null(vma) && !range) {
 		if (xe_vma_is_userptr(vma))
-			xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
-					xe_vma_size(vma), &curs);
+			xe_res_first_dma(to_userptr_vma(vma)->userptr.pages.dma_addr, 0,
+					 xe_vma_size(vma), &curs);
 		else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
 			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
 				     xe_vma_size(vma), &curs);
@@ -915,7 +915,7 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
 	if (xe_vma_bo(vma))
 		xe_bo_assert_held(xe_vma_bo(vma));
 	else if (xe_vma_is_userptr(vma))
-		lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock);
+		lockdep_assert_held(&xe_vma_vm(vma)->svm.gpusvm.notifier_lock);

 	if (!(pt_mask & BIT(tile->id)))
 		return false;
@@ -1050,7 +1050,7 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
 	xe_pt_commit_prepare_locks_assert(vma);

 	if (xe_vma_is_userptr(vma))
-		lockdep_assert_held_read(&vm->userptr.notifier_lock);
+		xe_svm_assert_held_read(vm);
 }

 static void xe_pt_commit(struct xe_vma *vma,
@@ -1407,7 +1407,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
 	struct xe_userptr_vma *uvma;
 	unsigned long notifier_seq;

-	lockdep_assert_held_read(&vm->userptr.notifier_lock);
+	xe_svm_assert_held_read(vm);

 	if (!xe_vma_is_userptr(vma))
 		return 0;
@@ -1416,7 +1416,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
 	if (xe_pt_userptr_inject_eagain(uvma))
 		xe_vma_userptr_force_invalidate(uvma);

-	notifier_seq = uvma->userptr.notifier_seq;
+	notifier_seq = uvma->userptr.pages.notifier_seq;

 	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
 				     notifier_seq))
@@ -1437,7 +1437,7 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
 {
 	int err = 0;

-	lockdep_assert_held_read(&vm->userptr.notifier_lock);
+	xe_svm_assert_held_read(vm);

 	switch (op->base.op) {
 	case DRM_GPUVA_OP_MAP:
@@ -1478,12 +1478,12 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 	if (err)
 		return err;

-	down_read(&vm->userptr.notifier_lock);
+	down_read(&vm->svm.gpusvm.notifier_lock);

 	list_for_each_entry(op, &vops->list, link) {
 		err = op_check_userptr(vm, op, pt_update_ops);
 		if (err) {
-			up_read(&vm->userptr.notifier_lock);
+			up_read(&vm->svm.gpusvm.notifier_lock);
 			break;
 		}
 	}
@@ -2200,7 +2200,7 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
 			       vma->tile_invalidated & ~BIT(tile->id));
 	vma->tile_staged &= ~BIT(tile->id);
 	if (xe_vma_is_userptr(vma)) {
-		lockdep_assert_held_read(&vm->userptr.notifier_lock);
+		xe_svm_assert_held_read(vm);
 		to_userptr_vma(vma)->userptr.initial_bind = true;
 	}

@@ -2236,7 +2236,7 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
 	if (!vma->tile_present) {
 		list_del_init(&vma->combined_links.rebind);
 		if (xe_vma_is_userptr(vma)) {
-			lockdep_assert_held_read(&vm->userptr.notifier_lock);
+			xe_svm_assert_held_read(vm);

 			spin_lock(&vm->userptr.invalidated_lock);
 			list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
@@ -2535,7 +2535,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 	if (pt_update_ops->needs_svm_lock)
 		xe_svm_notifier_unlock(vm);
 	if (pt_update_ops->needs_userptr_lock)
-		up_read(&vm->userptr.notifier_lock);
+		up_read(&vm->svm.gpusvm.notifier_lock);

 	xe_tlb_inval_job_put(mjob);
 	xe_tlb_inval_job_put(ijob);
Reference in New Issue
Block a user