drm/xe: Remove last fence dependency check from binds and execs

Eliminate redundant last fence dependency checks in exec and bind jobs,
as they are now equivalent to xe_exec_queue_is_idle. Simplify the code
by removing this dead logic.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251031234050.3043507-7-matthew.brost@intel.com
This commit is contained in:
Matthew Brost
2025-10-31 16:40:50 -07:00
parent aa87b681bc
commit 1a2cf01e1c
6 changed files with 0 additions and 54 deletions

View File

@@ -302,10 +302,6 @@ retry:
goto err_put_job;
if (!xe_vm_in_lr_mode(vm)) {
err = xe_sched_job_last_fence_add_dep(job, vm);
if (err)
goto err_put_job;
err = xe_svm_notifier_lock_interruptible(vm);
if (err)
goto err_put_job;

View File

@@ -1125,29 +1125,6 @@ void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
q->last_fence = dma_fence_get(fence);
}
/**
 * xe_exec_queue_last_fence_test_dep - Check whether the queue's last fence is pending
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Returns:
 * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
 */
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence = xe_exec_queue_last_fence_get(q, vm);
	int err = 0;

	/* No last fence means no outstanding dependency. */
	if (!fence)
		return 0;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		err = -ETIME;

	dma_fence_put(fence);
	return err;
}
/**
* xe_exec_queue_tlb_inval_last_fence_put() - Drop ref to last TLB invalidation fence
* @q: The exec queue

View File

@@ -88,8 +88,6 @@ struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *
struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
struct dma_fence *fence);
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
struct xe_vm *vm);
void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
struct xe_vm *vm,

View File

@@ -1338,13 +1338,6 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
return err;
}
if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
if (job)
err = xe_sched_job_last_fence_add_dep(job, vm);
else
err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
}
for (i = 0; job && !err && i < vops->num_syncs; i++)
err = xe_sync_entry_add_deps(&vops->syncs[i], job);

View File

@@ -297,23 +297,6 @@ void xe_sched_job_push(struct xe_sched_job *job)
xe_sched_job_put(job);
}
/**
 * xe_sched_job_last_fence_add_dep - Add last fence dependency to job
 * @job: job to add the last fence dependency to
 * @vm: virtual memory job belongs to
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
{
	/* Takes a reference on the last fence; the scheduler owns it from here. */
	return drm_sched_job_add_dependency(&job->drm,
					    xe_exec_queue_last_fence_get(job->q, vm));
}
/**
* xe_sched_job_init_user_fence - Initialize user_fence for the job
* @job: job whose user_fence needs an init

View File

@@ -58,7 +58,6 @@ bool xe_sched_job_completed(struct xe_sched_job *job);
void xe_sched_job_arm(struct xe_sched_job *job);
void xe_sched_job_push(struct xe_sched_job *job);
int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
void xe_sched_job_init_user_fence(struct xe_sched_job *job,
struct xe_sync_entry *sync);