Revert "drm/xe/vf: Post migration, repopulate ring area for pending request"

This reverts commit a0dda25d24.

Due to a change in the VF migration recovery design, this code
is no longer needed.

v3:
 - Add commit message (Michal / Lucas)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20251002233824.203417-3-michal.wajdeczko@intel.com
This commit is contained in:
Matthew Brost
2025-10-03 01:38:23 +02:00
parent 08c98f3f2b
commit 6c640592e8
5 changed files with 1 additions and 53 deletions

View File

@@ -845,30 +845,6 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
return fence;
}
/**
 * xe_guc_jobs_ring_rebase - Re-emit ring commands of requests pending
 * on all queues under a guc.
 * @guc: the &xe_guc struct instance
 */
void xe_guc_jobs_ring_rebase(struct xe_guc *guc)
{
	unsigned long id;
	struct xe_exec_queue *queue;

	/*
	 * Called as part of VF migration recovery. Holding the
	 * submission lock imposes a restriction: no waiting on any
	 * GFX HW response is allowed while the lock is held.
	 */
	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, id, queue) {
		if (!exec_queue_killed_or_banned_or_wedged(queue))
			xe_exec_queue_jobs_ring_restore(queue);
	}
	mutex_unlock(&guc->submission_state.lock);
}
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{
struct xe_sched_job *job = to_xe_sched_job(drm_job);