diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 7b0bbe48d2aa..64ea246a1fb8 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -72,10 +72,9 @@ exec_queue_to_guc(struct xe_exec_queue *q)
 #define EXEC_QUEUE_STATE_WEDGED			(1 << 8)
 #define EXEC_QUEUE_STATE_BANNED			(1 << 9)
 #define EXEC_QUEUE_STATE_CHECK_TIMEOUT		(1 << 10)
-#define EXEC_QUEUE_STATE_EXTRA_REF		(1 << 11)
-#define EXEC_QUEUE_STATE_PENDING_RESUME		(1 << 12)
-#define EXEC_QUEUE_STATE_PENDING_TDR_EXIT	(1 << 13)
-#define EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND	(1 << 14)
+#define EXEC_QUEUE_STATE_PENDING_RESUME		(1 << 11)
+#define EXEC_QUEUE_STATE_PENDING_TDR_EXIT	(1 << 12)
+#define EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND	(1 << 13)
 
 static bool exec_queue_registered(struct xe_exec_queue *q)
 {
@@ -222,21 +221,6 @@ static void clear_exec_queue_check_timeout(struct xe_exec_queue *q)
 	atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
 }
 
-static bool exec_queue_extra_ref(struct xe_exec_queue *q)
-{
-	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF;
-}
-
-static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
-{
-	atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
-}
-
-static void clear_exec_queue_extra_ref(struct xe_exec_queue *q)
-{
-	atomic_and(~EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
-}
-
 static bool exec_queue_pending_resume(struct xe_exec_queue *q)
 {
 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME;
@@ -1538,28 +1522,6 @@ static void disable_scheduling(struct xe_exec_queue *q, bool immediate)
 		       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
 }
 
-static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
-{
-	u32 action[] = {
-		XE_GUC_ACTION_DEREGISTER_CONTEXT,
-		q->guc->id,
-	};
-
-	xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
-	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
-	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
-	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
-
-	set_exec_queue_destroyed(q);
-	trace_xe_exec_queue_deregister(q);
-
-	if (xe_exec_queue_is_multi_queue_secondary(q))
-		handle_deregister_done(guc, q);
-	else
-		xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
-			       G2H_LEN_DW_DEREGISTER_CONTEXT, 1);
-}
-
 static enum drm_gpu_sched_stat
 guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 {
@@ -1575,6 +1537,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 	bool wedged = false, skip_timeout_check;
 
 	xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_lr(q));
+	xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
 
 	/*
 	 * TDR has fired before free job worker. Common if exec queue
@@ -1591,8 +1554,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 
 	/* Must check all state after stopping scheduler */
 	skip_timeout_check = exec_queue_reset(q) ||
-			     exec_queue_killed_or_banned_or_wedged(q) ||
-			     exec_queue_destroyed(q);
+			     exec_queue_killed_or_banned_or_wedged(q);
 
 	/* Skip timeout check if multi-queue group is banned */
 	if (xe_exec_queue_is_multi_queue(q) &&
@@ -1632,13 +1594,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 		wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
 
 	/* Engine state now stable, disable scheduling to check timestamp */
-	if (!wedged && exec_queue_registered(q)) {
+	if (!wedged && (exec_queue_enabled(q) || exec_queue_pending_disable(q))) {
 		int ret;
 
 		if (exec_queue_reset(q))
 			err = -EIO;
 
-		if (!exec_queue_destroyed(q) && xe_uc_fw_is_running(&guc->fw)) {
+		if (xe_uc_fw_is_running(&guc->fw)) {
 			/*
 			 * Wait for any pending G2H to flush out before
 			 * modifying state
@@ -1688,8 +1650,6 @@ trigger_reset:
 			xe_devcoredump(q, job,
 				       "Schedule disable failed to respond, guc_id=%d, ret=%d, guc_read=%d",
 				       q->guc->id, ret, xe_guc_read_stopped(guc));
-			set_exec_queue_extra_ref(q);
-			xe_exec_queue_get(q);	/* GT reset owns this */
 			set_exec_queue_banned(q);
 			xe_gt_reset_async(q->gt);
 			xe_sched_tdr_queue_imm(sched);
@@ -1742,13 +1702,7 @@ trigger_reset:
 		}
 	}
 
-	/* Finish cleaning up exec queue via deregister */
 	set_exec_queue_banned(q);
-	if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
-		set_exec_queue_extra_ref(q);
-		xe_exec_queue_get(q);
-		__deregister_exec_queue(guc, q);
-	}
 
 	/* Mark all outstanding jobs as bad, thus completing them */
 	xe_sched_job_set_error(job, err);
@@ -2419,7 +2373,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
 
 	/* Clean up lost G2H + reset engine state */
 	if (exec_queue_registered(q)) {
-		if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
+		if (xe_exec_queue_is_lr(q))
 			xe_exec_queue_put(q);
 		else if (exec_queue_destroyed(q))
 			__guc_exec_queue_destroy(guc, q);
@@ -2553,11 +2507,7 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
 
 	if (exec_queue_destroyed(q) && exec_queue_registered(q)) {
 		clear_exec_queue_destroyed(q);
-		if (exec_queue_extra_ref(q))
-			xe_exec_queue_put(q);
-		else
-			q->guc->needs_cleanup = true;
-		clear_exec_queue_extra_ref(q);
+		q->guc->needs_cleanup = true;
 		xe_gt_dbg(guc_to_gt(guc), "Replay CLEANUP - guc_id=%d",
 			  q->guc->id);
 	}
@@ -3037,7 +2987,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
 
 	clear_exec_queue_registered(q);
 
-	if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
+	if (xe_exec_queue_is_lr(q))
 		xe_exec_queue_put(q);
 	else
 		__guc_exec_queue_destroy(guc, q);