drm/amdgpu: fix possible fence leaks from job structure

If we don't end up initializing the fences, free them when
we free the job.  We can't set the hw_fence to NULL after
emitting it because we need it in the cleanup path for the
direct submit case.

v2: take a reference to the fences if we emit them
v3: handle non-job fence in error paths

Fixes: db36632ea5 ("drm/amdgpu: clean up and unify hw fence handling")
Reviewed-by: Jesse Zhang <Jesse.Zhang@amd.com> (v1)
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
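
The cleanup below relies on a detail of the dma_fence API: dma_fence_init()
is what sets fence->ops, so an embedded hardware fence that was never
emitted still has ops == NULL and must be kfree()d rather than dropped with
dma_fence_put(). A minimal sketch of that release rule (the helper name is
hypothetical and the struct layout abbreviated; neither is part of this patch):

	#include <linux/dma-fence.h>
	#include <linux/slab.h>

	struct amdgpu_fence {
		struct dma_fence base;
		/* driver-private fields elided */
	};

	/*
	 * Hypothetical helper mirroring the logic added to the job-free
	 * paths below: an emitted fence went through dma_fence_init(), so
	 * ->ops is set and the job's reference must be dropped; an
	 * unemitted one is still a bare allocation and is simply freed.
	 */
	static void put_or_free_hw_fence(struct amdgpu_fence *af)
	{
		if (af->base.ops)
			dma_fence_put(&af->base);	/* initialized: drop ref */
		else
			kfree(af);			/* never emitted: free */
	}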
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  | 19 +++++++++++++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 18 ++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  |  2 ++
 3 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -176,18 +176,21 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
 	if (!ring->sched.ready) {
 		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
-		return -EINVAL;
+		r = -EINVAL;
+		goto free_fence;
 	}
 
 	if (vm && !job->vmid) {
 		dev_err(adev->dev, "VM IB without ID\n");
-		return -EINVAL;
+		r = -EINVAL;
+		goto free_fence;
 	}
 
 	if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
 	    (!ring->funcs->secure_submission_supported)) {
 		dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
-		return -EINVAL;
+		r = -EINVAL;
+		goto free_fence;
 	}
 
 	alloc_size = ring->funcs->emit_frame_size + num_ibs *
@@ -196,7 +199,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
 	r = amdgpu_ring_alloc(ring, alloc_size);
 	if (r) {
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
-		return r;
+		goto free_fence;
 	}
 
 	need_ctx_switch = ring->current_ctx != fence_ctx;
@@ -302,6 +305,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
 		return r;
 	}
 	*f = &af->base;
+	/* get a ref for the job */
+	if (job)
+		dma_fence_get(*f);
 
 	if (ring->funcs->insert_end)
 		ring->funcs->insert_end(ring);
@@ -328,6 +334,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
 	amdgpu_ring_commit(ring);
 
 	return 0;
+
+free_fence:
+	if (!job)
+		kfree(af);
+	return r;
 }
 
 /**
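
Why every early return above had to change: by the time these checks run,
amdgpu_ib_schedule() has already set up the fence container af (for direct
submission, i.e. no job, it allocates one itself), so a bare return leaked
that allocation. A simplified before/after sketch of the control flow
(allocation details elided; af, job and r as in the hunks above):

	/* before: an early exit leaked af in the direct-submit case */
	if (!ring->sched.ready)
		return -EINVAL;			/* af never freed */

	/* after: all early exits funnel through one cleanup label */
	if (!ring->sched.ready) {
		r = -EINVAL;
		goto free_fence;
	}
	/* ... emit path, commit, return 0 ... */

free_fence:
	if (!job)	/* a job-owned fence is freed with the job instead */
		kfree(af);
	return r;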

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -293,6 +293,15 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 
 	amdgpu_sync_free(&job->explicit_sync);
 
+	if (job->hw_fence->base.ops)
+		dma_fence_put(&job->hw_fence->base);
+	else
+		kfree(job->hw_fence);
+
+	if (job->hw_vm_fence->base.ops)
+		dma_fence_put(&job->hw_vm_fence->base);
+	else
+		kfree(job->hw_vm_fence);
 	kfree(job);
 }
@@ -322,6 +331,15 @@ void amdgpu_job_free(struct amdgpu_job *job)
 	if (job->gang_submit != &job->base.s_fence->scheduled)
 		dma_fence_put(job->gang_submit);
 
+	if (job->hw_fence->base.ops)
+		dma_fence_put(&job->hw_fence->base);
+	else
+		kfree(job->hw_fence);
+
+	if (job->hw_vm_fence->base.ops)
+		dma_fence_put(&job->hw_vm_fence->base);
+	else
+		kfree(job->hw_vm_fence);
 	kfree(job);
 }
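
With the dma_fence_get() calls added on the emit side, both teardown paths
(amdgpu_job_free_cb() for jobs that ran through the scheduler and
amdgpu_job_free() for jobs dropped before submission) now release a
reference the job actually owns. An informal sketch of the reference flow
for a scheduled job; the counts ignore any references the fence driver
itself holds (for example in the ring's fence array):

	/*
	 * Informal reference flow after this patch (hw_fence shown;
	 * hw_vm_fence behaves the same way):
	 *
	 *   amdgpu_fence_emit()      fence initialized, ref published via *f
	 *   dma_fence_get(*f)        extra reference taken for the job
	 *   dma_fence_put(fence)     the consumer drops its reference
	 *   amdgpu_job_free_cb():
	 *     dma_fence_put(&job->hw_fence->base)   job drops its reference
	 */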

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -849,6 +849,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
 		if (r)
 			return r;
 		fence = &job->hw_vm_fence->base;
+		/* get a ref for the job */
+		dma_fence_get(fence);
 	}
 
 	if (vm_flush_needed) {
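
The VM flush fence follows the same discipline: amdgpu_vm_flush() takes a
reference on behalf of the job as soon as the fence exists, matching the
release added to the job-free paths above. A condensed sketch of the
pairing, with unrelated code elided:

	/* in amdgpu_vm_flush(): the job keeps its own reference */
	fence = &job->hw_vm_fence->base;
	dma_fence_get(fence);

	/* in amdgpu_job_free() / amdgpu_job_free_cb(): the matching release */
	if (job->hw_vm_fence->base.ops)
		dma_fence_put(&job->hw_vm_fence->base);	/* emitted: drop ref */
	else
		kfree(job->hw_vm_fence);		/* never emitted: free */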