mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
If a dma-fence submission has in-fences and pagefault queues are running work, there is little incentive to kick the pagefault queues off the hardware until the dma-fence submission is ready to run. Therefore, wait on the in-fences of the dma-fence submission before removing the pagefault queues from the hardware. v2: - Fix kernel doc (CI) - Don't wait under lock (Thomas) - Make wait interruptible Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com> Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Link: https://patch.msgid.link/20251212182847.1683222-6-matthew.brost@intel.com
32 lines
991 B
C
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_HW_ENGINE_GROUP_H_
#define _XE_HW_ENGINE_GROUP_H_

#include "xe_hw_engine_group_types.h"

struct drm_device;
struct xe_exec_queue;
struct xe_gt;
struct xe_sync_entry;

/* Build the hw engine groups for @gt. Returns 0 on success, negative errno on failure. */
int xe_hw_engine_setup_groups(struct xe_gt *gt);

/* Add/remove exec queue @q to/from @group's tracking. */
int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q);
void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q);

/*
 * Acquire @group in @new_mode, reporting the prior mode via @previous_mode.
 * @syncs/@num_syncs carry the submission's in-fences; per the change that
 * introduced them, a dma-fence submission waits on these before faulting
 * LR queues are removed from the hardware (wait is interruptible, not held
 * under the group lock). Pair with xe_hw_engine_group_put().
 * Returns 0 on success, negative errno on failure.
 */
int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
				enum xe_hw_engine_group_execution_mode new_mode,
				enum xe_hw_engine_group_execution_mode *previous_mode,
				struct xe_sync_entry *syncs, int num_syncs);
void xe_hw_engine_group_put(struct xe_hw_engine_group *group);

/* Execution mode implied by exec queue @q (see xe_hw_engine_group_types.h). */
enum xe_hw_engine_group_execution_mode
xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q);
void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group);

#endif