mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
drm/xe: Introduce CONFIG_DRM_XE_GPUSVM
Don't rely on CONFIG_DRM_GPUSVM because other drivers may enable it causing us to compile in SVM support unintentionally. Also take the opportunity to leave more code out of compilation if !CONFIG_DRM_XE_GPUSVM and !CONFIG_DRM_XE_DEVMEM_MIRROR v3: - Fixes for compilation errors on 32-bit. This changes the Kconfig logic a bit. Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Reviewed-by: Matthew Brost <matthew.brost@intel.com> Link: https://lore.kernel.org/r/20250326080551.40201-2-thomas.hellstrom@linux.intel.com
This commit is contained in:
@@ -39,7 +39,6 @@ config DRM_XE
|
||||
select DRM_TTM_HELPER
|
||||
select DRM_EXEC
|
||||
select DRM_GPUVM
|
||||
select DRM_GPUSVM if !UML && DEVICE_PRIVATE
|
||||
select DRM_SCHED
|
||||
select MMU_NOTIFIER
|
||||
select WANT_DEV_COREDUMP
|
||||
@@ -74,9 +73,22 @@ config DRM_XE_DP_TUNNEL
|
||||
|
||||
If in doubt say "Y".
|
||||
|
||||
config DRM_XE_GPUSVM
|
||||
bool "Enable CPU to GPU address mirroring"
|
||||
depends on DRM_XE
|
||||
depends on !UML
|
||||
depends on DEVICE_PRIVATE
|
||||
default y
|
||||
select DRM_GPUSVM
|
||||
help
|
||||
Enable this option if you want support for CPU to GPU address
|
||||
mirroring.
|
||||
|
||||
If in doubt say "Y".
|
||||
|
||||
config DRM_XE_DEVMEM_MIRROR
|
||||
bool "Enable device memory mirror"
|
||||
depends on DRM_XE
|
||||
depends on DRM_XE_GPUSVM
|
||||
select GET_FREE_REGION
|
||||
default y
|
||||
help
|
||||
|
||||
@@ -125,7 +125,7 @@ xe-y += xe_bb.o \
|
||||
xe_wopcm.o
|
||||
|
||||
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
|
||||
xe-$(CONFIG_DRM_GPUSVM) += xe_svm.o
|
||||
xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
|
||||
|
||||
# graphics hardware monitoring (HWMON) support
|
||||
xe-$(CONFIG_HWMON) += xe_hwmon.o
|
||||
|
||||
@@ -107,6 +107,9 @@ struct xe_vram_region {
|
||||
resource_size_t actual_physical_size;
|
||||
/** @mapping: pointer to VRAM mappable space */
|
||||
void __iomem *mapping;
|
||||
/** @ttm: VRAM TTM manager */
|
||||
struct xe_ttm_vram_mgr ttm;
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
|
||||
/** @pagemap: Used to remap device memory as ZONE_DEVICE */
|
||||
struct dev_pagemap pagemap;
|
||||
/**
|
||||
@@ -120,8 +123,7 @@ struct xe_vram_region {
|
||||
* This is generated when remap device memory as ZONE_DEVICE
|
||||
*/
|
||||
resource_size_t hpa_base;
|
||||
/** @ttm: VRAM TTM manager */
|
||||
struct xe_ttm_vram_mgr ttm;
|
||||
#endif
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -1544,6 +1544,7 @@ void xe_migrate_wait(struct xe_migrate *m)
|
||||
dma_fence_wait(m->fence, false);
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
|
||||
static u32 pte_update_cmd_size(u64 size)
|
||||
{
|
||||
u32 num_dword;
|
||||
@@ -1719,6 +1720,8 @@ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
|
||||
XE_MIGRATE_COPY_TO_SRAM);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
|
||||
#include "tests/xe_migrate.c"
|
||||
#endif
|
||||
|
||||
@@ -1420,6 +1420,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
|
||||
return err;
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
|
||||
static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
|
||||
{
|
||||
struct xe_vm *vm = pt_update->vops->vm;
|
||||
@@ -1453,6 +1454,7 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct invalidation_fence {
|
||||
struct xe_gt_tlb_invalidation_fence base;
|
||||
@@ -2257,11 +2259,15 @@ static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
|
||||
.pre_commit = xe_pt_userptr_pre_commit,
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
|
||||
static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
|
||||
.populate = xe_vm_populate_pgtable,
|
||||
.clear = xe_migrate_clear_pgtable_callback,
|
||||
.pre_commit = xe_pt_svm_pre_commit,
|
||||
};
|
||||
#else
|
||||
static const struct xe_migrate_pt_update_ops svm_migrate_ops;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* xe_pt_update_ops_run() - Run PT update operations
|
||||
|
||||
@@ -340,7 +340,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
|
||||
if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
|
||||
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
|
||||
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
|
||||
if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_GPUSVM))
|
||||
if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
|
||||
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
|
||||
DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
|
||||
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
|
||||
|
||||
@@ -340,6 +340,8 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
|
||||
up_write(&vm->lock);
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
|
||||
|
||||
static struct xe_vram_region *page_to_vr(struct page *page)
|
||||
{
|
||||
return container_of(page->pgmap, struct xe_vram_region, pagemap);
|
||||
@@ -578,6 +580,8 @@ static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
|
||||
.copy_to_ram = xe_svm_copy_to_ram,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
static const struct drm_gpusvm_ops gpusvm_ops = {
|
||||
.range_alloc = xe_svm_range_alloc,
|
||||
.range_free = xe_svm_range_free,
|
||||
@@ -651,6 +655,7 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
|
||||
return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id);
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
|
||||
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
|
||||
{
|
||||
return &tile->mem.vram;
|
||||
@@ -709,6 +714,15 @@ unlock:
|
||||
|
||||
return err;
|
||||
}
|
||||
#else
|
||||
static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
|
||||
struct xe_svm_range *range,
|
||||
const struct drm_gpusvm_ctx *ctx)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* xe_svm_handle_pagefault() - SVM handle page fault
|
||||
@@ -867,6 +881,7 @@ int xe_svm_bo_evict(struct xe_bo *bo)
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
|
||||
|
||||
static struct drm_pagemap_device_addr
|
||||
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
|
||||
struct device *dev,
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
#ifndef _XE_SVM_H_
|
||||
#define _XE_SVM_H_
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
|
||||
|
||||
#include <drm/drm_pagemap.h>
|
||||
#include <drm/drm_gpusvm.h>
|
||||
|
||||
@@ -44,7 +46,6 @@ struct xe_svm_range {
|
||||
u8 skip_migrate :1;
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
|
||||
/**
|
||||
* xe_svm_range_pages_valid() - SVM range pages valid
|
||||
* @range: SVM range
|
||||
@@ -73,7 +74,50 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
|
||||
int xe_svm_bo_evict(struct xe_bo *bo);
|
||||
|
||||
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
|
||||
|
||||
/**
|
||||
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
|
||||
* @range: SVM range
|
||||
*
|
||||
* Return: True if SVM range has a DMA mapping, False otherwise
|
||||
*/
|
||||
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
|
||||
{
|
||||
lockdep_assert_held(&range->base.gpusvm->notifier_lock);
|
||||
return range->base.flags.has_dma_mapping;
|
||||
}
|
||||
|
||||
#define xe_svm_assert_in_notifier(vm__) \
|
||||
lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
|
||||
|
||||
#define xe_svm_notifier_lock(vm__) \
|
||||
drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
|
||||
|
||||
#define xe_svm_notifier_unlock(vm__) \
|
||||
drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
|
||||
|
||||
#else
|
||||
#include <linux/interval_tree.h>
|
||||
|
||||
struct drm_pagemap_device_addr;
|
||||
struct xe_bo;
|
||||
struct xe_gt;
|
||||
struct xe_vm;
|
||||
struct xe_vma;
|
||||
struct xe_tile;
|
||||
struct xe_vram_region;
|
||||
|
||||
#define XE_INTERCONNECT_VRAM 1
|
||||
|
||||
struct xe_svm_range {
|
||||
struct {
|
||||
struct interval_tree_node itree;
|
||||
const struct drm_pagemap_device_addr *dma_addr;
|
||||
} base;
|
||||
u32 tile_present;
|
||||
u32 tile_invalidated;
|
||||
};
|
||||
|
||||
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
|
||||
{
|
||||
return false;
|
||||
@@ -125,27 +169,16 @@ static inline
|
||||
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
|
||||
* @range: SVM range
|
||||
*
|
||||
* Return: True if SVM range has a DMA mapping, False otherwise
|
||||
*/
|
||||
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
|
||||
#define xe_svm_assert_in_notifier(...) do {} while (0)
|
||||
#define xe_svm_range_has_dma_mapping(...) false
|
||||
|
||||
static inline void xe_svm_notifier_lock(struct xe_vm *vm)
|
||||
{
|
||||
lockdep_assert_held(&range->base.gpusvm->notifier_lock);
|
||||
return range->base.flags.has_dma_mapping;
|
||||
}
|
||||
|
||||
#define xe_svm_assert_in_notifier(vm__) \
|
||||
lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
|
||||
|
||||
#define xe_svm_notifier_lock(vm__) \
|
||||
drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
|
||||
|
||||
#define xe_svm_notifier_unlock(vm__) \
|
||||
drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
|
||||
|
||||
static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -3109,7 +3109,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
|
||||
|
||||
if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
|
||||
(!xe_vm_in_fault_mode(vm) ||
|
||||
!IS_ENABLED(CONFIG_DRM_GPUSVM)))) {
|
||||
!IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) {
|
||||
err = -EINVAL;
|
||||
goto free_bind_ops;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user