Mixing GPU and CPU atomics does not work unless a strict migration
policy is enforced: memory targeted by GPU atomics must be in device
memory. Enforce a must-be-in-VRAM policy with a retry loop of 3
attempts; if the retry loop fails, abort the fault. Remove the
always_migrate_to_vram modparam, as we now have a real migration
policy.

v2:
 - Only retry migration on atomics
 - Drop always migrate modparam
v3:
 - Only set vram_only on DGFX (Himal)
 - Bail on get_pages failure if vram_only and retry count exceeded (Himal)
 - s/vram_only/devmem_only
 - Update xe_svm_range_is_valid to accept devmem_only argument
v4:
 - Fix logic bug on get_pages failure
v5:
 - Fix commit message (Himal)
 - Mention removing always_migrate_to_vram in commit message (Lucas)
 - Fix xe_svm_range_is_valid to check for devmem pages
 - Bail on devmem_only && !migrate_devmem (Thomas)
v6:
 - Add READ_ONCE barriers for opportunistic checks (Thomas)
 - Pair READ_ONCE with WRITE_ONCE (Thomas)
v7:
 - Adjust comments (Thomas)

Fixes: 2f118c9491 ("drm/xe: Add SVM VRAM migration")
Cc: stable@vger.kernel.org
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://lore.kernel.org/r/20250512135500.1405019-3-matthew.brost@intel.com
(cherry picked from commit a9ac0fa455)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
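
A minimal, self-contained sketch of the retry policy described above. All
names here (handle_atomic_fault(), migrate_range_to_devmem(),
get_range_pages(), ATOMIC_FAULT_MAX_RETRIES) are illustrative stand-ins,
not the driver's actual interfaces:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

#define ATOMIC_FAULT_MAX_RETRIES 3	/* hypothetical retry budget */

/* Hypothetical stand-ins for the driver's migration and page-collection
 * paths; real implementations would do the actual work. */
static int migrate_range_to_devmem(void *range) { (void)range; return 0; }
static int get_range_pages(void *range, bool devmem_only)
{ (void)range; (void)devmem_only; return 0; }

/*
 * Sketch of the policy: an atomic fault with devmem_only set (discrete
 * hardware) must end with the range resident in device memory; the
 * migrate + get_pages sequence is retried a bounded number of times
 * before the fault is aborted.
 */
static int handle_atomic_fault(void *range, bool devmem_only)
{
	int err;

	for (int attempt = 0; attempt < ATOMIC_FAULT_MAX_RETRIES; attempt++) {
		if (devmem_only) {
			err = migrate_range_to_devmem(range);
			if (err)
				continue;	/* retry the migration */
		}
		err = get_range_pages(range, devmem_only);
		if (!err)
			return 0;	/* pages valid; service the fault */
		if (!devmem_only)
			return err;	/* no strict policy: fail at once */
	}
	return -EFAULT;	/* retry budget exhausted: abort the fault */
}

int main(void)
{
	return handle_atomic_fault(NULL, true);
}

On integrated parts devmem_only stays false (the v3 change), so a
get_pages failure is returned immediately instead of being retried.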
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_vram_region;
struct xe_tile;
struct xe_vm;
struct xe_vma;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of tiles for which a binding is present
	 * for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of tiles for which the binding was
	 * invalidated for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
};

#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

void xe_svm_flush(struct xe_vm *vm);

#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}

#endif

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__) \
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__) \
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
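
A hypothetical caller sketch (not part of this header): the lockdep assert
in xe_svm_range_has_dma_mapping() requires the GPU SVM notifier lock, and
the xe_svm_notifier_lock()/xe_svm_notifier_unlock() macros above are the
helpers that take and release it. The wrapper name xe_svm_range_mapped()
is illustrative only:

static inline bool xe_svm_range_mapped(struct xe_vm *vm,
				       struct xe_svm_range *range)
{
	bool mapped;

	/* Take the notifier lock so the flag read below is stable and the
	 * lockdep assert in xe_svm_range_has_dma_mapping() is satisfied. */
	xe_svm_notifier_lock(vm);
	mapped = xe_svm_range_has_dma_mapping(range);
	xe_svm_notifier_unlock(vm);

	return mapped;
}
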
#endif