drm/xe/sa: Shadow buffer support in the sub-allocator pool

The existing sub-allocator is limited to managing a single buffer object.
This enhancement introduces shadow buffer functionality to support
scenarios requiring dual buffer management.

The changes include shadow buffer object creation, management of both the
primary and shadow buffers, and appropriate locking mechanisms for
thread-safe operations.

This enables more flexible buffer allocation strategies in scenarios where
shadow buffering is required.

Signed-off-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Suggested-by: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251118120745.3460172-2-satyanarayana.k.v.p@intel.com
This commit is contained in:
Satyanarayana K V P
2025-11-18 12:07:44 +00:00
committed by Matthew Brost
parent c34a14bce7
commit 1f2cf5295c
5 changed files with 91 additions and 4 deletions

View File

@@ -30,7 +30,7 @@ static int guc_buf_cache_init(struct xe_guc_buf_cache *cache, u32 size)
struct xe_gt *gt = cache_to_gt(cache); struct xe_gt *gt = cache_to_gt(cache);
struct xe_sa_manager *sam; struct xe_sa_manager *sam;
sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32)); sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32), 0);
if (IS_ERR(sam)) if (IS_ERR(sam))
return PTR_ERR(sam); return PTR_ERR(sam);
cache->sam = sam; cache->sam = sam;

View File

@@ -29,6 +29,7 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
kvfree(sa_manager->cpu_ptr); kvfree(sa_manager->cpu_ptr);
sa_manager->bo = NULL; sa_manager->bo = NULL;
sa_manager->shadow = NULL;
} }
/** /**
@@ -37,12 +38,14 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
* @size: number of bytes to allocate * @size: number of bytes to allocate
* @guard: number of bytes to exclude from suballocations * @guard: number of bytes to exclude from suballocations
* @align: alignment for each suballocated chunk * @align: alignment for each suballocated chunk
* @flags: flags for suballocator
* *
* Prepares the suballocation manager for suballocations. * Prepares the suballocation manager for suballocations.
* *
* Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure. * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
*/ */
struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align) struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size,
u32 guard, u32 align, u32 flags)
{ {
struct xe_device *xe = tile_to_xe(tile); struct xe_device *xe = tile_to_xe(tile);
struct xe_sa_manager *sa_manager; struct xe_sa_manager *sa_manager;
@@ -79,6 +82,26 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size); memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
} }
if (flags & XE_SA_BO_MANAGER_FLAG_SHADOW) {
struct xe_bo *shadow;
ret = drmm_mutex_init(&xe->drm, &sa_manager->swap_guard);
if (ret)
return ERR_PTR(ret);
shadow = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_INVALIDATE |
XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(shadow)) {
drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
size / SZ_1K, shadow);
return ERR_CAST(shadow);
}
sa_manager->shadow = shadow;
}
drm_suballoc_manager_init(&sa_manager->base, managed_size, align); drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini, ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
sa_manager); sa_manager);
@@ -88,6 +111,48 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
return sa_manager; return sa_manager;
} }
/**
 * xe_sa_bo_swap_shadow() - Swap the SA BO with shadow BO.
 * @sa_manager: the XE sub allocator manager
 *
 * Exchanges the roles of the sub-allocator primary and shadow buffer
 * objects, and refreshes @sa_manager->cpu_ptr when the new primary BO is
 * CPU-mapped (not I/O memory).
 *
 * The caller must hold the swap guard mutex (see xe_sa_bo_swap_guard()),
 * and the manager must have been initialized with a shadow BO
 * (XE_SA_BO_MANAGER_FLAG_SHADOW).
 *
 * Return: None.
 */
void xe_sa_bo_swap_shadow(struct xe_sa_manager *sa_manager)
{
	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
	struct xe_bo *prev_primary = sa_manager->bo;

	xe_assert(xe, sa_manager->shadow);
	lockdep_assert_held(&sa_manager->swap_guard);

	/* Promote the shadow BO to primary and demote the old primary. */
	sa_manager->bo = sa_manager->shadow;
	sa_manager->shadow = prev_primary;

	/* Keep the cached CPU pointer tracking the (new) primary BO. */
	if (!sa_manager->bo->vmap.is_iomem)
		sa_manager->cpu_ptr = sa_manager->bo->vmap.vaddr;
}
/**
* xe_sa_bo_sync_shadow() - Sync the SA Shadow BO with primary BO.
* @sa_bo: the sub-allocator buffer object.
*
* Synchronize sub-allocator shadow buffer object with primary buffer object.
*
* Return: None.
*/
void xe_sa_bo_sync_shadow(struct drm_suballoc *sa_bo)
{
struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
xe_assert(xe, sa_manager->shadow);
lockdep_assert_held(&sa_manager->swap_guard);
xe_map_memcpy_to(xe, &sa_manager->shadow->vmap,
drm_suballoc_soffset(sa_bo),
xe_sa_bo_cpu_addr(sa_bo),
drm_suballoc_size(sa_bo));
}
/** /**
* __xe_sa_bo_new() - Make a suballocation but use custom gfp flags. * __xe_sa_bo_new() - Make a suballocation but use custom gfp flags.
* @sa_manager: the &xe_sa_manager * @sa_manager: the &xe_sa_manager

View File

@@ -14,12 +14,14 @@
struct dma_fence; struct dma_fence;
struct xe_tile; struct xe_tile;
struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align); #define XE_SA_BO_MANAGER_FLAG_SHADOW BIT(0)
struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size,
u32 guard, u32 align, u32 flags);
struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp); struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp);
static inline struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align) static inline struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
{ {
return __xe_sa_bo_manager_init(tile, size, SZ_4K, align); return __xe_sa_bo_manager_init(tile, size, SZ_4K, align, 0);
} }
/** /**
@@ -69,4 +71,18 @@ static inline void *xe_sa_bo_cpu_addr(struct drm_suballoc *sa)
drm_suballoc_soffset(sa); drm_suballoc_soffset(sa);
} }
void xe_sa_bo_swap_shadow(struct xe_sa_manager *sa_manager);
void xe_sa_bo_sync_shadow(struct drm_suballoc *sa_bo);
/**
 * xe_sa_bo_swap_guard() - Retrieve the SA BO swap guard within sub-allocator.
 * @sa_manager: the &xe_sa_manager
 *
 * The returned mutex must be held around xe_sa_bo_swap_shadow() and
 * xe_sa_bo_sync_shadow() calls.
 *
 * Return: Sub-allocator swap guard mutex.
 */
static inline struct mutex *xe_sa_bo_swap_guard(struct xe_sa_manager *sa_manager)
{
	struct mutex *guard = &sa_manager->swap_guard;

	return guard;
}
#endif #endif

View File

@@ -12,6 +12,9 @@ struct xe_bo;
struct xe_sa_manager { struct xe_sa_manager {
struct drm_suballoc_manager base; struct drm_suballoc_manager base;
struct xe_bo *bo; struct xe_bo *bo;
struct xe_bo *shadow;
/** @swap_guard: Timeline guard updating @bo and @shadow */
struct mutex swap_guard;
void *cpu_ptr; void *cpu_ptr;
bool is_iomem; bool is_iomem;
}; };

View File

@@ -162,9 +162,12 @@ static int alloc_bb_pool(struct xe_tile *tile, struct xe_sriov_vf_ccs_ctx *ctx)
offset = 0; offset = 0;
xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP, xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP,
bb_pool_size); bb_pool_size);
xe_map_memset(xe, &sa_manager->shadow->vmap, offset, MI_NOOP,
bb_pool_size);
offset = bb_pool_size - sizeof(u32); offset = bb_pool_size - sizeof(u32);
xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END); xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END);
xe_map_wr(xe, &sa_manager->shadow->vmap, offset, u32, MI_BATCH_BUFFER_END);
ctx->mem.ccs_bb_pool = sa_manager; ctx->mem.ccs_bb_pool = sa_manager;