drm/xe/vf: Shifting GGTT area post migration

We have only one GGTT for all IOV functions, with each VF having a range
of addresses assigned for its use. After migration, a VF can receive a
different range of addresses than it had initially.

This implements shifting GGTT addresses within drm_mm nodes, so that
VMAs stay valid after migration. The driver will use the new addresses
when accessing GGTT from the moment the shifting ends.

By taking the ggtt->lock for the period of VMA fixups, this change
also adds a constraint on that mutex. No lock taken during the recovery
may ever wait for a hardware response, because after migration the
hardware will not do anything until the fixups are finished.
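
In essence, every node tracking a VF allocation is relocated by a signed
delta equal to the change of the VF's GGTT base. A minimal conceptual
sketch of that relocation (illustrative only; the helper name is made up,
and the patch below actually drains all nodes onto a temporary list before
re-inserting them):

  #include <drm/drm_mm.h>

  /* Illustration: relocate a single drm_mm node by a signed shift. */
  static int shift_one_node(struct drm_mm *mm, struct drm_mm_node *node, s64 shift)
  {
          drm_mm_remove_node(node);  /* take the node out of the address space */
          node->start += shift;      /* shift = new VF GGTT base - old VF GGTT base */
          return drm_mm_reserve_node(mm, node);  /* re-insert at the shifted offset */
  }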

v2: Moved some functions to xe_ggtt.c; moved shift computation to just
  after querying; improved documentation; switched some warns to asserts;
  skipping fixups when GGTT shift equals 0; iterating through tiles (Michal)
v3: Updated kerneldocs, removed unused function, properly allocate
  ballooning nodes if nonexistent
v4: Re-used ballooning functions from VF init, used bool in place of
  standard error codes
v5: Renamed one function
v6: Subject tag change, several kerneldocs updated, some functions
  renamed, some moved, added several asserts, shuffled declarations
  of variables, revealed more detail in high level functions
v7: Fixed typos, added `_locked` suffix to some functions, improved
  readability of asserts, removed unneeded conditional
v8: Moved one function, removed implementation detail from kerneldoc,
  added asserts
v9: Code shuffling without much change, and one param rename
v10: Minor error path change, added printing the shift via debugfs

Signed-off-by: Tomasz Lis <tomasz.lis@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://lore.kernel.org/r/20250512114018.361843-3-tomasz.lis@intel.com

drivers/gpu/drm/xe/xe_ggtt.c

@@ -484,6 +484,56 @@ void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
        drm_mm_remove_node(&node->base);
}

static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size)
{
        struct xe_tile *tile = ggtt->tile;
        struct xe_device *xe = tile_to_xe(tile);
        u64 __maybe_unused wopcm = xe_wopcm_size(xe);

        xe_tile_assert(tile, start >= wopcm);
        xe_tile_assert(tile, start + size < ggtt->size - wopcm);
}

/**
* xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range.
* @ggtt: the &xe_ggtt struct instance
* @shift: change to the location of the area provisioned for the current VF
*
* This function moves all nodes from the GGTT VM to a temp list. These nodes are expected
* to represent allocations in the range formerly assigned to the current VF, before the range
* changed. When the GGTT VM is completely clear of any nodes, they are re-added with shifted
* offsets.
*
* The function cannot fail, because it only shifts existing nodes without any additional
* processing. If the nodes existed successfully at the old address, they will do the same at
* the new one. A failure inside this function would indicate that the list of nodes was either
* already damaged, or that the shift moves the address range outside of valid bounds. Both
* cases justify an assert rather than an error code.
*/
void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift)
{
        struct xe_tile *tile __maybe_unused = ggtt->tile;
        struct drm_mm_node *node, *tmpn;
        LIST_HEAD(temp_list_head);

        lockdep_assert_held(&ggtt->lock);

        if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
                drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
                        xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);

        drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
                drm_mm_remove_node(node);
                list_add(&node->node_list, &temp_list_head);
        }

        list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
                list_del(&node->node_list);
                node->start += shift;
                drm_mm_reserve_node(&ggtt->mm, node);
                xe_tile_assert(tile, drm_mm_node_allocated(node));
        }
}

/**
* xe_ggtt_node_insert_locked - Locked version to insert a &xe_ggtt_node into the GGTT
* @node: the &xe_ggtt_node to be inserted

drivers/gpu/drm/xe/xe_ggtt.h

@@ -18,6 +18,7 @@ void xe_ggtt_node_fini(struct xe_ggtt_node *node);
int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node,
                                       u64 start, u64 size);
void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node);
void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift);
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align);
int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,

drivers/gpu/drm/xe/xe_gt_sriov_vf.c

@@ -415,6 +415,7 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
        xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
                                start, start + size - 1, size / SZ_1K);

        config->ggtt_shift = start - (s64)config->ggtt_base;
        config->ggtt_base = start;
        config->ggtt_size = size;
@@ -560,6 +561,24 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
        return gt->sriov.vf.self_config.lmem_size;
}

/**
* xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
* @gt: the &xe_gt struct instance
*
* This function is for VF use only.
*
* Return: The shift value; could be negative
*/
s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
{
        struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;

        xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
        xe_gt_assert(gt, !xe_gt_is_media_type(gt));

        return config->ggtt_shift;
}

static int vf_init_ggtt_balloons(struct xe_gt *gt)
{
        struct xe_tile *tile = gt_to_tile(gt);
@@ -817,6 +836,89 @@ failed:
        return err;
}

/**
* DOC: GGTT nodes shifting during VF post-migration recovery
*
* The first fixup applied to the VF KMD structures as part of post-migration
* recovery is shifting nodes within the &xe_ggtt instance. The nodes are moved
* from the range previously assigned to this VF into the newly provisioned area.
* The changes include balloons, which are resized accordingly.
*
* The balloon nodes are there to eliminate unavailable ranges from use: one
* reserves the GGTT area below the range assigned to the current VF, and the
* other reserves the area above.
*
* Below is the GGTT layout of an example VF, with a certain address range assigned
* to it and inaccessible areas above and below:
*
* 0 4GiB
* |<--------------------------- Total GGTT size ----------------------------->|
* WOPCM GUC_TOP
* |<-------------- Area mappable by xe_ggtt instance ---------------->|
*
* +---+---------------------------------+----------+----------------------+---+
* |\\\|/////////////////////////////////| VF mem |//////////////////////|\\\|
* +---+---------------------------------+----------+----------------------+---+
*
* Hardware enforced access rules before migration:
*
* |<------- inaccessible for VF ------->|<VF owned>|<-- inaccessible for VF ->|
*
* GGTT nodes used for tracking allocations:
*
* |<---------- balloon ------------>|<- nodes->|<----- balloon ------>|
*
* After the migration, the GGTT area assigned to the VF might have shifted, either
* to a lower or to a higher address. But we expect the total size and the extra
* areas to be identical, as migration can only happen between matching platforms.
* Below is an example GGTT layout of the VF after migration. The GGTT content of
* the VF has been moved to a new area, and we receive its address from GuC:
*
* +---+----------------------+----------+---------------------------------+---+
* |\\\|//////////////////////| VF mem |/////////////////////////////////|\\\|
* +---+----------------------+----------+---------------------------------+---+
*
* Hardware enforced access rules after migration:
*
* |<- inaccessible for VF -->|<VF owned>|<------- inaccessible for VF ------->|
*
* So the VF has a new slice of GGTT assigned, and during the migration process the
* memory content was copied to that new area. But the &xe_ggtt nodes are still
* tracking allocations using the old addresses. The nodes within the VF owned area
* have to be shifted, and the balloon nodes need to be resized to properly mask out
* areas not owned by the VF.
*
* Fixed &xe_ggtt nodes used for tracking allocations:
*
* |<------ balloon ------>|<- nodes->|<----------- balloon ----------->|
*
* Due to the use of GPU profiles, we do not expect the old and new GGTT areas to
* overlap; but our node shifting will fix addresses properly regardless.
*/

/**
* xe_gt_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
* @gt: the &xe_gt struct instance
* @shift: the shift value
*
* Since Global GTT is not virtualized, each VF has an assigned range
* within the global space. This range might have changed during migration,
* which requires all memory addresses pointing to GGTT to be shifted.
*/
void xe_gt_sriov_vf_fixup_ggtt_nodes(struct xe_gt *gt, s64 shift)
{
        struct xe_tile *tile = gt_to_tile(gt);
        struct xe_ggtt *ggtt = tile->mem.ggtt;

        xe_gt_assert(gt, !xe_gt_is_media_type(gt));

        mutex_lock(&ggtt->lock);

        xe_gt_sriov_vf_deballoon_ggtt_locked(gt);
        xe_ggtt_shift_nodes_locked(ggtt, shift);
        xe_gt_sriov_vf_balloon_ggtt_locked(gt);

        mutex_unlock(&ggtt->lock);
}

/**
* xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
* or just mark that a GuC is ready for it.
@@ -1103,6 +1205,8 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
        string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
        drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
        drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);

        if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
                string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
                drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);

drivers/gpu/drm/xe/xe_gt_sriov_vf.h

@@ -20,6 +20,8 @@ int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
int xe_gt_sriov_vf_balloon_ggtt_locked(struct xe_gt *gt);
void xe_gt_sriov_vf_deballoon_ggtt_locked(struct xe_gt *gt);
s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
void xe_gt_sriov_vf_fixup_ggtt_nodes(struct xe_gt *gt, s64 shift);
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);

drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h

@@ -40,6 +40,8 @@ struct xe_gt_sriov_vf_selfconfig {
        u64 ggtt_base;
        /** @ggtt_size: assigned size of the GGTT region. */
        u64 ggtt_size;
        /** @ggtt_shift: difference in ggtt_base on last migration */
        s64 ggtt_shift;
        /** @lmem_size: assigned size of the LMEM. */
        u64 lmem_size;
        /** @num_ctxs: assigned number of GuC submission context IDs. */

drivers/gpu/drm/xe/xe_sriov_vf.c

@@ -7,6 +7,7 @@
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_pm.h"
@@ -170,6 +171,25 @@ static bool vf_post_migration_imminent(struct xe_device *xe)
               work_pending(&xe->sriov.vf.migration.worker);
}

static bool vf_post_migration_fixup_ggtt_nodes(struct xe_device *xe)
{
        bool need_fixups = false;
        struct xe_tile *tile;
        unsigned int id;

        for_each_tile(tile, xe, id) {
                struct xe_gt *gt = tile->primary_gt;
                s64 shift;

                shift = xe_gt_sriov_vf_ggtt_shift(gt);
                if (shift) {
                        need_fixups = true;
                        xe_gt_sriov_vf_fixup_ggtt_nodes(gt, shift);
                }
        }
        return need_fixups;
}

/*
* Notify all GuCs that applying the resource fixups has finished.
*/
@@ -201,6 +221,7 @@ static void vf_post_migration_recovery(struct xe_device *xe)
        if (unlikely(err))
                goto fail;

        vf_post_migration_fixup_ggtt_nodes(xe);
        /* FIXME: add the recovery steps */
        vf_post_migration_notify_resfix_done(xe);
        xe_pm_runtime_put(xe);