mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
drm/xe/pf: Add helpers for migration data packet allocation / free
Now that it's possible to free the packets, connect the restore handling logic with the ring. The helpers will also be used in upcoming changes that will start producing migration data packets. Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com> Link: https://patch.msgid.link/20251112132220.516975-7-michal.winiarski@intel.com Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
This commit is contained in:
@@ -174,6 +174,7 @@ xe-$(CONFIG_PCI_IOV) += \
|
||||
xe_lmtt_2l.o \
|
||||
xe_lmtt_ml.o \
|
||||
xe_pci_sriov.o \
|
||||
xe_sriov_packet.o \
|
||||
xe_sriov_pf.o \
|
||||
xe_sriov_pf_control.o \
|
||||
xe_sriov_pf_debugfs.o \
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#include "xe_gt_sriov_printk.h"
|
||||
#include "xe_guc_ct.h"
|
||||
#include "xe_sriov.h"
|
||||
#include "xe_sriov_packet.h"
|
||||
#include "xe_sriov_packet_types.h"
|
||||
#include "xe_sriov_pf_control.h"
|
||||
#include "xe_sriov_pf_migration.h"
|
||||
@@ -853,6 +854,8 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
|
||||
static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
|
||||
xe_gt_sriov_pf_migration_ring_free(gt, vfid);
|
||||
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
|
||||
@@ -1075,6 +1078,8 @@ int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid)
|
||||
static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
|
||||
xe_gt_sriov_pf_migration_ring_free(gt, vfid);
|
||||
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA);
|
||||
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE);
|
||||
@@ -1109,6 +1114,8 @@ static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid)
|
||||
|
||||
xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", vfid, data->hdr.type);
|
||||
|
||||
xe_sriov_packet_free(data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1202,8 +1209,10 @@ int xe_gt_sriov_pf_control_restore_data_done(struct xe_gt *gt, unsigned int vfid
|
||||
*/
|
||||
int xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED))
|
||||
if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED)) {
|
||||
xe_gt_sriov_pf_migration_ring_free(gt, vfid);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
pf_exit_vf_restore_wait_data(gt, vfid);
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
#include "xe_guc.h"
|
||||
#include "xe_guc_ct.h"
|
||||
#include "xe_sriov.h"
|
||||
#include "xe_sriov_packet.h"
|
||||
#include "xe_sriov_packet_types.h"
|
||||
#include "xe_sriov_pf_migration.h"
|
||||
|
||||
@@ -419,6 +420,25 @@ bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid)
|
||||
return ptr_ring_full(&pf_pick_gt_migration(gt, vfid)->ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_migration_ring_free() - Consume and free all data in migration ring
|
||||
* @gt: the &xe_gt
|
||||
* @vfid: the VF identifier
|
||||
*/
|
||||
void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid)
|
||||
{
|
||||
struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid);
|
||||
struct xe_sriov_packet *data;
|
||||
|
||||
if (ptr_ring_empty(&migration->ring))
|
||||
return;
|
||||
|
||||
xe_gt_sriov_notice(gt, "VF%u unprocessed migration data left in the ring!\n", vfid);
|
||||
|
||||
while ((data = ptr_ring_consume(&migration->ring)))
|
||||
xe_sriov_packet_free(data);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_gt_sriov_pf_migration_save_produce() - Add VF save data packet to migration ring.
|
||||
* @gt: the &xe_gt
|
||||
@@ -545,8 +565,10 @@ xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid)
|
||||
data = ptr_ring_consume(&migration->ring);
|
||||
if (data) {
|
||||
ret = xe_gt_sriov_pf_control_process_save_data(gt, vfid);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
xe_sriov_packet_free(data);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
@@ -560,11 +582,18 @@ xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid)
|
||||
return ERR_PTR(-EAGAIN);
|
||||
}
|
||||
|
||||
/* ptr_ring destructor callback: release one leftover migration packet. */
static void destroy_pf_packet(void *ptr)
{
	xe_sriov_packet_free((struct xe_sriov_packet *)ptr);
}
|
||||
|
||||
static void action_ring_cleanup(void *arg)
|
||||
{
|
||||
struct ptr_ring *r = arg;
|
||||
|
||||
ptr_ring_cleanup(r, NULL);
|
||||
ptr_ring_cleanup(r, destroy_pf_packet);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -17,6 +17,7 @@ int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vf
|
||||
|
||||
bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid);
|
||||
bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid);
|
||||
void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid);
|
||||
|
||||
int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid,
|
||||
struct xe_sriov_packet *data);
|
||||
|
||||
137
drivers/gpu/drm/xe/xe_sriov_packet.c
Normal file
137
drivers/gpu/drm/xe/xe_sriov_packet.c
Normal file
@@ -0,0 +1,137 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2025 Intel Corporation
|
||||
*/
|
||||
|
||||
#include "xe_bo.h"
|
||||
#include "xe_device.h"
|
||||
#include "xe_printk.h"
|
||||
#include "xe_sriov_packet.h"
|
||||
#include "xe_sriov_packet_types.h"
|
||||
|
||||
static bool pkt_needs_bo(struct xe_sriov_packet *data)
|
||||
{
|
||||
return data->hdr.type == XE_SRIOV_PACKET_TYPE_VRAM;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_alloc() - Allocate migration data packet
|
||||
* @xe: the &xe_device
|
||||
*
|
||||
* Only allocates the "outer" structure, without initializing the migration
|
||||
* data backing storage.
|
||||
*
|
||||
* Return: Pointer to &xe_sriov_packet on success,
|
||||
* NULL in case of error.
|
||||
*/
|
||||
struct xe_sriov_packet *xe_sriov_packet_alloc(struct xe_device *xe)
|
||||
{
|
||||
struct xe_sriov_packet *data;
|
||||
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return NULL;
|
||||
|
||||
data->xe = xe;
|
||||
data->hdr_remaining = sizeof(data->hdr);
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_free() - Free migration data packet.
|
||||
* @data: the &xe_sriov_packet
|
||||
*/
|
||||
void xe_sriov_packet_free(struct xe_sriov_packet *data)
|
||||
{
|
||||
if (IS_ERR_OR_NULL(data))
|
||||
return;
|
||||
|
||||
if (pkt_needs_bo(data))
|
||||
xe_bo_unpin_map_no_vm(data->bo);
|
||||
else
|
||||
kvfree(data->buff);
|
||||
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
static int pkt_init(struct xe_sriov_packet *data)
|
||||
{
|
||||
struct xe_gt *gt = xe_device_get_gt(data->xe, data->hdr.gt_id);
|
||||
|
||||
if (!gt)
|
||||
return -EINVAL;
|
||||
|
||||
if (data->hdr.size == 0)
|
||||
return 0;
|
||||
|
||||
if (pkt_needs_bo(data)) {
|
||||
struct xe_bo *bo;
|
||||
|
||||
bo = xe_bo_create_pin_map_novm(data->xe, gt->tile, PAGE_ALIGN(data->hdr.size),
|
||||
ttm_bo_type_kernel,
|
||||
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED, false);
|
||||
if (IS_ERR(bo))
|
||||
return PTR_ERR(bo);
|
||||
|
||||
data->bo = bo;
|
||||
data->vaddr = bo->vmap.vaddr;
|
||||
} else {
|
||||
void *buff = kvzalloc(data->hdr.size, GFP_KERNEL);
|
||||
|
||||
if (!buff)
|
||||
return -ENOMEM;
|
||||
|
||||
data->buff = buff;
|
||||
data->vaddr = buff;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define XE_SRIOV_PACKET_SUPPORTED_VERSION 1
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_init() - Initialize migration packet header and backing storage.
|
||||
* @data: the &xe_sriov_packet
|
||||
* @tile_id: tile identifier
|
||||
* @gt_id: GT identifier
|
||||
* @type: &xe_sriov_packet_type
|
||||
* @offset: offset of data packet payload (within wider resource)
|
||||
* @size: size of data packet payload
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_packet_init(struct xe_sriov_packet *data, u8 tile_id, u8 gt_id,
|
||||
enum xe_sriov_packet_type type, loff_t offset, size_t size)
|
||||
{
|
||||
data->hdr.version = XE_SRIOV_PACKET_SUPPORTED_VERSION;
|
||||
data->hdr.type = type;
|
||||
data->hdr.tile_id = tile_id;
|
||||
data->hdr.gt_id = gt_id;
|
||||
data->hdr.offset = offset;
|
||||
data->hdr.size = size;
|
||||
data->remaining = size;
|
||||
|
||||
return pkt_init(data);
|
||||
}
|
||||
|
||||
/**
|
||||
* xe_sriov_packet_init_from_hdr() - Initialize migration packet backing storage based on header.
|
||||
* @data: the &xe_sriov_packet
|
||||
*
|
||||
* Header data is expected to be filled prior to calling this function.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data)
|
||||
{
|
||||
xe_assert(data->xe, !data->hdr_remaining);
|
||||
|
||||
if (data->hdr.version != XE_SRIOV_PACKET_SUPPORTED_VERSION)
|
||||
return -EINVAL;
|
||||
|
||||
data->remaining = data->hdr.size;
|
||||
|
||||
return pkt_init(data);
|
||||
}
|
||||
22
drivers/gpu/drm/xe/xe_sriov_packet.h
Normal file
22
drivers/gpu/drm/xe/xe_sriov_packet.h
Normal file
@@ -0,0 +1,22 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2025 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _XE_SRIOV_PACKET_H_
|
||||
#define _XE_SRIOV_PACKET_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct xe_device;
|
||||
struct xe_sriov_packet;
|
||||
enum xe_sriov_packet_type;
|
||||
|
||||
struct xe_sriov_packet *xe_sriov_packet_alloc(struct xe_device *xe);
|
||||
void xe_sriov_packet_free(struct xe_sriov_packet *data);
|
||||
|
||||
int xe_sriov_packet_init(struct xe_sriov_packet *data, u8 tile_id, u8 gt_id,
|
||||
enum xe_sriov_packet_type, loff_t offset, size_t size);
|
||||
int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data);
|
||||
|
||||
#endif
|
||||
@@ -8,6 +8,25 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/**
 * enum xe_sriov_packet_type - Xe SR-IOV VF migration data packet type
 * @XE_SRIOV_PACKET_TYPE_DESCRIPTOR: Descriptor with VF device metadata
 * @XE_SRIOV_PACKET_TYPE_TRAILER: Trailer indicating end-of-stream
 * @XE_SRIOV_PACKET_TYPE_GGTT: Global GTT migration data
 * @XE_SRIOV_PACKET_TYPE_MMIO: MMIO registers migration data
 * @XE_SRIOV_PACKET_TYPE_GUC: GuC firmware migration data
 * @XE_SRIOV_PACKET_TYPE_VRAM: VRAM migration data
 */
enum xe_sriov_packet_type {
	XE_SRIOV_PACKET_TYPE_DESCRIPTOR = 1,	/* 0 left unused to catch uninitialized data */
	XE_SRIOV_PACKET_TYPE_TRAILER,
	XE_SRIOV_PACKET_TYPE_GGTT,
	XE_SRIOV_PACKET_TYPE_MMIO,
	XE_SRIOV_PACKET_TYPE_GUC,
	XE_SRIOV_PACKET_TYPE_VRAM,
};
|
||||
|
||||
/**
|
||||
* struct xe_sriov_packet_hdr - Xe SR-IOV VF migration data packet header
|
||||
*/
|
||||
|
||||
Reference in New Issue
Block a user