From 7c0389c615b5c6aef1b0e38e30db759b3cbff885 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Mon, 1 Jul 2024 21:30:30 +0200
Subject: [PATCH 01/95] drm/xe/guc: Demote GuC IDs usage message to debug

Printing a message at INFO level about the number of available GuC IDs
is not that important; DEBUG level is enough. This also matches the
message about available doorbells:

[ ] xe ... [drm:xe_guc_id_mgr_init [xe]] GT0: using 65535 GuC IDs
[ ] xe ... [drm:xe_guc_db_mgr_init [xe]] GT0: using 256 doorbells

While at it, use the proper "GuC" spelling.

Signed-off-by: Michal Wajdeczko
Reviewed-by: Rodrigo Vivi
Link: https://patchwork.freedesktop.org/patch/msgid/20240701193030.978-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_guc_id_mgr.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.c b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
index cd0549d0ef89..e845425d670b 100644
--- a/drivers/gpu/drm/xe/xe_guc_id_mgr.c
+++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
@@ -97,8 +97,8 @@ int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int limit)
 	if (ret)
 		return ret;
 
-	xe_gt_info(idm_to_gt(idm), "using %u GUC ID%s\n",
-		   idm->total, str_plural(idm->total));
+	xe_gt_dbg(idm_to_gt(idm), "using %u GuC ID%s\n",
+		  idm->total, str_plural(idm->total));
 
 	return 0;
 }

From 627c961d672d3304564455ba471f5e4405170eec Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Tue, 25 Jun 2024 17:41:37 -0700
Subject: [PATCH 02/95] drm/xe: Add timeout to preempt fences

To adhere to the dma-fencing rule that fences must signal within a
reasonable amount of time, add a 5 second timeout to preempt fences. If
this timeout occurs, kill the associated VM as this is fatal to the VM.

v2:
 - Add comment for smp_wmb (Checkpatch)
 - Fix kernel doc typo (Inspection)
 - Add comment for killed check (Niranjana)
v3:
 - Drop smp_wmb (Matthew Auld)
 - Don't take vm->lock in preempt fence worker (Matthew Auld)
 - Drop RB given changes to patch
v4:
 - Add WRITE/READ_ONCE (Niranjana)
 - Don't export xe_vm_kill (Niranjana)

Cc: Matthew Auld
Cc: Niranjana Vishwanathapura
Signed-off-by: Matthew Brost
Tested-by: Stuart Summers
Reviewed-by: Niranjana Vishwanathapura
Link: https://patchwork.freedesktop.org/patch/msgid/20240626004137.4060806-1-matthew.brost@intel.com
---
 drivers/gpu/drm/xe/xe_exec_queue_types.h |  6 ++--
 drivers/gpu/drm/xe/xe_execlist.c         |  3 +-
 drivers/gpu/drm/xe/xe_guc_submit.c       | 39 ++++++++++++++++++++----
 drivers/gpu/drm/xe/xe_preempt_fence.c    | 12 ++++++--
 drivers/gpu/drm/xe/xe_vm.c               | 12 +++++++-
 5 files changed, 59 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 201588ec33c3..ded9f9396429 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
 	int (*suspend)(struct xe_exec_queue *q);
 	/**
 	 * @suspend_wait: Wait for an exec queue to suspend executing, should be
-	 * called after suspend.
+	 * called after suspend. In the dma-fencing path this must return within
+	 * a reasonable amount of time. An -ETIME return indicates an error
+	 * waiting for suspend, resulting in the associated VM being killed.
 	 */
-	void (*suspend_wait)(struct xe_exec_queue *q);
+	int (*suspend_wait)(struct xe_exec_queue *q);
 	/**
 	 * @resume: Resume exec queue execution, exec queue must be in a suspended
 	 * state and dma fence returned from most recent suspend call must be
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index db906117db6d..7502e3486eaf 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
 	return 0;
 }
 
-static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
 {
 	/* NIY */
+	return 0;
 }
 
 static void execlist_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 373447758a60..6392381e8e69 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1301,6 +1301,15 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
 	kfree(msg);
 }
 
+static void __suspend_fence_signal(struct xe_exec_queue *q)
+{
+	if (!q->guc->suspend_pending)
+		return;
+
+	WRITE_ONCE(q->guc->suspend_pending, false);
+	wake_up(&q->guc->suspend_wait);
+}
+
 static void suspend_fence_signal(struct xe_exec_queue *q)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
@@ -1310,9 +1319,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
 			   guc_read_stopped(guc));
 	xe_assert(xe, q->guc->suspend_pending);
 
-	q->guc->suspend_pending = false;
-	smp_wmb();
-	wake_up(&q->guc->suspend_wait);
+	__suspend_fence_signal(q);
 }
 
 static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
@@ -1465,6 +1472,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
 {
 	trace_xe_exec_queue_kill(q);
 	set_exec_queue_killed(q);
+	__suspend_fence_signal(q);
 	xe_guc_exec_queue_trigger_cleanup(q);
 }
 
@@ -1561,12 +1569,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
 	return 0;
 }
 
-static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
+	int ret;
 
-	wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
-		   guc_read_stopped(guc));
+	/*
+	 * Likely don't need to check exec_queue_killed() as we clear
+	 * suspend_pending upon kill, but to guard against races in which
+	 * suspend_pending is set after kill, also check killed here.
+	 */
+	ret = wait_event_timeout(q->guc->suspend_wait,
+				 !READ_ONCE(q->guc->suspend_pending) ||
+				 exec_queue_killed(q) ||
+				 guc_read_stopped(guc),
+				 HZ * 5);
+
+	if (!ret) {
+		xe_gt_warn(guc_to_gt(guc),
+			   "Suspend fence, guc_id=%d, failed to respond",
+			   q->guc->id);
+		/* XXX: Trigger GT reset?
 */
+		return -ETIME;
+	}
+
+	return 0;
 }
 
 static void guc_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index e8b8ae5c6485..56e709d2fb30 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -17,10 +17,16 @@ static void preempt_fence_work_func(struct work_struct *w)
 		container_of(w, typeof(*pfence), preempt_work);
 	struct xe_exec_queue *q = pfence->q;
 
-	if (pfence->error)
+	if (pfence->error) {
 		dma_fence_set_error(&pfence->base, pfence->error);
-	else
-		q->ops->suspend_wait(q);
+	} else if (!q->ops->reset_status(q)) {
+		int err = q->ops->suspend_wait(q);
+
+		if (err)
+			dma_fence_set_error(&pfence->base, err);
+	} else {
+		dma_fence_set_error(&pfence->base, -ENOENT);
+	}
 
 	dma_fence_signal(&pfence->base);
 	/*
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5b166fa03684..0c764647a552 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -133,8 +133,10 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
 		if (q->lr.pfence) {
 			long timeout = dma_fence_wait(q->lr.pfence, false);
 
-			if (timeout < 0)
+			/* Only -ETIME on the fence indicates the VM needs to be killed */
+			if (timeout < 0 || q->lr.pfence->error == -ETIME)
 				return -ETIME;
+
 			dma_fence_put(q->lr.pfence);
 			q->lr.pfence = NULL;
 		}
@@ -311,6 +313,14 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
 
 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
 
+/*
+ * xe_vm_kill() - VM Kill
+ * @vm: The VM.
+ * @unlocked: Flag indicating the VM's dma-resv is not held
+ *
+ * Kill the VM by setting the banned flag, indicating the VM is no longer
+ * available for use. If in preempt fence mode, also kill all exec queues
+ * attached to the VM.
+ */
 static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
 {
 	struct xe_exec_queue *q;

From 8169b2097d88d99d7e4a72e20e4b549efe9eb8d7 Mon Sep 17 00:00:00 2001
From: Ashutosh Dixit
Date: Wed, 3 Jul 2024 09:48:01 -0700
Subject: [PATCH 03/95] drm/xe/uapi: Rename xe perf layer as xe observation
 layer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In Xe, the perf layer allows capture of HW counter streams. These HW
counters are generally performance related, but don't necessarily have
to be. Also, the name "perf" is a carryover from i915 and is not
preferred. Here we propose the name "observation" for this common
layer, which allows capture of different types of these counter
streams.
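To make the renamed uAPI concrete, here is a minimal, hypothetical
userspace sketch (not part of this patch; it assumes an open render
node fd "fd" and a property chain "props" built as described in the OA
stream-open documentation, with all error handling elided):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/xe_drm.h>	/* uapi header; include path may vary */

	struct drm_xe_observation_param param = {
		.extensions = 0,
		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
		.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
		/* param points to the stream properties to use */
		.param = (__u64)(uintptr_t)props,
	};
	/* On success, stream open is expected to return a new stream fd */
	int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &param);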
v2: Rename observability layer to observation layer (Lucas/Rodrigo) v3: Rename sysctl file to "observation_paranoid" (Jose) Fixes: 52c2e956dceb ("drm/xe/perf/uapi: "Perf" layer to support multiple perf counter stream types") Fixes: fe8929bdf835 ("drm/xe/perf/uapi: Add perf_stream_paranoid sysctl") Acked-by: Lucas De Marchi Acked-by: Rodrigo Vivi Signed-off-by: Ashutosh Dixit Reviewed-by: Umesh Nerlige Ramappa Acked-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20240703164801.2561423-1-ashutosh.dixit@intel.com --- drivers/gpu/drm/xe/Makefile | 2 +- drivers/gpu/drm/xe/xe_device.c | 4 +- drivers/gpu/drm/xe/xe_device_types.h | 2 +- drivers/gpu/drm/xe/xe_gt_types.h | 2 +- drivers/gpu/drm/xe/xe_module.c | 6 +- drivers/gpu/drm/xe/xe_oa.c | 34 ++++----- drivers/gpu/drm/xe/xe_observation.c | 93 ++++++++++++++++++++++++ drivers/gpu/drm/xe/xe_observation.h | 20 ++++++ drivers/gpu/drm/xe/xe_perf.c | 92 ------------------------ drivers/gpu/drm/xe/xe_perf.h | 20 ------ include/uapi/drm/xe_drm.h | 102 ++++++++++++++------------- 11 files changed, 190 insertions(+), 187 deletions(-) create mode 100644 drivers/gpu/drm/xe/xe_observation.c create mode 100644 drivers/gpu/drm/xe/xe_observation.h delete mode 100644 drivers/gpu/drm/xe/xe_perf.c delete mode 100644 drivers/gpu/drm/xe/xe_perf.h diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index b1e03bfe4a68..628c245c4822 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -96,10 +96,10 @@ xe-y += xe_bb.o \ xe_mocs.o \ xe_module.o \ xe_oa.o \ + xe_observation.o \ xe_pat.o \ xe_pci.o \ xe_pcode.o \ - xe_perf.o \ xe_pm.o \ xe_preempt_fence.o \ xe_pt.o \ diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index cfda7cb5df2c..03492fbcb8fb 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -42,9 +42,9 @@ #include "xe_memirq.h" #include "xe_mmio.h" #include "xe_module.h" +#include "xe_observation.h" #include "xe_pat.h" #include "xe_pcode.h" -#include "xe_perf.h" #include "xe_pm.h" #include "xe_query.h" #include "xe_sriov.h" @@ -142,7 +142,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = { DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(XE_PERF, xe_perf_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW), }; static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index c37be471d11c..3bca6d344744 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -463,7 +463,7 @@ struct xe_device { /** @heci_gsc: graphics security controller */ struct xe_heci_gsc heci_gsc; - /** @oa: oa perf counter subsystem */ + /** @oa: oa observation subsystem */ struct xe_oa oa; /** @needs_flr_on_fini: requests function-reset on fini */ diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index 24bb95de920f..6b5e0b45efb0 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -389,7 +389,7 @@ struct xe_gt { u8 instances_per_class[XE_ENGINE_CLASS_MAX]; } user_engines; - /** @oa: oa perf counter subsystem per gt info */ + /** @oa: oa observation subsystem per gt info */ struct xe_oa_gt oa; }; diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index 893858a2eea0..499540add465 100644 --- 
a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -11,7 +11,7 @@ #include "xe_drv.h" #include "xe_hw_fence.h" #include "xe_pci.h" -#include "xe_perf.h" +#include "xe_observation.h" #include "xe_sched_job.h" struct xe_modparam xe_modparam = { @@ -80,8 +80,8 @@ static const struct init_funcs init_funcs[] = { .exit = xe_unregister_pci_driver, }, { - .init = xe_perf_sysctl_register, - .exit = xe_perf_sysctl_unregister, + .init = xe_observation_sysctl_register, + .exit = xe_observation_sysctl_unregister, }, }; diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 4188516a7816..6d69f751bf78 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -32,7 +32,7 @@ #include "xe_macros.h" #include "xe_mmio.h" #include "xe_oa.h" -#include "xe_perf.h" +#include "xe_observation.h" #include "xe_pm.h" #include "xe_sched_job.h" #include "xe_sriov.h" @@ -481,7 +481,7 @@ static int __xe_oa_read(struct xe_oa_stream *stream, char __user *buf, OASTATUS_RELEVANT_BITS, 0); /* * Signal to userspace that there is non-zero OA status to read via - * @DRM_XE_PERF_IOCTL_STATUS perf fd ioctl + * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl */ if (stream->oa_status & OASTATUS_RELEVANT_BITS) return -EIO; @@ -1158,15 +1158,15 @@ static long xe_oa_ioctl_locked(struct xe_oa_stream *stream, unsigned long arg) { switch (cmd) { - case DRM_XE_PERF_IOCTL_ENABLE: + case DRM_XE_OBSERVATION_IOCTL_ENABLE: return xe_oa_enable_locked(stream); - case DRM_XE_PERF_IOCTL_DISABLE: + case DRM_XE_OBSERVATION_IOCTL_DISABLE: return xe_oa_disable_locked(stream); - case DRM_XE_PERF_IOCTL_CONFIG: + case DRM_XE_OBSERVATION_IOCTL_CONFIG: return xe_oa_config_locked(stream, arg); - case DRM_XE_PERF_IOCTL_STATUS: + case DRM_XE_OBSERVATION_IOCTL_STATUS: return xe_oa_status_locked(stream, arg); - case DRM_XE_PERF_IOCTL_INFO: + case DRM_XE_OBSERVATION_IOCTL_INFO: return xe_oa_info_locked(stream, arg); } @@ -1209,7 +1209,7 @@ static int xe_oa_release(struct inode *inode, struct file *file) xe_oa_destroy_locked(stream); mutex_unlock(>->oa.gt_lock); - /* Release the reference the perf stream kept on the driver */ + /* Release the reference the OA stream kept on the driver */ drm_dev_put(>_to_xe(gt)->drm); return 0; @@ -1222,7 +1222,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma) unsigned long start = vma->vm_start; int i, ret; - if (xe_perf_stream_paranoid && !perfmon_capable()) { + if (xe_observation_paranoid && !perfmon_capable()) { drm_dbg(&stream->oa->xe->drm, "Insufficient privilege to map OA buffer\n"); return -EACCES; } @@ -1789,8 +1789,8 @@ static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number * @file: @drm_file * * The functions opens an OA stream. 
An OA stream, opened with specified - * properties, enables perf counter samples to be collected, either - * periodically (time based sampling), or on request (using perf queries) + * properties, enables OA counter samples to be collected, either + * periodically (time based sampling), or on request (using OA queries) */ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *file) { @@ -1836,8 +1836,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f privileged_op = true; } - if (privileged_op && xe_perf_stream_paranoid && !perfmon_capable()) { - drm_dbg(&oa->xe->drm, "Insufficient privileges to open xe perf stream\n"); + if (privileged_op && xe_observation_paranoid && !perfmon_capable()) { + drm_dbg(&oa->xe->drm, "Insufficient privileges to open xe OA stream\n"); ret = -EACCES; goto err_exec_q; } @@ -2097,7 +2097,7 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi return -ENODEV; } - if (xe_perf_stream_paranoid && !perfmon_capable()) { + if (xe_observation_paranoid && !perfmon_capable()) { drm_dbg(&oa->xe->drm, "Insufficient privileges to add xe OA config\n"); return -EACCES; } @@ -2181,7 +2181,7 @@ reg_err: /** * xe_oa_remove_config_ioctl - Removes one OA config * @dev: @drm_device - * @data: pointer to struct @drm_xe_perf_param + * @data: pointer to struct @drm_xe_observation_param * @file: @drm_file */ int xe_oa_remove_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file) @@ -2197,7 +2197,7 @@ int xe_oa_remove_config_ioctl(struct drm_device *dev, u64 data, struct drm_file return -ENODEV; } - if (xe_perf_stream_paranoid && !perfmon_capable()) { + if (xe_observation_paranoid && !perfmon_capable()) { drm_dbg(&oa->xe->drm, "Insufficient privileges to remove xe OA config\n"); return -EACCES; } @@ -2381,7 +2381,7 @@ static int xe_oa_init_gt(struct xe_gt *gt) /* * Fused off engines can result in oa_unit's with num_engines == 0. These units - * will appear in OA unit query, but no perf streams can be opened on them. + * will appear in OA unit query, but no OA streams can be opened on them. */ gt->oa.num_oa_units = num_oa_units; gt->oa.oa_unit = u; diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c new file mode 100644 index 000000000000..fcb584b42a7d --- /dev/null +++ b/drivers/gpu/drm/xe/xe_observation.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023-2024 Intel Corporation + */ + +#include +#include + +#include + +#include "xe_oa.h" +#include "xe_observation.h" + +u32 xe_observation_paranoid = true; +static struct ctl_table_header *sysctl_header; + +static int xe_oa_ioctl(struct drm_device *dev, struct drm_xe_observation_param *arg, + struct drm_file *file) +{ + switch (arg->observation_op) { + case DRM_XE_OBSERVATION_OP_STREAM_OPEN: + return xe_oa_stream_open_ioctl(dev, arg->param, file); + case DRM_XE_OBSERVATION_OP_ADD_CONFIG: + return xe_oa_add_config_ioctl(dev, arg->param, file); + case DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: + return xe_oa_remove_config_ioctl(dev, arg->param, file); + default: + return -EINVAL; + } +} + +/** + * xe_observation_ioctl - The top level observation layer ioctl + * @dev: @drm_device + * @data: pointer to struct @drm_xe_observation_param + * @file: @drm_file + * + * The function is called for different observation streams types and + * allows execution of different operations supported by those stream + * types. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_xe_observation_param *arg = data; + + if (arg->extensions) + return -EINVAL; + + switch (arg->observation_type) { + case DRM_XE_OBSERVATION_TYPE_OA: + return xe_oa_ioctl(dev, arg, file); + default: + return -EINVAL; + } +} + +static struct ctl_table observation_ctl_table[] = { + { + .procname = "observation_paranoid", + .data = &xe_observation_paranoid, + .maxlen = sizeof(xe_observation_paranoid), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + {} +}; + +/** + * xe_observation_sysctl_register - Register xe_observation_paranoid sysctl + * + * Normally only superuser/root can access observation stream + * data. However, superuser can set xe_observation_paranoid sysctl to 0 to + * allow non-privileged users to also access observation data. + * + * Return: always returns 0 + */ +int xe_observation_sysctl_register(void) +{ + sysctl_header = register_sysctl("dev/xe", observation_ctl_table); + return 0; +} + +/** + * xe_observation_sysctl_unregister - Unregister xe_observation_paranoid sysctl + */ +void xe_observation_sysctl_unregister(void) +{ + unregister_sysctl_table(sysctl_header); +} diff --git a/drivers/gpu/drm/xe/xe_observation.h b/drivers/gpu/drm/xe/xe_observation.h new file mode 100644 index 000000000000..17816998e966 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_observation.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023-2024 Intel Corporation + */ + +#ifndef _XE_OBSERVATION_H_ +#define _XE_OBSERVATION_H_ + +#include + +struct drm_device; +struct drm_file; + +extern u32 xe_observation_paranoid; + +int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int xe_observation_sysctl_register(void); +void xe_observation_sysctl_unregister(void); + +#endif diff --git a/drivers/gpu/drm/xe/xe_perf.c b/drivers/gpu/drm/xe/xe_perf.c deleted file mode 100644 index d6cd74cadf34..000000000000 --- a/drivers/gpu/drm/xe/xe_perf.c +++ /dev/null @@ -1,92 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2023-2024 Intel Corporation - */ - -#include -#include - -#include - -#include "xe_oa.h" -#include "xe_perf.h" - -u32 xe_perf_stream_paranoid = true; -static struct ctl_table_header *sysctl_header; - -static int xe_oa_ioctl(struct drm_device *dev, struct drm_xe_perf_param *arg, - struct drm_file *file) -{ - switch (arg->perf_op) { - case DRM_XE_PERF_OP_STREAM_OPEN: - return xe_oa_stream_open_ioctl(dev, arg->param, file); - case DRM_XE_PERF_OP_ADD_CONFIG: - return xe_oa_add_config_ioctl(dev, arg->param, file); - case DRM_XE_PERF_OP_REMOVE_CONFIG: - return xe_oa_remove_config_ioctl(dev, arg->param, file); - default: - return -EINVAL; - } -} - -/** - * xe_perf_ioctl - The top level perf layer ioctl - * @dev: @drm_device - * @data: pointer to struct @drm_xe_perf_param - * @file: @drm_file - * - * The function is called for different perf streams types and allows execution - * of different operations supported by those perf stream types. - * - * Return: 0 on success or a negative error code on failure. 
- */ -int xe_perf_ioctl(struct drm_device *dev, void *data, struct drm_file *file) -{ - struct drm_xe_perf_param *arg = data; - - if (arg->extensions) - return -EINVAL; - - switch (arg->perf_type) { - case DRM_XE_PERF_TYPE_OA: - return xe_oa_ioctl(dev, arg, file); - default: - return -EINVAL; - } -} - -static struct ctl_table perf_ctl_table[] = { - { - .procname = "perf_stream_paranoid", - .data = &xe_perf_stream_paranoid, - .maxlen = sizeof(xe_perf_stream_paranoid), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, - }, - {} -}; - -/** - * xe_perf_sysctl_register - Register "perf_stream_paranoid" sysctl - * - * Normally only superuser/root can access perf counter data. However, - * superuser can set perf_stream_paranoid sysctl to 0 to allow non-privileged - * users to also access perf data. - * - * Return: always returns 0 - */ -int xe_perf_sysctl_register(void) -{ - sysctl_header = register_sysctl("dev/xe", perf_ctl_table); - return 0; -} - -/** - * xe_perf_sysctl_unregister - Unregister "perf_stream_paranoid" sysctl - */ -void xe_perf_sysctl_unregister(void) -{ - unregister_sysctl_table(sysctl_header); -} diff --git a/drivers/gpu/drm/xe/xe_perf.h b/drivers/gpu/drm/xe/xe_perf.h deleted file mode 100644 index 53a8377a1bb1..000000000000 --- a/drivers/gpu/drm/xe/xe_perf.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023-2024 Intel Corporation - */ - -#ifndef _XE_PERF_H_ -#define _XE_PERF_H_ - -#include - -struct drm_device; -struct drm_file; - -extern u32 xe_perf_stream_paranoid; - -int xe_perf_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -int xe_perf_sysctl_register(void); -void xe_perf_sysctl_unregister(void); - -#endif diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 12eaa8532b5c..33544ef78d3e 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -80,7 +80,7 @@ extern "C" { * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY * - &DRM_IOCTL_XE_EXEC * - &DRM_IOCTL_XE_WAIT_USER_FENCE - * - &DRM_IOCTL_XE_PERF + * - &DRM_IOCTL_XE_OBSERVATION */ /* @@ -101,7 +101,7 @@ extern "C" { #define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08 #define DRM_XE_EXEC 0x09 #define DRM_XE_WAIT_USER_FENCE 0x0a -#define DRM_XE_PERF 0x0b +#define DRM_XE_OBSERVATION 0x0b /* Must be kept compact -- no holes */ @@ -116,7 +116,7 @@ extern "C" { #define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property) #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec) #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence) -#define DRM_IOCTL_XE_PERF DRM_IOW(DRM_COMMAND_BASE + DRM_XE_PERF, struct drm_xe_perf_param) +#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param) /** * DOC: Xe IOCTL Extensions @@ -1376,66 +1376,67 @@ struct drm_xe_wait_user_fence { }; /** - * enum drm_xe_perf_type - Perf stream types + * enum drm_xe_observation_type - Observation stream types */ -enum drm_xe_perf_type { - /** @DRM_XE_PERF_TYPE_OA: OA perf stream type */ - DRM_XE_PERF_TYPE_OA, +enum drm_xe_observation_type { + /** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */ + DRM_XE_OBSERVATION_TYPE_OA, }; /** - * enum drm_xe_perf_op - Perf stream ops + * enum drm_xe_observation_op - Observation stream ops */ -enum drm_xe_perf_op { - /** @DRM_XE_PERF_OP_STREAM_OPEN: 
Open a perf counter stream */
-	DRM_XE_PERF_OP_STREAM_OPEN,
+enum drm_xe_observation_op {
+	/** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */
+	DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 
-	/** @DRM_XE_PERF_OP_ADD_CONFIG: Add perf stream config */
-	DRM_XE_PERF_OP_ADD_CONFIG,
+	/** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */
+	DRM_XE_OBSERVATION_OP_ADD_CONFIG,
 
-	/** @DRM_XE_PERF_OP_REMOVE_CONFIG: Remove perf stream config */
-	DRM_XE_PERF_OP_REMOVE_CONFIG,
+	/** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */
+	DRM_XE_OBSERVATION_OP_REMOVE_CONFIG,
 };
 
 /**
- * struct drm_xe_perf_param - Input of &DRM_XE_PERF
+ * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION
  *
- * The perf layer enables multiplexing perf counter streams of multiple
- * types. The actual params for a particular stream operation are supplied
- * via the @param pointer (use __copy_from_user to get these params).
+ * The observation layer enables multiplexing observation streams of
+ * multiple types. The actual params for a particular stream operation are
+ * supplied via the @param pointer (use __copy_from_user to get these
+ * params).
  */
-struct drm_xe_perf_param {
+struct drm_xe_observation_param {
 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;
-	/** @perf_type: Perf stream type, of enum @drm_xe_perf_type */
-	__u64 perf_type;
-	/** @perf_op: Perf op, of enum @drm_xe_perf_op */
-	__u64 perf_op;
+	/** @observation_type: observation stream type, of enum @drm_xe_observation_type */
+	__u64 observation_type;
+	/** @observation_op: observation stream op, of enum @drm_xe_observation_op */
+	__u64 observation_op;
 	/** @param: Pointer to actual stream params */
 	__u64 param;
 };
 
 /**
- * enum drm_xe_perf_ioctls - Perf fd ioctl's
+ * enum drm_xe_observation_ioctls - Observation stream fd ioctl's
  *
- * Information exchanged between userspace and kernel for perf fd ioctl's
- * is stream type specific
+ * Information exchanged between userspace and kernel for observation fd
+ * ioctl's is stream type specific
  */
-enum drm_xe_perf_ioctls {
-	/** @DRM_XE_PERF_IOCTL_ENABLE: Enable data capture for a stream */
-	DRM_XE_PERF_IOCTL_ENABLE = _IO('i', 0x0),
+enum drm_xe_observation_ioctls {
+	/** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */
+	DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0),
 
-	/** @DRM_XE_PERF_IOCTL_DISABLE: Disable data capture for a stream */
-	DRM_XE_PERF_IOCTL_DISABLE = _IO('i', 0x1),
+	/** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for an observation stream */
+	DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1),
 
-	/** @DRM_XE_PERF_IOCTL_CONFIG: Change stream configuration */
-	DRM_XE_PERF_IOCTL_CONFIG = _IO('i', 0x2),
+	/** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */
+	DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2),
 
-	/** @DRM_XE_PERF_IOCTL_STATUS: Return stream status */
-	DRM_XE_PERF_IOCTL_STATUS = _IO('i', 0x3),
+	/** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */
+	DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3),
 
-	/** @DRM_XE_PERF_IOCTL_INFO: Return stream info */
-	DRM_XE_PERF_IOCTL_INFO = _IO('i', 0x4),
+	/** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */
+	DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4),
 };
 
 /**
@@ -1546,12 +1547,12 @@ enum drm_xe_oa_format_type {
  * Stream params are specified as a chain of @drm_xe_ext_set_property
  * struct's, with @property values from enum
@drm_xe_oa_property_id and * @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY. - * @param field in struct @drm_xe_perf_param points to the first + * @param field in struct @drm_xe_observation_param points to the first * @drm_xe_ext_set_property struct. * - * Exactly the same mechanism is also used for stream reconfiguration using - * the @DRM_XE_PERF_IOCTL_CONFIG perf fd ioctl, though only a subset of - * properties below can be specified for stream reconfiguration. + * Exactly the same mechanism is also used for stream reconfiguration using the + * @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a + * subset of properties below can be specified for stream reconfiguration. */ enum drm_xe_oa_property_id { #define DRM_XE_OA_EXTENSION_SET_PROPERTY 0 @@ -1571,11 +1572,11 @@ enum drm_xe_oa_property_id { /** * @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA - * reports, previously added via @DRM_XE_PERF_OP_ADD_CONFIG. + * reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG. */ DRM_XE_OA_PROPERTY_OA_METRIC_SET, - /** @DRM_XE_OA_PROPERTY_OA_FORMAT: Perf counter report format */ + /** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */ DRM_XE_OA_PROPERTY_OA_FORMAT, /* * OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942, @@ -1596,13 +1597,13 @@ enum drm_xe_oa_property_id { /** * @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA - * stream in a DISABLED state (see @DRM_XE_PERF_IOCTL_ENABLE). + * stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE). */ DRM_XE_OA_PROPERTY_OA_DISABLED, /** * @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific - * @exec_queue_id. Perf queries can be executed on this exec queue. + * @exec_queue_id. OA queries can be executed on this exec queue. */ DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID, @@ -1622,7 +1623,7 @@ enum drm_xe_oa_property_id { /** * struct drm_xe_oa_config - OA metric configuration * - * Multiple OA configs can be added using @DRM_XE_PERF_OP_ADD_CONFIG. A + * Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A * particular config can be specified when opening an OA stream using * @DRM_XE_OA_PROPERTY_OA_METRIC_SET property. */ @@ -1645,8 +1646,9 @@ struct drm_xe_oa_config { /** * struct drm_xe_oa_stream_status - OA stream status returned from - * @DRM_XE_PERF_IOCTL_STATUS perf fd ioctl. Userspace can call the ioctl to - * query stream status in response to EIO errno from perf fd read(). + * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can + * call the ioctl to query stream status in response to EIO errno from + * observation fd read(). */ struct drm_xe_oa_stream_status { /** @extensions: Pointer to the first extension struct, if any */ @@ -1665,7 +1667,7 @@ struct drm_xe_oa_stream_status { /** * struct drm_xe_oa_stream_info - OA stream info returned from - * @DRM_XE_PERF_IOCTL_INFO perf fd ioctl + * @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl */ struct drm_xe_oa_stream_info { /** @extensions: Pointer to the first extension struct, if any */ From 67d90d679eb3447f73e2a1fe55f7e6c6a44c9fa1 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Wed, 3 Jul 2024 21:16:46 -0700 Subject: [PATCH 04/95] drm/xe: s/xe_tile_migrate_engine/xe_tile_migrate_exec_queue Engine is old nomenclature, replace with exec queue. 
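As a brief illustration (a hypothetical call site, not part of this
patch), callers simply pick up the new name:

	/* Fetch the tile's default migrate exec queue (formerly "engine") */
	struct xe_exec_queue *q = xe_tile_migrate_exec_queue(tile);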
Signed-off-by: Matthew Brost
Reviewed-by: Jonathan Cavitt
Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-2-matthew.brost@intel.com
---
 drivers/gpu/drm/xe/xe_migrate.c | 9 ++++-----
 drivers/gpu/drm/xe/xe_migrate.h | 2 +-
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index c9f5673353ee..ef5ad0efc5dd 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -84,15 +84,14 @@ struct xe_migrate {
 #define MAX_PTE_PER_SDI 0x1FE
 
 /**
- * xe_tile_migrate_engine() - Get this tile's migrate engine.
+ * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
  * @tile: The tile.
  *
- * Returns the default migrate engine of this tile.
- * TODO: Perhaps this function is slightly misplaced, and even unneeded?
+ * Returns the default migrate exec queue of this tile.
  *
- * Return: The default migrate engine
+ * Return: The default migrate exec queue
  */
-struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
+struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
 {
 	return tile->migrate->q;
 }
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 951f19318ea4..a5bcaafe4a99 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -106,5 +106,5 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 
 void xe_migrate_wait(struct xe_migrate *m);
 
-struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile);
+struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile);
 #endif

From 2e524668c440104633af1effcc85d1d6234c7ccf Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Wed, 3 Jul 2024 21:16:47 -0700
Subject: [PATCH 05/95] drm/xe: Add xe_vm_pgtable_update_op to xe_vma_ops

Each xe_vma_op resolves to 0-3 pt_ops. Add storage for the pt_ops to
xe_vma_ops, dynamically allocated based on the number and types of
xe_vma_op in the xe_vma_ops list. Only the allocation is implemented in
this patch. This will help with converting xe_vma_ops (multiple
xe_vma_op) into an atomic update unit.

Signed-off-by: Matthew Brost
Reviewed-by: Jonathan Cavitt
Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-3-matthew.brost@intel.com
---
 drivers/gpu/drm/xe/xe_pt_types.h | 12 ++++++
 drivers/gpu/drm/xe/xe_vm.c       | 66 +++++++++++++++++++++++++++++++-
 drivers/gpu/drm/xe/xe_vm_types.h |  8 ++++
 3 files changed, 84 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index cee70cb0f014..2093150f461e 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -74,4 +74,16 @@ struct xe_vm_pgtable_update {
 	u32 flags;
 };
 
+/** struct xe_vm_pgtable_update_op - Page table update operation */
+struct xe_vm_pgtable_update_op {
+	/** @entries: entries to update for this operation */
+	struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+	/** @num_entries: number of entries for this update operation */
+	u32 num_entries;
+	/** @bind: is a bind */
+	bool bind;
+	/** @rebind: is a rebind */
+	bool rebind;
+};
+
 #endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 0c764647a552..6677874af5a4 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -718,6 +718,42 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
 		list_empty_careful(&vm->userptr.invalidated)) ?
0 : -EAGAIN; } +static int xe_vma_ops_alloc(struct xe_vma_ops *vops) +{ + int i; + + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) { + if (!vops->pt_update_ops[i].num_ops) + continue; + + vops->pt_update_ops[i].ops = + kmalloc_array(vops->pt_update_ops[i].num_ops, + sizeof(*vops->pt_update_ops[i].ops), + GFP_KERNEL); + if (!vops->pt_update_ops[i].ops) + return -ENOMEM; + } + + return 0; +} + +static void xe_vma_ops_fini(struct xe_vma_ops *vops) +{ + int i; + + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) + kfree(vops->pt_update_ops[i].ops); +} + +static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask) +{ + int i; + + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) + if (BIT(i) & tile_mask) + ++vops->pt_update_ops[i].num_ops; +} + static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma, u8 tile_mask) { @@ -745,6 +781,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma, xe_vm_populate_rebind(op, vma, tile_mask); list_add_tail(&op->link, &vops->list); + xe_vma_ops_incr_pt_update_ops(vops, tile_mask); return 0; } @@ -785,6 +822,10 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) goto free_ops; } + err = xe_vma_ops_alloc(&vops); + if (err) + goto free_ops; + fence = ops_execute(vm, &vops); if (IS_ERR(fence)) { err = PTR_ERR(fence); @@ -799,6 +840,7 @@ free_ops: list_del(&op->link); kfree(op); } + xe_vma_ops_fini(&vops); return err; } @@ -820,12 +862,20 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma if (err) return ERR_PTR(err); + err = xe_vma_ops_alloc(&vops); + if (err) { + fence = ERR_PTR(err); + goto free_ops; + } + fence = ops_execute(vm, &vops); +free_ops: list_for_each_entry_safe(op, next_op, &vops.list, link) { list_del(&op->link); kfree(op); } + xe_vma_ops_fini(&vops); return fence; } @@ -2287,7 +2337,6 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) return err; } - static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, struct drm_gpuva_ops *ops, struct xe_sync_entry *syncs, u32 num_syncs, @@ -2339,6 +2388,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, return PTR_ERR(vma); op->map.vma = vma; + if (op->map.immediate || !xe_vm_in_fault_mode(vm)) + xe_vma_ops_incr_pt_update_ops(vops, + op->tile_mask); break; } case DRM_GPUVA_OP_REMAP: @@ -2383,6 +2435,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx", (ULL)op->remap.start, (ULL)op->remap.range); + } else { + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); } } @@ -2419,13 +2473,16 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx", (ULL)op->remap.start, (ULL)op->remap.range); + } else { + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); } } + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); break; } case DRM_GPUVA_OP_UNMAP: case DRM_GPUVA_OP_PREFETCH: - /* Nothing to do */ + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); break; default: drm_warn(&vm->xe->drm, "NOT POSSIBLE"); @@ -3272,11 +3329,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto unwind_ops; } + err = xe_vma_ops_alloc(&vops); + if (err) + goto unwind_ops; + err = vm_bind_ioctl_ops_execute(vm, &vops); unwind_ops: if (err && err != -ENODATA) vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); + xe_vma_ops_fini(&vops); for (i = 
args->num_binds - 1; i >= 0; --i)
 		if (ops[i])
 			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ce1a63a5e3e7..211c88801182 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -21,6 +21,7 @@ struct xe_bo;
 struct xe_sync_entry;
 struct xe_user_fence;
 struct xe_vm;
+struct xe_vm_pgtable_update_op;
 
 #define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
 #define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
@@ -368,6 +369,13 @@ struct xe_vma_ops {
 	struct xe_sync_entry *syncs;
 	/** @num_syncs: number of syncs */
 	u32 num_syncs;
+	/** @pt_update_ops: page table update operations */
+	struct {
+		/** @ops: operations */
+		struct xe_vm_pgtable_update_op *ops;
+		/** @num_ops: number of operations */
+		u32 num_ops;
+	} pt_update_ops[XE_MAX_TILES_PER_DEVICE];
 };
 
 #endif

From 96e7ebb220f8a873321cfc5a87bc4533d36ec444 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Wed, 3 Jul 2024 21:16:48 -0700
Subject: [PATCH 06/95] drm/xe: Add xe_exec_queue_last_fence_test_dep

Helpful to determine if a bind can immediately be done on the CPU or
needs to be deferred to a drm scheduler job.

v7:
 - Better wording in kernel doc (Matthew Auld)

Signed-off-by: Matthew Brost
Reviewed-by: Matthew Auld
Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-4-matthew.brost@intel.com
---
 drivers/gpu/drm/xe/xe_exec_queue.c | 23 +++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_exec_queue.h |  2 ++
 2 files changed, 25 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 0ba37835849b..3336a01a1006 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -906,3 +906,26 @@ void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
 	xe_exec_queue_last_fence_put(q, vm);
 	q->last_fence = dma_fence_get(fence);
 }
+
+/**
+ * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Returns:
+ * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
+ */
+int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
+{
+	struct dma_fence *fence;
+	int err = 0;
+
+	fence = xe_exec_queue_last_fence_get(q, vm);
+	if (fence) {
+		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
+		      0 : -ETIME;
+		dma_fence_put(fence);
+	}
+
+	return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 289a3a51d2a2..ded77b0f3b90 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -75,6 +75,8 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
 					       struct xe_vm *vm);
 void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
 				  struct dma_fence *fence);
+int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
+				      struct xe_vm *vm);
 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
 
 #endif

From e8babb280b5ef904df54b3a90e5a7e3a9600c4a9 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Wed, 3 Jul 2024 21:16:49 -0700
Subject: [PATCH 07/95] drm/xe: Convert multiple bind ops into single job
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This aligns with the uAPI, where an array of binds (or a single bind)
that results in multiple GPUVA ops is considered a single atomic
operation.
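Condensed from the xe_vm_bind_ioctl() changes in this patch, the
resulting flow is roughly the following (a simplified sketch with
signatures abbreviated and error paths condensed, not verbatim code):

	/* Parse the GPUVA ops, counting the PT ops each one will need */
	err = vm_bind_ioctl_ops_parse(vm, q, ops, syncs, num_syncs, &vops, ...);
	if (!err)
		/* Allocate storage for 0-3 PT ops per VMA op */
		err = xe_vma_ops_alloc(&vops);
	if (!err)
		/* Execute all ops as a single unit */
		err = vm_bind_ioctl_ops_execute(vm, &vops);
	if (err && err != -ENODATA)
		/* On failure, unwind the PT and VMA state as a whole */
		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
	xe_vma_ops_fini(&vops);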
The design is roughly:
- xe_vma_ops is a list of xe_vma_op (GPUVA op)
- each xe_vma_op resolves to 0-3 PT ops
- xe_vma_ops creates a single job
- if at any point during binding a failure occurs, xe_vma_ops contains
  the information necessary to unwind the PT and VMA (GPUVA) state

v2:
 - Add missing dma-resv slot reservation (CI, testing)
v4:
 - Fix TLB invalidation (Paulo)
 - Add missing xe_sched_job_last_fence_add/test_dep check (Inspection)
v5:
 - Invert i, j usage (Matthew Auld)
 - Add helper to test and add job dep (Matthew Auld)
 - Return on anything but -ETIME for cpu bind (Matthew Auld)
 - Return -ENOBUFS if suballoc of BB fails due to size (Matthew Auld)
 - s/do/Do (Matthew Auld)
 - Add missing comma (Matthew Auld)
 - Do not assign return value to xe_range_fence_insert (Matthew Auld)
v6:
 - s/0x1ff/MAX_PTE_PER_SDI (Matthew Auld, CI)
 - Check for too large an SA in Xe to avoid triggering a WARN (Matthew Auld)
 - Fix checkpatch issues
v7:
 - Rebase
 - Support more than 510 PTE updates in a bind job (Paulo, mesa testing)
v8:
 - Rebase

Cc: Thomas Hellström
Signed-off-by: Matthew Brost
Reviewed-by: Matthew Auld
Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-5-matthew.brost@intel.com
---
 drivers/gpu/drm/xe/xe_bo_types.h |    2 +
 drivers/gpu/drm/xe/xe_migrate.c  |  329 +++++----
 drivers/gpu/drm/xe/xe_migrate.h  |   32 +-
 drivers/gpu/drm/xe/xe_pt.c       | 1126 +++++++++++++++++++-----------
 drivers/gpu/drm/xe/xe_pt.h       |   14 +-
 drivers/gpu/drm/xe/xe_pt_types.h |   36 +
 drivers/gpu/drm/xe/xe_sa.c       |    7 +
 drivers/gpu/drm/xe/xe_vm.c       |  521 +++-----------
 drivers/gpu/drm/xe/xe_vm.h       |    2 +
 drivers/gpu/drm/xe/xe_vm_types.h |   45 +-
 10 files changed, 1076 insertions(+), 1038 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 86422e113d39..02d68873558a 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -58,6 +58,8 @@ struct xe_bo {
 #endif
 	/** @freed: List node for delayed put.
*/ struct llist_node freed; + /** @update_index: Update index if PT BO */ + int update_index; /** @created: Whether the bo has passed initial creation */ bool created; diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index ef5ad0efc5dd..fa23a7e7ec43 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -1125,6 +1125,7 @@ err_sync: } static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, + const struct xe_vm_pgtable_update_op *pt_op, const struct xe_vm_pgtable_update *update, struct xe_migrate_pt_update *pt_update) { @@ -1159,8 +1160,12 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); bb->cs[bb->len++] = lower_32_bits(addr); bb->cs[bb->len++] = upper_32_bits(addr); - ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk, - update); + if (pt_op->bind) + ops->populate(pt_update, tile, NULL, bb->cs + bb->len, + ofs, chunk, update); + else + ops->clear(pt_update, tile, NULL, bb->cs + bb->len, + ofs, chunk, update); bb->len += chunk * 2; ofs += chunk; @@ -1185,114 +1190,58 @@ struct migrate_test_params { static struct dma_fence * xe_migrate_update_pgtables_cpu(struct xe_migrate *m, - struct xe_vm *vm, struct xe_bo *bo, - const struct xe_vm_pgtable_update *updates, - u32 num_updates, bool wait_vm, struct xe_migrate_pt_update *pt_update) { XE_TEST_DECLARE(struct migrate_test_params *test = to_migrate_test_params (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));) const struct xe_migrate_pt_update_ops *ops = pt_update->ops; - struct dma_fence *fence; + struct xe_vm *vm = pt_update->vops->vm; + struct xe_vm_pgtable_update_ops *pt_update_ops = + &pt_update->vops->pt_update_ops[pt_update->tile_id]; int err; - u32 i; + u32 i, j; if (XE_TEST_ONLY(test && test->force_gpu)) return ERR_PTR(-ETIME); - if (bo && !dma_resv_test_signaled(bo->ttm.base.resv, - DMA_RESV_USAGE_KERNEL)) - return ERR_PTR(-ETIME); - - if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP)) - return ERR_PTR(-ETIME); - if (ops->pre_commit) { pt_update->job = NULL; err = ops->pre_commit(pt_update); if (err) return ERR_PTR(err); } - for (i = 0; i < num_updates; i++) { - const struct xe_vm_pgtable_update *update = &updates[i]; - ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL, - update->ofs, update->qwords, update); - } + for (i = 0; i < pt_update_ops->num_ops; ++i) { + const struct xe_vm_pgtable_update_op *pt_op = + &pt_update_ops->ops[i]; - if (vm) { - trace_xe_vm_cpu_bind(vm); - xe_device_wmb(vm->xe); - } + for (j = 0; j < pt_op->num_entries; j++) { + const struct xe_vm_pgtable_update *update = + &pt_op->entries[j]; - fence = dma_fence_get_stub(); - - return fence; -} - -static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs) -{ - struct dma_fence *fence; - int i; - - for (i = 0; i < num_syncs; i++) { - fence = syncs[i].fence; - - if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &fence->flags)) - return false; - } - if (q) { - fence = xe_exec_queue_last_fence_get(q, vm); - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - dma_fence_put(fence); - return false; + if (pt_op->bind) + ops->populate(pt_update, m->tile, + &update->pt_bo->vmap, NULL, + update->ofs, update->qwords, + update); + else + ops->clear(pt_update, m->tile, + &update->pt_bo->vmap, NULL, + update->ofs, update->qwords, update); } - dma_fence_put(fence); } - return true; + 
trace_xe_vm_cpu_bind(vm); + xe_device_wmb(vm->xe); + + return dma_fence_get_stub(); } -/** - * xe_migrate_update_pgtables() - Pipelined page-table update - * @m: The migrate context. - * @vm: The vm we'll be updating. - * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr. - * @q: The exec queue to be used for the update or NULL if the default - * migration engine is to be used. - * @updates: An array of update descriptors. - * @num_updates: Number of descriptors in @updates. - * @syncs: Array of xe_sync_entry to await before updating. Note that waits - * will block the engine timeline. - * @num_syncs: Number of entries in @syncs. - * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains - * pointers to callback functions and, if subclassed, private arguments to - * those. - * - * Perform a pipelined page-table update. The update descriptors are typically - * built under the same lock critical section as a call to this function. If - * using the default engine for the updates, they will be performed in the - * order they grab the job_mutex. If different engines are used, external - * synchronization is needed for overlapping updates to maintain page-table - * consistency. Note that the meaing of "overlapping" is that the updates - * touch the same page-table, which might be a higher-level page-directory. - * If no pipelining is needed, then updates may be performed by the cpu. - * - * Return: A dma_fence that, when signaled, indicates the update completion. - */ -struct dma_fence * -xe_migrate_update_pgtables(struct xe_migrate *m, - struct xe_vm *vm, - struct xe_bo *bo, - struct xe_exec_queue *q, - const struct xe_vm_pgtable_update *updates, - u32 num_updates, - struct xe_sync_entry *syncs, u32 num_syncs, - struct xe_migrate_pt_update *pt_update) +static struct dma_fence * +__xe_migrate_update_pgtables(struct xe_migrate *m, + struct xe_migrate_pt_update *pt_update, + struct xe_vm_pgtable_update_ops *pt_update_ops) { const struct xe_migrate_pt_update_ops *ops = pt_update->ops; struct xe_tile *tile = m->tile; @@ -1301,59 +1250,53 @@ xe_migrate_update_pgtables(struct xe_migrate *m, struct xe_sched_job *job; struct dma_fence *fence; struct drm_suballoc *sa_bo = NULL; - struct xe_vma *vma = pt_update->vma; struct xe_bb *bb; - u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0; + u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0; + u32 num_updates = 0, current_update = 0; u64 addr; int err = 0; - bool usm = !q && xe->info.has_usm; - bool first_munmap_rebind = vma && - vma->gpuva.flags & XE_VMA_FIRST_REBIND; - struct xe_exec_queue *q_override = !q ? 
m->q : q; - u16 pat_index = xe->pat.idx[XE_CACHE_WB]; + bool is_migrate = pt_update_ops->q == m->q; + bool usm = is_migrate && xe->info.has_usm; - /* Use the CPU if no in syncs and engine is idle */ - if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) { - fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates, - num_updates, - first_munmap_rebind, - pt_update); - if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN)) - return fence; + for (i = 0; i < pt_update_ops->num_ops; ++i) { + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i]; + struct xe_vm_pgtable_update *updates = pt_op->entries; + + num_updates += pt_op->num_entries; + for (j = 0; j < pt_op->num_entries; ++j) { + u32 num_cmds = DIV_ROUND_UP(updates[j].qwords, + MAX_PTE_PER_SDI); + + /* align noop + MI_STORE_DATA_IMM cmd prefix */ + batch_size += 4 * num_cmds + updates[j].qwords * 2; + } } /* fixed + PTE entries */ if (IS_DGFX(xe)) - batch_size = 2; + batch_size += 2; else - batch_size = 6 + num_updates * 2; + batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) + + num_updates * 2; - for (i = 0; i < num_updates; i++) { - u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI); - - /* align noop + MI_STORE_DATA_IMM cmd prefix */ - batch_size += 4 * num_cmds + updates[i].qwords * 2; - } - - /* - * XXX: Create temp bo to copy from, if batch_size becomes too big? - * - * Worst case: Sum(2 * (each lower level page size) + (top level page size)) - * Should be reasonably bound.. - */ - xe_tile_assert(tile, batch_size < SZ_128K); - - bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm); + bb = xe_bb_new(gt, batch_size, usm); if (IS_ERR(bb)) return ERR_CAST(bb); /* For sysmem PTE's, need to map them in our hole.. */ if (!IS_DGFX(xe)) { - ppgtt_ofs = NUM_KERNEL_PDE - 1; - if (q) { - xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT); + u32 ptes, ofs; - sa_bo = drm_suballoc_new(&m->vm_update_sa, 1, + ppgtt_ofs = NUM_KERNEL_PDE - 1; + if (!is_migrate) { + u32 num_units = DIV_ROUND_UP(num_updates, + NUM_VMUSA_WRITES_PER_UNIT); + + if (num_units > m->vm_update_sa.size) { + err = -ENOBUFS; + goto err_bb; + } + sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units, GFP_KERNEL, true, 0); if (IS_ERR(sa_bo)) { err = PTR_ERR(sa_bo); @@ -1369,18 +1312,49 @@ xe_migrate_update_pgtables(struct xe_migrate *m, } /* Map our PT's to gtt */ - bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates); - bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs; - bb->cs[bb->len++] = 0; /* upper_32_bits */ + i = 0; + j = 0; + ptes = num_updates; + ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs; + while (ptes) { + u32 chunk = min(MAX_PTE_PER_SDI, ptes); + u32 idx = 0; - for (i = 0; i < num_updates; i++) { - struct xe_bo *pt_bo = updates[i].pt_bo; + bb->cs[bb->len++] = MI_STORE_DATA_IMM | + MI_SDI_NUM_QW(chunk); + bb->cs[bb->len++] = ofs; + bb->cs[bb->len++] = 0; /* upper_32_bits */ - xe_tile_assert(tile, pt_bo->size == SZ_4K); + for (; i < pt_update_ops->num_ops; ++i) { + struct xe_vm_pgtable_update_op *pt_op = + &pt_update_ops->ops[i]; + struct xe_vm_pgtable_update *updates = pt_op->entries; - addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0); - bb->cs[bb->len++] = lower_32_bits(addr); - bb->cs[bb->len++] = upper_32_bits(addr); + for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) { + struct xe_vm *vm = pt_update->vops->vm; + struct xe_bo *pt_bo = updates[j].pt_bo; + + if (idx == chunk) + goto next_cmd; + + xe_tile_assert(tile, pt_bo->size == SZ_4K); + + /* Map a PT at most once */ + 
+				if (pt_bo->update_index < 0)
+					pt_bo->update_index = current_update;
+
+				addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
+								 XE_CACHE_WB, 0);
+				bb->cs[bb->len++] = lower_32_bits(addr);
+				bb->cs[bb->len++] = upper_32_bits(addr);
+			}
+
+			j = 0;
+		}
+
+next_cmd:
+		ptes -= chunk;
+		ofs += chunk * sizeof(u64);
 	}
 
 	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
@@ -1388,19 +1362,36 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 	update_idx = bb->len;
 
 	addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
 		(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
-	for (i = 0; i < num_updates; i++)
-		write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
-			      &updates[i], pt_update);
+	for (i = 0; i < pt_update_ops->num_ops; ++i) {
+		struct xe_vm_pgtable_update_op *pt_op =
+			&pt_update_ops->ops[i];
+		struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+		for (j = 0; j < pt_op->num_entries; ++j) {
+			struct xe_bo *pt_bo = updates[j].pt_bo;
+
+			write_pgtable(tile, bb, addr +
+				      pt_bo->update_index * XE_PAGE_SIZE,
+				      pt_op, &updates[j], pt_update);
+		}
+	}
 	} else {
 		/* phys pages, no preamble required */
 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
 		update_idx = bb->len;
 
-		for (i = 0; i < num_updates; i++)
-			write_pgtable(tile, bb, 0, &updates[i], pt_update);
+		for (i = 0; i < pt_update_ops->num_ops; ++i) {
+			struct xe_vm_pgtable_update_op *pt_op =
+				&pt_update_ops->ops[i];
+			struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+			for (j = 0; j < pt_op->num_entries; ++j)
+				write_pgtable(tile, bb, 0, pt_op, &updates[j],
+					      pt_update);
+		}
 	}
 
-	job = xe_bb_create_migration_job(q ?: m->q, bb,
+	job = xe_bb_create_migration_job(pt_update_ops->q, bb,
 					 xe_migrate_batch_base(m, usm),
 					 update_idx);
 	if (IS_ERR(job)) {
@@ -1408,46 +1399,20 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 		err = PTR_ERR(job);
 		goto err_sa;
 	}
 
-	/* Wait on BO move */
-	if (bo) {
-		err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
-					    DMA_RESV_USAGE_KERNEL);
-		if (err)
-			goto err_job;
-	}
-
-	/*
-	 * Munmap style VM unbind, need to wait for all jobs to be complete /
-	 * trigger preempts before moving forward
-	 */
-	if (first_munmap_rebind) {
-		err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
-					    DMA_RESV_USAGE_BOOKKEEP);
-		if (err)
-			goto err_job;
-	}
-
-	err = xe_sched_job_last_fence_add_dep(job, vm);
-	for (i = 0; !err && i < num_syncs; i++)
-		err = xe_sync_entry_add_deps(&syncs[i], job);
-
-	if (err)
-		goto err_job;
-
 	if (ops->pre_commit) {
 		pt_update->job = job;
 		err = ops->pre_commit(pt_update);
 		if (err)
 			goto err_job;
 	}
-	if (!q)
+	if (is_migrate)
 		mutex_lock(&m->job_mutex);
 
 	xe_sched_job_arm(job);
 	fence = dma_fence_get(&job->drm.s_fence->finished);
 	xe_sched_job_push(job);
 
-	if (!q)
+	if (is_migrate)
 		mutex_unlock(&m->job_mutex);
 
 	xe_bb_free(bb, fence);
@@ -1464,6 +1429,40 @@ err_bb:
 	return ERR_PTR(err);
 }
 
+/**
+ * xe_migrate_update_pgtables() - Pipelined page-table update
+ * @m: The migrate context.
+ * @pt_update: PT update arguments
+ *
+ * Perform a pipelined page-table update. The update descriptors are typically
+ * built under the same lock critical section as a call to this function. If
+ * using the default engine for the updates, they will be performed in the
+ * order they grab the job_mutex. If different engines are used, external
+ * synchronization is needed for overlapping updates to maintain page-table
+ * consistency. Note that the meaning of "overlapping" is that the updates
+ * touch the same page-table, which might be a higher-level page-directory.
+ * If no pipelining is needed, then updates may be performed by the cpu.
+ *
+ * Return: A dma_fence that, when signaled, indicates the update completion.
+ */ +struct dma_fence * +xe_migrate_update_pgtables(struct xe_migrate *m, + struct xe_migrate_pt_update *pt_update) + +{ + struct xe_vm_pgtable_update_ops *pt_update_ops = + &pt_update->vops->pt_update_ops[pt_update->tile_id]; + struct dma_fence *fence; + + fence = xe_migrate_update_pgtables_cpu(m, pt_update); + + /* -ETIME indicates a job is needed, anything else is legit error */ + if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME) + return fence; + + return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops); +} + /** * xe_migrate_wait() - Complete all operations using the xe_migrate context * @m: Migrate context to wait for. diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h index a5bcaafe4a99..453e0ecf5034 100644 --- a/drivers/gpu/drm/xe/xe_migrate.h +++ b/drivers/gpu/drm/xe/xe_migrate.h @@ -47,6 +47,24 @@ struct xe_migrate_pt_update_ops { struct xe_tile *tile, struct iosys_map *map, void *pos, u32 ofs, u32 num_qwords, const struct xe_vm_pgtable_update *update); + /** + * @clear: Clear a command buffer or page-table with ptes. + * @pt_update: Embeddable callback argument. + * @tile: The tile for the current operation. + * @map: struct iosys_map into the memory to be populated. + * @pos: If @map is NULL, map into the memory to be populated. + * @ofs: qword offset into @map, unused if @map is NULL. + * @num_qwords: Number of qwords to write. + * @update: Information about the PTEs to be inserted. + * + * This interface is intended to be used as a callback into the + * page-table system to populate command buffers or shared + * page-tables with PTEs. + */ + void (*clear)(struct xe_migrate_pt_update *pt_update, + struct xe_tile *tile, struct iosys_map *map, + void *pos, u32 ofs, u32 num_qwords, + const struct xe_vm_pgtable_update *update); /** * @pre_commit: Callback to be called just before arming the @@ -67,14 +85,10 @@ struct xe_migrate_pt_update_ops { struct xe_migrate_pt_update { /** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */ const struct xe_migrate_pt_update_ops *ops; - /** @vma: The vma we're updating the pagetable for. */ - struct xe_vma *vma; + /** @vops: VMA operations */ + struct xe_vma_ops *vops; /** @job: The job if a GPU page-table update. 
NULL otherwise */ struct xe_sched_job *job; - /** @start: Start of update for the range fence */ - u64 start; - /** @last: Last of update for the range fence */ - u64 last; /** @tile_id: Tile ID of the update */ u8 tile_id; }; @@ -96,12 +110,6 @@ struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m); struct dma_fence * xe_migrate_update_pgtables(struct xe_migrate *m, - struct xe_vm *vm, - struct xe_bo *bo, - struct xe_exec_queue *q, - const struct xe_vm_pgtable_update *updates, - u32 num_updates, - struct xe_sync_entry *syncs, u32 num_syncs, struct xe_migrate_pt_update *pt_update); void xe_migrate_wait(struct xe_migrate *m); diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index ade9e7a3a0ad..f46f46d46819 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -9,12 +9,15 @@ #include "xe_bo.h" #include "xe_device.h" #include "xe_drm_client.h" +#include "xe_exec_queue.h" #include "xe_gt.h" #include "xe_gt_tlb_invalidation.h" #include "xe_migrate.h" #include "xe_pt_types.h" #include "xe_pt_walk.h" #include "xe_res_cursor.h" +#include "xe_sched_job.h" +#include "xe_sync.h" #include "xe_trace.h" #include "xe_ttm_stolen_mgr.h" #include "xe_vm.h" @@ -325,6 +328,7 @@ xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent, entry->pt = parent; entry->flags = 0; entry->qwords = 0; + entry->pt_bo->update_index = -1; if (alloc_entries) { entry->pt_entries = kmalloc_array(XE_PDES, @@ -864,9 +868,7 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma) lockdep_assert_held(&vm->lock); - if (xe_vma_is_userptr(vma)) - lockdep_assert_held_read(&vm->userptr.notifier_lock); - else if (!xe_vma_is_null(vma)) + if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma)) dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv); xe_vm_assert_held(vm); @@ -888,10 +890,8 @@ static void xe_pt_commit_bind(struct xe_vma *vma, if (!rebind) pt->num_live += entries[i].qwords; - if (!pt->level) { - kfree(entries[i].pt_entries); + if (!pt->level) continue; - } pt_dir = as_xe_pt_dir(pt); for (j = 0; j < entries[i].qwords; j++) { @@ -904,10 +904,18 @@ static void xe_pt_commit_bind(struct xe_vma *vma, pt_dir->children[j_] = &newpte->base; } - kfree(entries[i].pt_entries); } } +static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries, + u32 num_entries) +{ + u32 i; + + for (i = 0; i < num_entries; i++) + kfree(entries[i].pt_entries); +} + static int xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, struct xe_vm_pgtable_update *entries, u32 *num_entries) @@ -926,12 +934,13 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, static void xe_vm_dbg_print_entries(struct xe_device *xe, const struct xe_vm_pgtable_update *entries, - unsigned int num_entries) + unsigned int num_entries, bool bind) #if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)) { unsigned int i; - vm_dbg(&xe->drm, "%u entries to update\n", num_entries); + vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind", + num_entries); for (i = 0; i < num_entries; i++) { const struct xe_vm_pgtable_update *entry = &entries[i]; struct xe_pt *xe_pt = entry->pt; @@ -952,66 +961,108 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe, {} #endif -#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT - -static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) +static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs) { - u32 divisor = uvma->userptr.divisor ? 
uvma->userptr.divisor : 2; - static u32 count; + int i; - if (count++ % divisor == divisor - 1) { - struct xe_vm *vm = xe_vma_vm(&uvma->vma); + for (i = 0; i < num_syncs; i++) { + struct dma_fence *fence = syncs[i].fence; - uvma->userptr.divisor = divisor << 1; - spin_lock(&vm->userptr.invalidated_lock); - list_move_tail(&uvma->userptr.invalidate_link, - &vm->userptr.invalidated); - spin_unlock(&vm->userptr.invalidated_lock); - return true; + if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence->flags)) + return false; } - return false; + return true; } -#else - -static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) +static int job_test_add_deps(struct xe_sched_job *job, + struct dma_resv *resv, + enum dma_resv_usage usage) { - return false; + if (!job) { + if (!dma_resv_test_signaled(resv, usage)) + return -ETIME; + + return 0; + } + + return xe_sched_job_add_deps(job, resv, usage); } -#endif +static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job) +{ + struct xe_bo *bo = xe_vma_bo(vma); -/** - * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks - * @base: Base we derive from. - * @bind: Whether this is a bind or an unbind operation. A bind operation - * makes the pre-commit callback error with -EAGAIN if it detects a - * pending invalidation. - * @locked: Whether the pre-commit callback locked the userptr notifier lock - * and it needs unlocking. - */ -struct xe_pt_migrate_pt_update { - struct xe_migrate_pt_update base; - bool bind; - bool locked; -}; + xe_bo_assert_held(bo); + + if (bo && !bo->vm) + return job_test_add_deps(job, bo->ttm.base.resv, + DMA_RESV_USAGE_KERNEL); + + return 0; +} + +static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op, + struct xe_sched_job *job) +{ + int err = 0; + + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + if (!op->map.immediate && xe_vm_in_fault_mode(vm)) + break; + + err = vma_add_deps(op->map.vma, job); + break; + case DRM_GPUVA_OP_REMAP: + if (op->remap.prev) + err = vma_add_deps(op->remap.prev, job); + if (!err && op->remap.next) + err = vma_add_deps(op->remap.next, job); + break; + case DRM_GPUVA_OP_UNMAP: + break; + case DRM_GPUVA_OP_PREFETCH: + err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job); + break; + default: + drm_warn(&vm->xe->drm, "NOT POSSIBLE"); + } + + return err; +} -/* - * This function adds the needed dependencies to a page-table update job - * to make sure racing jobs for separate bind engines don't race writing - * to the same page-table range, wreaking havoc. Initially use a single - * fence for the entire VM. An optimization would use smaller granularity. - */ static int xe_pt_vm_dependencies(struct xe_sched_job *job, - struct xe_range_fence_tree *rftree, - u64 start, u64 last) + struct xe_vm *vm, + struct xe_vma_ops *vops, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_range_fence_tree *rftree) { struct xe_range_fence *rtfence; struct dma_fence *fence; - int err; + struct xe_vma_op *op; + int err = 0, i; - rtfence = xe_range_fence_tree_first(rftree, start, last); + xe_vm_assert_held(vm); + + if (!job && !no_in_syncs(vops->syncs, vops->num_syncs)) + return -ETIME; + + if (!job && !xe_exec_queue_is_idle(pt_update_ops->q)) + return -ETIME; + + if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) { + err = job_test_add_deps(job, xe_vm_resv(vm), + pt_update_ops->wait_vm_bookkeep ? 
+ DMA_RESV_USAGE_BOOKKEEP : + DMA_RESV_USAGE_KERNEL); + if (err) + return err; + } + + rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start, + pt_update_ops->last); while (rtfence) { fence = rtfence->fence; @@ -1029,80 +1080,173 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job, return err; } - rtfence = xe_range_fence_tree_next(rtfence, start, last); + rtfence = xe_range_fence_tree_next(rtfence, + pt_update_ops->start, + pt_update_ops->last); } - return 0; + list_for_each_entry(op, &vops->list, link) { + err = op_add_deps(vm, op, job); + if (err) + return err; + } + + if (job) + err = xe_sched_job_last_fence_add_dep(job, vm); + else + err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm); + + for (i = 0; job && !err && i < vops->num_syncs; i++) + err = xe_sync_entry_add_deps(&vops->syncs[i], job); + + return err; } static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update) { - struct xe_range_fence_tree *rftree = - &xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id]; + struct xe_vma_ops *vops = pt_update->vops; + struct xe_vm *vm = vops->vm; + struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id]; + struct xe_vm_pgtable_update_ops *pt_update_ops = + &vops->pt_update_ops[pt_update->tile_id]; - return xe_pt_vm_dependencies(pt_update->job, rftree, - pt_update->start, pt_update->last); + return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops, + pt_update_ops, rftree); } -static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) +#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT + +static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) { - struct xe_pt_migrate_pt_update *userptr_update = - container_of(pt_update, typeof(*userptr_update), base); - struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma); - unsigned long notifier_seq = uvma->userptr.notifier_seq; - struct xe_vm *vm = xe_vma_vm(&uvma->vma); - int err = xe_pt_vm_dependencies(pt_update->job, - &vm->rftree[pt_update->tile_id], - pt_update->start, - pt_update->last); + u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2; + static u32 count; - if (err) - return err; - - userptr_update->locked = false; - - /* - * Wait until nobody is running the invalidation notifier, and - * since we're exiting the loop holding the notifier lock, - * nobody can proceed invalidating either. - * - * Note that we don't update the vma->userptr.notifier_seq since - * we don't update the userptr pages. 
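
/*
 * Aside: job_test_add_deps() above folds both execution modes into one
 * helper. With a job, the reservation object becomes a scheduler dependency;
 * with no job (the CPU-update path), any unsignaled fence forces the -ETIME
 * fallback to a GPU job, e.g.:
 *
 *	err = job_test_add_deps(NULL, xe_vm_resv(vm),
 *				DMA_RESV_USAGE_KERNEL);
 *	// err == -ETIME here means "VM not idle, use a GPU job instead"
 */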
- */ - do { - down_read(&vm->userptr.notifier_lock); - if (!mmu_interval_read_retry(&uvma->userptr.notifier, - notifier_seq)) - break; - - up_read(&vm->userptr.notifier_lock); - - if (userptr_update->bind) - return -EAGAIN; - - notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier); - } while (true); - - /* Inject errors to test_whether they are handled correctly */ - if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) { - up_read(&vm->userptr.notifier_lock); - return -EAGAIN; + if (count++ % divisor == divisor - 1) { + uvma->userptr.divisor = divisor << 1; + return true; } - userptr_update->locked = true; + return false; +} + +#else + +static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) +{ + return false; +} + +#endif + +static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma, + struct xe_vm_pgtable_update_ops *pt_update) +{ + struct xe_userptr_vma *uvma; + unsigned long notifier_seq; + + lockdep_assert_held_read(&vm->userptr.notifier_lock); + + if (!xe_vma_is_userptr(vma)) + return 0; + + uvma = to_userptr_vma(vma); + notifier_seq = uvma->userptr.notifier_seq; + + if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm)) + return 0; + + if (!mmu_interval_read_retry(&uvma->userptr.notifier, + notifier_seq) && + !xe_pt_userptr_inject_eagain(uvma)) + return 0; + + if (xe_vm_in_fault_mode(vm)) { + return -EAGAIN; + } else { + spin_lock(&vm->userptr.invalidated_lock); + list_move_tail(&uvma->userptr.invalidate_link, + &vm->userptr.invalidated); + spin_unlock(&vm->userptr.invalidated_lock); + + if (xe_vm_in_preempt_fence_mode(vm)) { + struct dma_resv_iter cursor; + struct dma_fence *fence; + long err; + + dma_resv_iter_begin(&cursor, xe_vm_resv(vm), + DMA_RESV_USAGE_BOOKKEEP); + dma_resv_for_each_fence_unlocked(&cursor, fence) + dma_fence_enable_sw_signaling(fence); + dma_resv_iter_end(&cursor); + + err = dma_resv_wait_timeout(xe_vm_resv(vm), + DMA_RESV_USAGE_BOOKKEEP, + false, MAX_SCHEDULE_TIMEOUT); + XE_WARN_ON(err <= 0); + } + } return 0; } -static const struct xe_migrate_pt_update_ops bind_ops = { - .populate = xe_vm_populate_pgtable, - .pre_commit = xe_pt_pre_commit, -}; +static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op, + struct xe_vm_pgtable_update_ops *pt_update) +{ + int err = 0; -static const struct xe_migrate_pt_update_ops userptr_bind_ops = { - .populate = xe_vm_populate_pgtable, - .pre_commit = xe_pt_userptr_pre_commit, -}; + lockdep_assert_held_read(&vm->userptr.notifier_lock); + + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + if (!op->map.immediate && xe_vm_in_fault_mode(vm)) + break; + + err = vma_check_userptr(vm, op->map.vma, pt_update); + break; + case DRM_GPUVA_OP_REMAP: + if (op->remap.prev) + err = vma_check_userptr(vm, op->remap.prev, pt_update); + if (!err && op->remap.next) + err = vma_check_userptr(vm, op->remap.next, pt_update); + break; + case DRM_GPUVA_OP_UNMAP: + break; + case DRM_GPUVA_OP_PREFETCH: + err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), + pt_update); + break; + default: + drm_warn(&vm->xe->drm, "NOT POSSIBLE"); + } + + return err; +} + +static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) +{ + struct xe_vm *vm = pt_update->vops->vm; + struct xe_vma_ops *vops = pt_update->vops; + struct xe_vm_pgtable_update_ops *pt_update_ops = + &vops->pt_update_ops[pt_update->tile_id]; + struct xe_vma_op *op; + int err; + + err = xe_pt_pre_commit(pt_update); + if (err) + return err; + + down_read(&vm->userptr.notifier_lock); + + list_for_each_entry(op, 
&vops->list, link) { + err = op_check_userptr(vm, op, pt_update_ops); + if (err) { + up_read(&vm->userptr.notifier_lock); + break; + } + } + + return err; +} struct invalidation_fence { struct xe_gt_tlb_invalidation_fence base; @@ -1200,190 +1344,6 @@ static int invalidation_fence_init(struct xe_gt *gt, return ret && ret != -ENOENT ? ret : 0; } -static void xe_pt_calc_rfence_interval(struct xe_vma *vma, - struct xe_pt_migrate_pt_update *update, - struct xe_vm_pgtable_update *entries, - u32 num_entries) -{ - int i, level = 0; - - for (i = 0; i < num_entries; i++) { - const struct xe_vm_pgtable_update *entry = &entries[i]; - - if (entry->pt->level > level) - level = entry->pt->level; - } - - /* Greedy (non-optimal) calculation but simple */ - update->base.start = ALIGN_DOWN(xe_vma_start(vma), - 0x1ull << xe_pt_shift(level)); - update->base.last = ALIGN(xe_vma_end(vma), - 0x1ull << xe_pt_shift(level)) - 1; -} - -/** - * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma - * address range. - * @tile: The tile to bind for. - * @vma: The vma to bind. - * @q: The exec_queue with which to do pipelined page-table updates. - * @syncs: Entries to sync on before binding the built tree to the live vm tree. - * @num_syncs: Number of @sync entries. - * @rebind: Whether we're rebinding this vma to the same address range without - * an unbind in-between. - * - * This function builds a page-table tree (see xe_pt_stage_bind() for more - * information on page-table building), and the xe_vm_pgtable_update entries - * abstracting the operations needed to attach it to the main vm tree. It - * then takes the relevant locks and updates the metadata side of the main - * vm tree and submits the operations for pipelined attachment of the - * gpu page-table to the vm main tree, (which can be done either by the - * cpu and the GPU). - * - * Return: A valid dma-fence representing the pipelined attachment operation - * on success, an error pointer on error. - */ -struct dma_fence * -__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs, - bool rebind) -{ - struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1]; - struct xe_pt_migrate_pt_update bind_pt_update = { - .base = { - .ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops, - .vma = vma, - .tile_id = tile->id, - }, - .bind = true, - }; - struct xe_vm *vm = xe_vma_vm(vma); - u32 num_entries; - struct dma_fence *fence; - struct invalidation_fence *ifence = NULL; - struct xe_range_fence *rfence; - int err; - - bind_pt_update.locked = false; - xe_bo_assert_held(xe_vma_bo(vma)); - xe_vm_assert_held(vm); - - vm_dbg(&xe_vma_vm(vma)->xe->drm, - "Preparing bind, with range [%llx...%llx) engine %p.\n", - xe_vma_start(vma), xe_vma_end(vma), q); - - err = xe_pt_prepare_bind(tile, vma, entries, &num_entries); - if (err) - goto err; - - err = dma_resv_reserve_fences(xe_vm_resv(vm), 1); - if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) - err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1); - if (err) - goto err; - - xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries)); - - xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries); - xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries, - num_entries); - - /* - * If rebind, we have to invalidate TLB on !LR vms to invalidate - * cached PTEs point to freed memory. 
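
/*
 * Aside on ordering (sketch; the signature is as used by
 * xe_pt_update_ops_run() later in this file): invalidation_fence_init()
 * wraps the update fence so that waiters only observe completion once the
 * GT TLB invalidation has finished:
 *
 *	err = invalidation_fence_init(tile->primary_gt, ifence, fence,
 *				      pt_update_ops->start,
 *				      pt_update_ops->last, vm->usm.asid);
 *	if (!err)
 *		fence = &ifence->base.base;	// waiters now cover the inval
 */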
on LR vms this is done - * automatically when the context is re-enabled by the rebind worker, - * or in fault mode it was invalidated on PTE zapping. - * - * If !rebind, and scratch enabled VMs, there is a chance the scratch - * PTE is already cached in the TLB so it needs to be invalidated. - * on !LR VMs this is done in the ring ops preceding a batch, but on - * non-faulting LR, in particular on user-space batch buffer chaining, - * it needs to be done here. - */ - if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) { - ifence = kzalloc(sizeof(*ifence), GFP_KERNEL); - if (!ifence) - return ERR_PTR(-ENOMEM); - } else if (rebind && !xe_vm_in_lr_mode(vm)) { - /* We bump also if batch_invalidate_tlb is true */ - vm->tlb_flush_seqno++; - } - - rfence = kzalloc(sizeof(*rfence), GFP_KERNEL); - if (!rfence) { - kfree(ifence); - return ERR_PTR(-ENOMEM); - } - - fence = xe_migrate_update_pgtables(tile->migrate, - vm, xe_vma_bo(vma), q, - entries, num_entries, - syncs, num_syncs, - &bind_pt_update.base); - if (!IS_ERR(fence)) { - bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND; - LLIST_HEAD(deferred); - int err; - - err = xe_range_fence_insert(&vm->rftree[tile->id], rfence, - &xe_range_fence_kfree_ops, - bind_pt_update.base.start, - bind_pt_update.base.last, fence); - if (err) - dma_fence_wait(fence, false); - - /* TLB invalidation must be done before signaling rebind */ - if (ifence) { - int err = invalidation_fence_init(tile->primary_gt, - ifence, fence, - xe_vma_start(vma), - xe_vma_end(vma), - xe_vma_vm(vma)->usm.asid); - if (err) { - dma_fence_put(fence); - kfree(ifence); - return ERR_PTR(err); - } - fence = &ifence->base.base; - } - - /* add shared fence now for pagetable delayed destroy */ - dma_resv_add_fence(xe_vm_resv(vm), fence, rebind || - last_munmap_rebind ? - DMA_RESV_USAGE_KERNEL : - DMA_RESV_USAGE_BOOKKEEP); - - if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) - dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, - DMA_RESV_USAGE_BOOKKEEP); - xe_pt_commit_bind(vma, entries, num_entries, rebind, - bind_pt_update.locked ? &deferred : NULL); - - /* This vma is live (again?) now */ - vma->tile_present |= BIT(tile->id); - - if (bind_pt_update.locked) { - to_userptr_vma(vma)->userptr.initial_bind = true; - up_read(&vm->userptr.notifier_lock); - xe_bo_put_commit(&deferred); - } - if (!rebind && last_munmap_rebind && - xe_vm_in_preempt_fence_mode(vm)) - xe_vm_queue_rebind_worker(vm); - } else { - kfree(rfence); - kfree(ifence); - if (bind_pt_update.locked) - up_read(&vm->userptr.notifier_lock); - xe_pt_abort_bind(vma, entries, num_entries); - } - - return fence; - -err: - return ERR_PTR(err); -} - struct xe_pt_stage_unbind_walk { /** @base: The pagewalk base-class. 
*/ struct xe_pt_walk base; @@ -1534,8 +1494,8 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update, void *ptr, u32 qword_ofs, u32 num_qwords, const struct xe_vm_pgtable_update *update) { - struct xe_vma *vma = pt_update->vma; - u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level); + struct xe_vm *vm = pt_update->vops->vm; + u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level); int i; if (map && map->is_iomem) @@ -1579,151 +1539,487 @@ xe_pt_commit_unbind(struct xe_vma *vma, } } -static const struct xe_migrate_pt_update_ops unbind_ops = { - .populate = xe_migrate_clear_pgtable_callback, - .pre_commit = xe_pt_pre_commit, -}; - -static const struct xe_migrate_pt_update_ops userptr_unbind_ops = { - .populate = xe_migrate_clear_pgtable_callback, - .pre_commit = xe_pt_userptr_pre_commit, -}; - -/** - * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma - * address range. - * @tile: The tile to unbind for. - * @vma: The vma to unbind. - * @q: The exec_queue with which to do pipelined page-table updates. - * @syncs: Entries to sync on before disconnecting the tree to be destroyed. - * @num_syncs: Number of @sync entries. - * - * This function builds a the xe_vm_pgtable_update entries abstracting the - * operations needed to detach the page-table tree to be destroyed from the - * man vm tree. - * It then takes the relevant locks and submits the operations for - * pipelined detachment of the gpu page-table from the vm main tree, - * (which can be done either by the cpu and the GPU), Finally it frees the - * detached page-table tree. - * - * Return: A valid dma-fence representing the pipelined detachment operation - * on success, an error pointer on error. - */ -struct dma_fence * -__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs) +static void +xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma *vma) { - struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1]; - struct xe_pt_migrate_pt_update unbind_pt_update = { - .base = { - .ops = xe_vma_is_userptr(vma) ? 
&userptr_unbind_ops : - &unbind_ops, - .vma = vma, - .tile_id = tile->id, - }, - }; - struct xe_vm *vm = xe_vma_vm(vma); - u32 num_entries; - struct dma_fence *fence = NULL; - struct invalidation_fence *ifence; - struct xe_range_fence *rfence; + u32 current_op = pt_update_ops->current_op; + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; + int i, level = 0; + u64 start, last; + + for (i = 0; i < pt_op->num_entries; i++) { + const struct xe_vm_pgtable_update *entry = &pt_op->entries[i]; + + if (entry->pt->level > level) + level = entry->pt->level; + } + + /* Greedy (non-optimal) calculation but simple */ + start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level)); + last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1; + + if (start < pt_update_ops->start) + pt_update_ops->start = start; + if (last > pt_update_ops->last) + pt_update_ops->last = last; +} + +static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma) +{ + if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) + return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, + xe->info.tile_count); + + return 0; +} + +static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma *vma) +{ + u32 current_op = pt_update_ops->current_op; + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; + struct llist_head *deferred = &pt_update_ops->deferred; int err; - LLIST_HEAD(deferred); - xe_bo_assert_held(xe_vma_bo(vma)); - xe_vm_assert_held(vm); vm_dbg(&xe_vma_vm(vma)->xe->drm, - "Preparing unbind, with range [%llx...%llx) engine %p.\n", - xe_vma_start(vma), xe_vma_end(vma), q); + "Preparing bind, with range [%llx...%llx)\n", + xe_vma_start(vma), xe_vma_end(vma) - 1); - num_entries = xe_pt_stage_unbind(tile, vma, entries); - xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries)); + pt_op->vma = NULL; + pt_op->bind = true; + pt_op->rebind = BIT(tile->id) & vma->tile_present; - xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries); - xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries, - num_entries); - - err = dma_resv_reserve_fences(xe_vm_resv(vm), 1); - if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) - err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1); + err = vma_reserve_fences(tile_to_xe(tile), vma); if (err) - return ERR_PTR(err); + return err; - ifence = kzalloc(sizeof(*ifence), GFP_KERNEL); - if (!ifence) - return ERR_PTR(-ENOMEM); + err = xe_pt_prepare_bind(tile, vma, pt_op->entries, + &pt_op->num_entries); + if (!err) { + xe_tile_assert(tile, pt_op->num_entries <= + ARRAY_SIZE(pt_op->entries)); + xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, + pt_op->num_entries, true); - rfence = kzalloc(sizeof(*rfence), GFP_KERNEL); - if (!rfence) { - kfree(ifence); - return ERR_PTR(-ENOMEM); + xe_pt_update_ops_rfence_interval(pt_update_ops, vma); + ++pt_update_ops->current_op; + pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma); + + /* + * If rebind, we have to invalidate TLB on !LR vms to invalidate + * cached PTEs point to freed memory. On LR vms this is done + * automatically when the context is re-enabled by the rebind worker, + * or in fault mode it was invalidated on PTE zapping. + * + * If !rebind, and scratch enabled VMs, there is a chance the scratch + * PTE is already cached in the TLB so it needs to be invalidated. 
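+	 *
+	 * Condensed, the decision below is (illustrative restatement only):
+	 *
+	 *	if (!rebind && vm_has_scratch && preempt_fence_mode)
+	 *		needs_invalidation = true;	// scratch PTEs may be cached
+	 *	else if (rebind && !lr_mode)
+	 *		vm->tlb_flush_seqno++;		// ring ops flush before batches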
+ * On !LR VMs this is done in the ring ops preceding a batch, but on + * non-faulting LR, in particular on user-space batch buffer chaining, + * it needs to be done here. + */ + if ((!pt_op->rebind && xe_vm_has_scratch(vm) && + xe_vm_in_preempt_fence_mode(vm))) + pt_update_ops->needs_invalidation = true; + else if (pt_op->rebind && !xe_vm_in_lr_mode(vm)) + /* We bump also if batch_invalidate_tlb is true */ + vm->tlb_flush_seqno++; + + /* FIXME: Don't commit right away */ + vma->tile_staged |= BIT(tile->id); + pt_op->vma = vma; + xe_pt_commit_bind(vma, pt_op->entries, pt_op->num_entries, + pt_op->rebind, deferred); + } + + return err; +} + +static int unbind_op_prepare(struct xe_tile *tile, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma *vma) +{ + u32 current_op = pt_update_ops->current_op; + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; + struct llist_head *deferred = &pt_update_ops->deferred; + int err; + + if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id))) + return 0; + + xe_bo_assert_held(xe_vma_bo(vma)); + + vm_dbg(&xe_vma_vm(vma)->xe->drm, + "Preparing unbind, with range [%llx...%llx)\n", + xe_vma_start(vma), xe_vma_end(vma) - 1); + + /* + * Wait for invalidation to complete. Can corrupt internal page table + * state if an invalidation is running while preparing an unbind. + */ + if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma))) + mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier); + + pt_op->vma = vma; + pt_op->bind = false; + pt_op->rebind = false; + + err = vma_reserve_fences(tile_to_xe(tile), vma); + if (err) + return err; + + pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries); + + xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, + pt_op->num_entries, false); + xe_pt_update_ops_rfence_interval(pt_update_ops, vma); + ++pt_update_ops->current_op; + pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma); + pt_update_ops->needs_invalidation = true; + + /* FIXME: Don't commit right away */ + xe_pt_commit_unbind(vma, pt_op->entries, pt_op->num_entries, + deferred); + + return 0; +} + +static int op_prepare(struct xe_vm *vm, + struct xe_tile *tile, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma_op *op) +{ + int err = 0; + + xe_vm_assert_held(vm); + + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + if (!op->map.immediate && xe_vm_in_fault_mode(vm)) + break; + + err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma); + pt_update_ops->wait_vm_kernel = true; + break; + case DRM_GPUVA_OP_REMAP: + err = unbind_op_prepare(tile, pt_update_ops, + gpuva_to_vma(op->base.remap.unmap->va)); + + if (!err && op->remap.prev) { + err = bind_op_prepare(vm, tile, pt_update_ops, + op->remap.prev); + pt_update_ops->wait_vm_bookkeep = true; + } + if (!err && op->remap.next) { + err = bind_op_prepare(vm, tile, pt_update_ops, + op->remap.next); + pt_update_ops->wait_vm_bookkeep = true; + } + break; + case DRM_GPUVA_OP_UNMAP: + err = unbind_op_prepare(tile, pt_update_ops, + gpuva_to_vma(op->base.unmap.va)); + break; + case DRM_GPUVA_OP_PREFETCH: + err = bind_op_prepare(vm, tile, pt_update_ops, + gpuva_to_vma(op->base.prefetch.va)); + pt_update_ops->wait_vm_kernel = true; + break; + default: + drm_warn(&vm->xe->drm, "NOT POSSIBLE"); + } + + return err; +} + +static void +xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops) +{ + init_llist_head(&pt_update_ops->deferred); + pt_update_ops->start = ~0x0ull; + pt_update_ops->last = 0x0ull; +} + +/** + * 
xe_pt_update_ops_prepare() - Prepare PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Prepare PT update operations, which includes updating internal PT state,
+ * allocating memory for page tables, populating the page tables being pruned
+ * in, and creating PT update operations for leaf insertion / removal.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+	struct xe_vm_pgtable_update_ops *pt_update_ops =
+		&vops->pt_update_ops[tile->id];
+	struct xe_vma_op *op;
+	int err;
+
+	lockdep_assert_held(&vops->vm->lock);
+	xe_vm_assert_held(vops->vm);
+
+	xe_pt_update_ops_init(pt_update_ops);
+
+	err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
+				      tile_to_xe(tile)->info.tile_count);
+	if (err)
+		return err;
+
+	list_for_each_entry(op, &vops->list, link) {
+		err = op_prepare(vops->vm, tile, pt_update_ops, op);
+
+		if (err)
+			return err;
+	}
+
+	xe_tile_assert(tile, pt_update_ops->current_op <=
+		       pt_update_ops->num_ops);
+
+	return 0;
+}
+
+static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+			   struct xe_vm_pgtable_update_ops *pt_update_ops,
+			   struct xe_vma *vma, struct dma_fence *fence)
+{
+	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+				   pt_update_ops->wait_vm_bookkeep ?
+				   DMA_RESV_USAGE_KERNEL :
+				   DMA_RESV_USAGE_BOOKKEEP);
+	vma->tile_present |= BIT(tile->id);
+	vma->tile_staged &= ~BIT(tile->id);
+	if (xe_vma_is_userptr(vma)) {
+		lockdep_assert_held_read(&vm->userptr.notifier_lock);
+		to_userptr_vma(vma)->userptr.initial_bind = true;
 	}
 
 	/*
-	 * Even if we were already evicted and unbind to destroy, we need to
-	 * clear again here. The eviction may have updated pagetables at a
-	 * lower level, because it needs to be more conservative.
+	 * Kick rebind worker if this bind triggers preempt fences and not in
+	 * the rebind worker
 	 */
-	fence = xe_migrate_update_pgtables(tile->migrate,
-					   vm, NULL, q ? q :
-					   vm->q[tile->id],
-					   entries, num_entries,
-					   syncs, num_syncs,
-					   &unbind_pt_update.base);
-	if (!IS_ERR(fence)) {
-		int err;
+	if (pt_update_ops->wait_vm_bookkeep &&
+	    xe_vm_in_preempt_fence_mode(vm) &&
+	    !current->mm)
+		xe_vm_queue_rebind_worker(vm);
+}
 
-		err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
-					    &xe_range_fence_kfree_ops,
-					    unbind_pt_update.base.start,
-					    unbind_pt_update.base.last, fence);
-		if (err)
-			dma_fence_wait(fence, false);
-
-		/* TLB invalidation must be done before signaling unbind */
-		err = invalidation_fence_init(tile->primary_gt, ifence, fence,
-					      xe_vma_start(vma),
-					      xe_vma_end(vma),
-					      xe_vma_vm(vma)->usm.asid);
-		if (err) {
-			dma_fence_put(fence);
-			kfree(ifence);
-			return ERR_PTR(err);
-		}
-		fence = &ifence->base.base;
-
-		/* add shared fence now for pagetable delayed destroy */
-		dma_resv_add_fence(xe_vm_resv(vm), fence,
+static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+			     struct xe_vm_pgtable_update_ops *pt_update_ops,
+			     struct xe_vma *vma, struct dma_fence *fence)
+{
+	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+				   pt_update_ops->wait_vm_bookkeep ?
+				   DMA_RESV_USAGE_KERNEL :
 				   DMA_RESV_USAGE_BOOKKEEP);
-
-		/* This fence will be installed by caller when doing eviction */
-		if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
-			dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
-					   DMA_RESV_USAGE_BOOKKEEP);
-		xe_pt_commit_unbind(vma, entries, num_entries,
-				    unbind_pt_update.locked ? &deferred : NULL);
-		vma->tile_present &= ~BIT(tile->id);
-	} else {
-		kfree(rfence);
-		kfree(ifence);
-	}
-
-	if (!vma->tile_present)
+	vma->tile_present &= ~BIT(tile->id);
+	if (!vma->tile_present) {
 		list_del_init(&vma->combined_links.rebind);
+		if (xe_vma_is_userptr(vma)) {
+			lockdep_assert_held_read(&vm->userptr.notifier_lock);
 
-	if (unbind_pt_update.locked) {
-		xe_tile_assert(tile, xe_vma_is_userptr(vma));
-
-		if (!vma->tile_present) {
 			spin_lock(&vm->userptr.invalidated_lock);
 			list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
 			spin_unlock(&vm->userptr.invalidated_lock);
 		}
-		up_read(&vm->userptr.notifier_lock);
-		xe_bo_put_commit(&deferred);
+	}
+}
+
+static void op_commit(struct xe_vm *vm,
+		      struct xe_tile *tile,
+		      struct xe_vm_pgtable_update_ops *pt_update_ops,
+		      struct xe_vma_op *op, struct dma_fence *fence)
+{
+	xe_vm_assert_held(vm);
+
+	switch (op->base.op) {
+	case DRM_GPUVA_OP_MAP:
+		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+			break;
+
+		bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence);
+		break;
+	case DRM_GPUVA_OP_REMAP:
+		unbind_op_commit(vm, tile, pt_update_ops,
+				 gpuva_to_vma(op->base.remap.unmap->va), fence);
+
+		if (op->remap.prev)
+			bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
+				       fence);
+		if (op->remap.next)
+			bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
+				       fence);
+		break;
+	case DRM_GPUVA_OP_UNMAP:
+		unbind_op_commit(vm, tile, pt_update_ops,
+				 gpuva_to_vma(op->base.unmap.va), fence);
+		break;
+	case DRM_GPUVA_OP_PREFETCH:
+		bind_op_commit(vm, tile, pt_update_ops,
+			       gpuva_to_vma(op->base.prefetch.va), fence);
+		break;
+	default:
+		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+	}
+}
+
+static const struct xe_migrate_pt_update_ops migrate_ops = {
+	.populate = xe_vm_populate_pgtable,
+	.clear = xe_migrate_clear_pgtable_callback,
+	.pre_commit = xe_pt_pre_commit,
+};
+
+static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+	.populate = xe_vm_populate_pgtable,
+	.clear = xe_migrate_clear_pgtable_callback,
+	.pre_commit = xe_pt_userptr_pre_commit,
+};
+
+/**
+ * xe_pt_update_ops_run() - Run PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Run PT update operations, which includes committing internal PT state
+ * changes, creating a job for the PT update operations for leaf insertion /
+ * removal, and installing the job fence in various places.
+ *
+ * Return: fence on success, negative ERR_PTR on error.
+ */
+struct dma_fence *
+xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+	struct xe_vm *vm = vops->vm;
+	struct xe_vm_pgtable_update_ops *pt_update_ops =
+		&vops->pt_update_ops[tile->id];
+	struct dma_fence *fence;
+	struct invalidation_fence *ifence = NULL;
+	struct xe_range_fence *rfence;
+	struct xe_vma_op *op;
+	int err = 0;
+	struct xe_migrate_pt_update update = {
+		.ops = pt_update_ops->needs_userptr_lock ?
+			&userptr_migrate_ops :
+			&migrate_ops,
+		.vops = vops,
+		.tile_id = tile->id,
+	};
+
+	lockdep_assert_held(&vm->lock);
+	xe_vm_assert_held(vm);
+
+	if (!pt_update_ops->current_op) {
+		xe_tile_assert(tile, xe_vm_in_fault_mode(vm));
+
+		return dma_fence_get_stub();
+	}
+
+	if (pt_update_ops->needs_invalidation) {
+		ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+		if (!ifence)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
+	if (!rfence) {
+		err = -ENOMEM;
+		goto free_ifence;
+	}
+
+	fence = xe_migrate_update_pgtables(tile->migrate, &update);
+	if (IS_ERR(fence)) {
+		err = PTR_ERR(fence);
+		goto free_rfence;
+	}
+
+	if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
+				  &xe_range_fence_kfree_ops,
+				  pt_update_ops->start,
+				  pt_update_ops->last, fence))
+		dma_fence_wait(fence, false);
+
+	/* tlb invalidation must be done before signaling rebind */
+	if (ifence) {
+		err = invalidation_fence_init(tile->primary_gt, ifence, fence,
+					      pt_update_ops->start,
+					      pt_update_ops->last,
+					      vm->usm.asid);
+		if (err)
+			goto put_fence;
+		fence = &ifence->base.base;
+	}
+
+	dma_resv_add_fence(xe_vm_resv(vm), fence,
+			   pt_update_ops->wait_vm_bookkeep ?
+			   DMA_RESV_USAGE_KERNEL :
+			   DMA_RESV_USAGE_BOOKKEEP);
+
+	list_for_each_entry(op, &vops->list, link)
+		op_commit(vops->vm, tile, pt_update_ops, op, fence);
+
+	if (pt_update_ops->needs_userptr_lock)
+		up_read(&vm->userptr.notifier_lock);
+
+	return fence;
+
+put_fence:
+	if (pt_update_ops->needs_userptr_lock)
+		up_read(&vm->userptr.notifier_lock);
+	dma_fence_put(fence);
+free_rfence:
+	kfree(rfence);
+free_ifence:
+	kfree(ifence);
+
+	return ERR_PTR(err);
+}
+
+/**
+ * xe_pt_update_ops_fini() - Finish PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Finish PT update operations by committing to destroy page table memory.
+ */
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+	struct xe_vm_pgtable_update_ops *pt_update_ops =
+		&vops->pt_update_ops[tile->id];
+	int i;
+
+	lockdep_assert_held(&vops->vm->lock);
+	xe_vm_assert_held(vops->vm);
+
+	/* FIXME: Not 100% correct */
+	for (i = 0; i < pt_update_ops->num_ops; ++i) {
+		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+		if (pt_op->bind)
+			xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
+	}
+	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+}
+
+/**
+ * xe_pt_update_ops_abort() - Abort PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Abort PT update operations by unwinding internal PT state.
+ */
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+	lockdep_assert_held(&vops->vm->lock);
+	xe_vm_assert_held(vops->vm);
+
+	/* FIXME: Just kill VM for now + cleanup PTs */
+	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+	xe_vm_kill(vops->vm, false);
+}
 
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 71a4fbfcff43..9ab386431cad 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -17,6 +17,7 @@ struct xe_sync_entry;
 struct xe_tile;
 struct xe_vm;
 struct xe_vma;
+struct xe_vma_ops;
 
 /* Largest huge pte is currently 1GiB. May become device dependent.
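 * Worked example (assuming the usual 4 KiB granule with 512 entries per
 * level, i.e. a level-n entry spanning 1ull << (12 + 9 * n) bytes): a
 * level-1 entry maps 2 MiB and a level-2 entry maps 1 GiB, hence 2 here.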
 */
 #define MAX_HUGEPTE_LEVEL 2
 
@@ -34,14 +35,11 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
 void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
 
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
-		 struct xe_sync_entry *syncs, u32 num_syncs,
-		 bool rebind);
-
-struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
-		   struct xe_sync_entry *syncs, u32 num_syncs);
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
+struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
+				       struct xe_vma_ops *vops);
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);
 
 bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
 
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 2093150f461e..384cc04de719 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -78,6 +78,8 @@ struct xe_vm_pgtable_update {
 struct xe_vm_pgtable_update_op {
 	/** @entries: entries to update for this operation */
 	struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+	/** @vma: VMA for operation, operation not valid if NULL */
+	struct xe_vma *vma;
 	/** @num_entries: number of entries for this update operation */
 	u32 num_entries;
 	/** @bind: is a bind */
@@ -86,4 +88,38 @@ struct xe_vm_pgtable_update_op {
 	bool rebind;
 };
 
+/** struct xe_vm_pgtable_update_ops: page table update operations */
+struct xe_vm_pgtable_update_ops {
+	/** @ops: operations */
+	struct xe_vm_pgtable_update_op *ops;
+	/** @deferred: deferred list to destroy PT entries */
+	struct llist_head deferred;
+	/** @q: exec queue for PT operations */
+	struct xe_exec_queue *q;
+	/** @start: start address of ops */
+	u64 start;
+	/** @last: last address of ops */
+	u64 last;
+	/** @num_ops: number of operations */
+	u32 num_ops;
+	/** @current_op: current operation index */
+	u32 current_op;
+	/** @needs_userptr_lock: Needs userptr lock */
+	bool needs_userptr_lock;
+	/** @needs_invalidation: Needs invalidation */
+	bool needs_invalidation;
+	/**
+	 * @wait_vm_bookkeep: PT operations need to wait until VM is idle
+	 * (bookkeep dma-resv slots are idle) and stage all future VM activity
+	 * behind these operations (install PT operations into VM kernel
+	 * dma-resv slot).
+	 */
+	bool wait_vm_bookkeep;
+	/**
+	 * @wait_vm_kernel: PT operations need to wait until VM kernel dma-resv
+	 * slots are idle.
+	 */
+	bool wait_vm_kernel;
+};
+
 #endif
 
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 8941522b7705..f3060979e63f 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -84,6 +84,13 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
 				  unsigned int size)
 {
+	/*
+	 * BB too large, return -ENOBUFS indicating the user should split the
+	 * array of binds into smaller chunks.
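+	 *
+	 * Caller-side sketch (hypothetical caller; the real recovery is the
+	 * unwind in the bind IOCTL paths):
+	 *
+	 *	sa_bo = xe_sa_bo_new(sa_manager, size);
+	 *	if (IS_ERR(sa_bo) && PTR_ERR(sa_bo) == -ENOBUFS)
+	 *		// split the array of binds in two and retry each half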
+ */ + if (size > sa_manager->base.size) + return ERR_PTR(-ENOBUFS); + return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0); } diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 6677874af5a4..73cc6b0efcef 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -313,7 +313,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm) #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000 -/* +/** * xe_vm_kill() - VM Kill * @vm: The VM. * @unlocked: Flag indicates the VM's dma-resv is not held @@ -321,7 +321,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm) * Kill the VM by setting banned flag indicated VM is no longer available for * use. If in preempt fence mode, also kill all exec queue attached to the VM. */ -static void xe_vm_kill(struct xe_vm *vm, bool unlocked) +void xe_vm_kill(struct xe_vm *vm, bool unlocked) { struct xe_exec_queue *q; @@ -798,7 +798,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) struct xe_vma *vma, *next; struct xe_vma_ops vops; struct xe_vma_op *op, *next_op; - int err; + int err, i; lockdep_assert_held(&vm->lock); if ((xe_vm_in_lr_mode(vm) && !rebind_worker) || @@ -806,6 +806,8 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) return 0; xe_vma_ops_init(&vops, vm, NULL, NULL, 0); + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) + vops.pt_update_ops[i].wait_vm_bookkeep = true; xe_vm_assert_held(vm); list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) { @@ -850,6 +852,8 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma struct dma_fence *fence = NULL; struct xe_vma_ops vops; struct xe_vma_op *op, *next_op; + struct xe_tile *tile; + u8 id; int err; lockdep_assert_held(&vm->lock); @@ -857,6 +861,11 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); xe_vma_ops_init(&vops, vm, NULL, NULL, 0); + for_each_tile(tile, vm->xe, id) { + vops.pt_update_ops[id].wait_vm_bookkeep = true; + vops.pt_update_ops[tile->id].q = + xe_tile_migrate_exec_queue(tile); + } err = xe_vm_ops_add_rebind(&vops, vma, tile_mask); if (err) @@ -1697,147 +1706,6 @@ to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) return q ? q : vm->q[0]; } -static struct dma_fence * -xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs, - bool first_op, bool last_op) -{ - struct xe_vm *vm = xe_vma_vm(vma); - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); - struct xe_tile *tile; - struct dma_fence *fence = NULL; - struct dma_fence **fences = NULL; - struct dma_fence_array *cf = NULL; - int cur_fence = 0; - int number_tiles = hweight8(vma->tile_present); - int err; - u8 id; - - trace_xe_vma_unbind(vma); - - if (number_tiles > 1) { - fences = kmalloc_array(number_tiles, sizeof(*fences), - GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); - } - - for_each_tile(tile, vm->xe, id) { - if (!(vma->tile_present & BIT(id))) - goto next; - - fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id], - first_op ? syncs : NULL, - first_op ? 
num_syncs : 0); - if (IS_ERR(fence)) { - err = PTR_ERR(fence); - goto err_fences; - } - - if (fences) - fences[cur_fence++] = fence; - -next: - if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) - q = list_next_entry(q, multi_gt_list); - } - - if (fences) { - cf = dma_fence_array_create(number_tiles, fences, - vm->composite_fence_ctx, - vm->composite_fence_seqno++, - false); - if (!cf) { - --vm->composite_fence_seqno; - err = -ENOMEM; - goto err_fences; - } - } - - fence = cf ? &cf->base : !fence ? - xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence; - - return fence; - -err_fences: - if (fences) { - while (cur_fence) - dma_fence_put(fences[--cur_fence]); - kfree(fences); - } - - return ERR_PTR(err); -} - -static struct dma_fence * -xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs, - u8 tile_mask, bool first_op, bool last_op) -{ - struct xe_tile *tile; - struct dma_fence *fence; - struct dma_fence **fences = NULL; - struct dma_fence_array *cf = NULL; - struct xe_vm *vm = xe_vma_vm(vma); - int cur_fence = 0; - int number_tiles = hweight8(tile_mask); - int err; - u8 id; - - trace_xe_vma_bind(vma); - - if (number_tiles > 1) { - fences = kmalloc_array(number_tiles, sizeof(*fences), - GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); - } - - for_each_tile(tile, vm->xe, id) { - if (!(tile_mask & BIT(id))) - goto next; - - fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id], - first_op ? syncs : NULL, - first_op ? num_syncs : 0, - vma->tile_present & BIT(id)); - if (IS_ERR(fence)) { - err = PTR_ERR(fence); - goto err_fences; - } - - if (fences) - fences[cur_fence++] = fence; - -next: - if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) - q = list_next_entry(q, multi_gt_list); - } - - if (fences) { - cf = dma_fence_array_create(number_tiles, fences, - vm->composite_fence_ctx, - vm->composite_fence_seqno++, - false); - if (!cf) { - --vm->composite_fence_seqno; - err = -ENOMEM; - goto err_fences; - } - } - - return cf ? 
&cf->base : fence; - -err_fences: - if (fences) { - while (cur_fence) - dma_fence_put(fences[--cur_fence]); - kfree(fences); - } - - return ERR_PTR(err); -} - static struct xe_user_fence * find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs) { @@ -1853,48 +1721,6 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs) return NULL; } -static struct dma_fence * -xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs, - u8 tile_mask, bool immediate, bool first_op, bool last_op) -{ - struct dma_fence *fence; - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); - - xe_vm_assert_held(vm); - xe_bo_assert_held(bo); - - if (immediate) { - fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask, - first_op, last_op); - if (IS_ERR(fence)) - return fence; - } else { - xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); - - fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm); - } - - return fence; -} - -static struct dma_fence * -xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, - struct xe_exec_queue *q, struct xe_sync_entry *syncs, - u32 num_syncs, bool first_op, bool last_op) -{ - struct dma_fence *fence; - - xe_vm_assert_held(vm); - xe_bo_assert_held(xe_vma_bo(vma)); - - fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op); - if (IS_ERR(fence)) - return fence; - - return fence; -} - #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \ DRM_XE_VM_CREATE_FLAG_LR_MODE | \ DRM_XE_VM_CREATE_FLAG_FAULT_MODE) @@ -2035,21 +1861,6 @@ static const u32 region_to_mem_type[] = { XE_PL_VRAM1, }; -static struct dma_fence * -xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, - struct xe_exec_queue *q, struct xe_sync_entry *syncs, - u32 num_syncs, bool first_op, bool last_op) -{ - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); - - if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) { - return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs, - vma->tile_mask, true, first_op, last_op); - } else { - return xe_exec_queue_last_fence_get(wait_exec_queue, vm); - } -} - static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, bool post_commit) { @@ -2337,13 +2148,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) return err; } -static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, - struct drm_gpuva_ops *ops, - struct xe_sync_entry *syncs, u32 num_syncs, - struct xe_vma_ops *vops, bool last) +static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, + struct xe_vma_ops *vops) { struct xe_device *xe = vm->xe; - struct xe_vma_op *last_op = NULL; struct drm_gpuva_op *__op; struct xe_tile *tile; u8 id, tile_mask = 0; @@ -2357,19 +2165,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, drm_gpuva_for_each_op(__op, ops) { struct xe_vma_op *op = gpuva_op_to_vma_op(__op); struct xe_vma *vma; - bool first = list_empty(&vops->list); unsigned int flags = 0; INIT_LIST_HEAD(&op->link); list_add_tail(&op->link, &vops->list); - - if (first) { - op->flags |= XE_VMA_OP_FIRST; - op->num_syncs = num_syncs; - op->syncs = syncs; - } - - op->q = q; op->tile_mask = tile_mask; switch (op->base.op) { @@ -2482,197 +2281,21 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, } case DRM_GPUVA_OP_UNMAP: case DRM_GPUVA_OP_PREFETCH: + /* FIXME: Need to skip some prefetch ops */ xe_vma_ops_incr_pt_update_ops(vops, 
op->tile_mask); break; default: drm_warn(&vm->xe->drm, "NOT POSSIBLE"); } - last_op = op; - err = xe_vma_op_commit(vm, op); if (err) return err; } - /* FIXME: Unhandled corner case */ - XE_WARN_ON(!last_op && last && !list_empty(&vops->list)); - - if (!last_op) - return 0; - - if (last) { - last_op->flags |= XE_VMA_OP_LAST; - last_op->num_syncs = num_syncs; - last_op->syncs = syncs; - } - return 0; } -static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma, - struct xe_vma_op *op) -{ - struct dma_fence *fence = NULL; - - lockdep_assert_held(&vm->lock); - - xe_vm_assert_held(vm); - xe_bo_assert_held(xe_vma_bo(vma)); - - switch (op->base.op) { - case DRM_GPUVA_OP_MAP: - fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma), - op->syncs, op->num_syncs, - op->tile_mask, - op->map.immediate || !xe_vm_in_fault_mode(vm), - op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST); - break; - case DRM_GPUVA_OP_REMAP: - { - bool prev = !!op->remap.prev; - bool next = !!op->remap.next; - - if (!op->remap.unmap_done) { - if (prev || next) - vma->gpuva.flags |= XE_VMA_FIRST_REBIND; - fence = xe_vm_unbind(vm, vma, op->q, op->syncs, - op->num_syncs, - op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST && - !prev && !next); - if (IS_ERR(fence)) - break; - op->remap.unmap_done = true; - } - - if (prev) { - op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND; - dma_fence_put(fence); - fence = xe_vm_bind(vm, op->remap.prev, op->q, - xe_vma_bo(op->remap.prev), op->syncs, - op->num_syncs, - op->remap.prev->tile_mask, true, - false, - op->flags & XE_VMA_OP_LAST && !next); - op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND; - if (IS_ERR(fence)) - break; - op->remap.prev = NULL; - } - - if (next) { - op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND; - dma_fence_put(fence); - fence = xe_vm_bind(vm, op->remap.next, op->q, - xe_vma_bo(op->remap.next), - op->syncs, op->num_syncs, - op->remap.next->tile_mask, true, - false, op->flags & XE_VMA_OP_LAST); - op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND; - if (IS_ERR(fence)) - break; - op->remap.next = NULL; - } - - break; - } - case DRM_GPUVA_OP_UNMAP: - fence = xe_vm_unbind(vm, vma, op->q, op->syncs, - op->num_syncs, op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST); - break; - case DRM_GPUVA_OP_PREFETCH: - fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs, - op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST); - break; - default: - drm_warn(&vm->xe->drm, "NOT POSSIBLE"); - } - - if (IS_ERR(fence)) - trace_xe_vma_fail(vma); - - return fence; -} - -static struct dma_fence * -__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, - struct xe_vma_op *op) -{ - struct dma_fence *fence; - int err; - -retry_userptr: - fence = op_execute(vm, vma, op); - if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) { - lockdep_assert_held_write(&vm->lock); - - if (op->base.op == DRM_GPUVA_OP_REMAP) { - if (!op->remap.unmap_done) - vma = gpuva_to_vma(op->base.remap.unmap->va); - else if (op->remap.prev) - vma = op->remap.prev; - else - vma = op->remap.next; - } - - if (xe_vma_is_userptr(vma)) { - err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); - if (!err) - goto retry_userptr; - - fence = ERR_PTR(err); - trace_xe_vma_fail(vma); - } - } - - return fence; -} - -static struct dma_fence * -xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) -{ - struct dma_fence *fence = ERR_PTR(-ENOMEM); - - lockdep_assert_held(&vm->lock); - - switch (op->base.op) { - case DRM_GPUVA_OP_MAP: - fence = __xe_vma_op_execute(vm, op->map.vma, op); 
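
/*
 * The per-op execution removed here is replaced by the per-tile pipeline in
 * ops_execute() below; in sketch form (error handling and the multi-tile
 * dma_fence_array elided):
 *
 *	for_each_tile(tile, vm->xe, id)
 *		err = xe_pt_update_ops_prepare(tile, vops);
 *	for_each_tile(tile, vm->xe, id)
 *		fence = xe_pt_update_ops_run(tile, vops);
 *	for_each_tile(tile, vm->xe, id)
 *		xe_pt_update_ops_fini(tile, vops);
 *	// on any failure: xe_pt_update_ops_abort(tile, vops) per tile
 */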
- break; - case DRM_GPUVA_OP_REMAP: - { - struct xe_vma *vma; - - if (!op->remap.unmap_done) - vma = gpuva_to_vma(op->base.remap.unmap->va); - else if (op->remap.prev) - vma = op->remap.prev; - else - vma = op->remap.next; - - fence = __xe_vma_op_execute(vm, vma, op); - break; - } - case DRM_GPUVA_OP_UNMAP: - fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va), - op); - break; - case DRM_GPUVA_OP_PREFETCH: - fence = __xe_vma_op_execute(vm, - gpuva_to_vma(op->base.prefetch.va), - op); - break; - default: - drm_warn(&vm->xe->drm, "NOT POSSIBLE"); - } - - return fence; -} - static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, bool post_commit, bool prev_post_commit, bool next_post_commit) @@ -2858,23 +2481,110 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec, return 0; } +static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) +{ + struct xe_exec_queue *q = vops->q; + struct xe_tile *tile; + int number_tiles = 0; + u8 id; + + for_each_tile(tile, vm->xe, id) { + if (vops->pt_update_ops[id].num_ops) + ++number_tiles; + + if (vops->pt_update_ops[id].q) + continue; + + if (q) { + vops->pt_update_ops[id].q = q; + if (vm->pt_root[id] && !list_empty(&q->multi_gt_list)) + q = list_next_entry(q, multi_gt_list); + } else { + vops->pt_update_ops[id].q = vm->q[id]; + } + } + + return number_tiles; +} + static struct dma_fence *ops_execute(struct xe_vm *vm, struct xe_vma_ops *vops) { - struct xe_vma_op *op, *next; + struct xe_tile *tile; struct dma_fence *fence = NULL; + struct dma_fence **fences = NULL; + struct dma_fence_array *cf = NULL; + int number_tiles = 0, current_fence = 0, err; + u8 id; - list_for_each_entry_safe(op, next, &vops->list, link) { - dma_fence_put(fence); - fence = xe_vma_op_execute(vm, op); - if (IS_ERR(fence)) { - drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld", - op->base.op, PTR_ERR(fence)); - fence = ERR_PTR(-ENOSPC); - break; + number_tiles = vm_ops_setup_tile_args(vm, vops); + if (number_tiles == 0) + return ERR_PTR(-ENODATA); + + if (number_tiles > 1) { + fences = kmalloc_array(number_tiles, sizeof(*fences), + GFP_KERNEL); + if (!fences) + return ERR_PTR(-ENOMEM); + } + + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + err = xe_pt_update_ops_prepare(tile, vops); + if (err) { + fence = ERR_PTR(err); + goto err_out; } } + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + fence = xe_pt_update_ops_run(tile, vops); + if (IS_ERR(fence)) + goto err_out; + + if (fences) + fences[current_fence++] = fence; + } + + if (fences) { + cf = dma_fence_array_create(number_tiles, fences, + vm->composite_fence_ctx, + vm->composite_fence_seqno++, + false); + if (!cf) { + --vm->composite_fence_seqno; + fence = ERR_PTR(-ENOMEM); + goto err_out; + } + fence = &cf->base; + } + + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + xe_pt_update_ops_fini(tile, vops); + } + + return fence; + +err_out: + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + xe_pt_update_ops_abort(tile, vops); + } + while (current_fence) + dma_fence_put(fences[--current_fence]); + kfree(fences); + kfree(cf); + return fence; } @@ -2955,12 +2665,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, fence = ops_execute(vm, vops); if (IS_ERR(fence)) { err = PTR_ERR(fence); - /* FIXME: Killing VM rather than proper error handling */ - xe_vm_kill(vm, false); goto unlock; - } else { - 
vm_bind_ioctl_ops_fini(vm, vops, fence); } + + vm_bind_ioctl_ops_fini(vm, vops, fence); } unlock: @@ -3317,8 +3025,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto unwind_ops; } - err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs, - &vops, i == args->num_binds - 1); + err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); if (err) goto unwind_ops; } diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index b481608b12f1..c864dba35e1d 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm) return drm_gpuvm_resv(&vm->gpuvm); } +void xe_vm_kill(struct xe_vm *vm, bool unlocked); + /** * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held. * @vm: The vm diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 211c88801182..27d651093d30 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -26,14 +26,12 @@ struct xe_vm_pgtable_update_op; #define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS #define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1) #define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2) -#define XE_VMA_FIRST_REBIND (DRM_GPUVA_USERBITS << 3) -#define XE_VMA_LAST_REBIND (DRM_GPUVA_USERBITS << 4) -#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5) -#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6) -#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7) -#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8) -#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9) -#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 10) +#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 3) +#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 4) +#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 5) +#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 6) +#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7) +#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8) /** struct xe_userptr - User pointer */ struct xe_userptr { @@ -100,6 +98,9 @@ struct xe_vma { */ u8 tile_present; + /** @tile_staged: bind is staged for this VMA */ + u8 tile_staged; + /** * @pat_index: The pat index to use when encoding the PTEs for this vma. 
 */
@@ -315,31 +316,18 @@ struct xe_vma_op_prefetch {

 /** enum xe_vma_op_flags - flags for VMA operation */
 enum xe_vma_op_flags {
-	/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
-	XE_VMA_OP_FIRST = BIT(0),
-	/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
-	XE_VMA_OP_LAST = BIT(1),
 	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
-	XE_VMA_OP_COMMITTED = BIT(2),
+	XE_VMA_OP_COMMITTED = BIT(0),
 	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
-	XE_VMA_OP_PREV_COMMITTED = BIT(3),
+	XE_VMA_OP_PREV_COMMITTED = BIT(1),
 	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
-	XE_VMA_OP_NEXT_COMMITTED = BIT(4),
+	XE_VMA_OP_NEXT_COMMITTED = BIT(2),
 };

 /** struct xe_vma_op - VMA operation */
 struct xe_vma_op {
 	/** @base: GPUVA base operation */
 	struct drm_gpuva_op base;
-	/** @q: exec queue for this operation */
-	struct xe_exec_queue *q;
-	/**
-	 * @syncs: syncs for this operation, only used on first and last
-	 * operation
-	 */
-	struct xe_sync_entry *syncs;
-	/** @num_syncs: number of syncs */
-	u32 num_syncs;
 	/** @link: async operation link */
 	struct list_head link;
 	/** @flags: operation flags */
@@ -363,19 +351,14 @@ struct xe_vma_ops {
 	struct list_head list;
 	/** @vm: VM */
 	struct xe_vm *vm;
-	/** @q: exec queue these operations */
+	/** @q: exec queue for VMA operations */
 	struct xe_exec_queue *q;
 	/** @syncs: syncs these operation */
 	struct xe_sync_entry *syncs;
 	/** @num_syncs: number of syncs */
 	u32 num_syncs;
 	/** @pt_update_ops: page table update operations */
-	struct {
-		/** @ops: operations */
-		struct xe_vm_pgtable_update_op *ops;
-		/** @num_ops: number of operations */
-		u32 num_ops;
-	} pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+	struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
 };

 #endif

From 282e6f846d8c3fcf36293f68f38d814645c3b852 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Wed, 3 Jul 2024 21:16:50 -0700
Subject: [PATCH 08/95] drm/xe: Update VM trace events

The trace events have changed with the move to a single job per VM bind
IOCTL; update the trace events to align with the old behavior as much as
possible.
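For reference, the new xe_vm_ops_fail event below follows the standard
tracepoint template pattern: one DECLARE_EVENT_CLASS provides the record
layout, and each DEFINE_EVENT stamps out a named trace_<event>() call
from it. A simplified sketch of that pairing (the real xe_vm class in
xe_trace_bo.h records more fields than shown here):

DECLARE_EVENT_CLASS(xe_vm,
		    TP_PROTO(struct xe_vm *vm),
		    TP_ARGS(vm),

		    TP_STRUCT__entry(
			     __field(struct xe_vm *, vm)
			     ),

		    TP_fast_assign(
			   __entry->vm = vm;
			   ),

		    TP_printk("vm=%p", __entry->vm)
);

/* Emits a trace_xe_vm_ops_fail(vm) helper using the class's layout */
DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);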
Signed-off-by: Matthew Brost Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-6-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_trace_bo.h | 10 ++++---- drivers/gpu/drm/xe/xe_vm.c | 42 ++++++++++++++++++++++++++++++-- 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h index f39f09ed3495..9b1a1d4304ae 100644 --- a/drivers/gpu/drm/xe/xe_trace_bo.h +++ b/drivers/gpu/drm/xe/xe_trace_bo.h @@ -117,11 +117,6 @@ DEFINE_EVENT(xe_vma, xe_vma_acc, TP_ARGS(vma) ); -DEFINE_EVENT(xe_vma, xe_vma_fail, - TP_PROTO(struct xe_vma *vma), - TP_ARGS(vma) -); - DEFINE_EVENT(xe_vma, xe_vma_bind, TP_PROTO(struct xe_vma *vma), TP_ARGS(vma) @@ -237,6 +232,11 @@ DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit, TP_ARGS(vm) ); +DEFINE_EVENT(xe_vm, xe_vm_ops_fail, + TP_PROTO(struct xe_vm *vm), + TP_ARGS(vm) +); + #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 73cc6b0efcef..5232856cc3fb 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -2481,6 +2481,38 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec, return 0; } +static void op_trace(struct xe_vma_op *op) +{ + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + trace_xe_vma_bind(op->map.vma); + break; + case DRM_GPUVA_OP_REMAP: + trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va)); + if (op->remap.prev) + trace_xe_vma_bind(op->remap.prev); + if (op->remap.next) + trace_xe_vma_bind(op->remap.next); + break; + case DRM_GPUVA_OP_UNMAP: + trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va)); + break; + case DRM_GPUVA_OP_PREFETCH: + trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va)); + break; + default: + XE_WARN_ON("NOT POSSIBLE"); + } +} + +static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops) +{ + struct xe_vma_op *op; + + list_for_each_entry(op, &vops->list, link) + op_trace(op); +} + static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) { struct xe_exec_queue *q = vops->q; @@ -2524,8 +2556,10 @@ static struct dma_fence *ops_execute(struct xe_vm *vm, if (number_tiles > 1) { fences = kmalloc_array(number_tiles, sizeof(*fences), GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); + if (!fences) { + fence = ERR_PTR(-ENOMEM); + goto err_trace; + } } for_each_tile(tile, vm->xe, id) { @@ -2539,6 +2573,8 @@ static struct dma_fence *ops_execute(struct xe_vm *vm, } } + trace_xe_vm_ops_execute(vops); + for_each_tile(tile, vm->xe, id) { if (!vops->pt_update_ops[id].num_ops) continue; @@ -2585,6 +2621,8 @@ err_out: kfree(fences); kfree(cf); +err_trace: + trace_xe_vm_ops_fail(vm); return fence; } From a708f6501c692551e3d4ea618c44021f436730d9 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Wed, 3 Jul 2024 21:16:51 -0700 Subject: [PATCH 09/95] drm/xe: Update PT layer with better error handling Update PT layer so if a memory allocation for a PTE fails the error can be propagated to the user without requiring the VM to be killed. 
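Concretely, the PT layer moves to a two-phase scheme: everything that can
fail (memory allocations included) happens in a prepare step that can be
fully unwound, and the commit step past the point of no return is
allocation-free. A minimal sketch of that shape, with hypothetical names
rather than the driver's actual signatures:

/* Phase 1: may fail; nothing user-visible has changed yet. */
static int update_prepare(struct update *upd)
{
	upd->saved = kcalloc(upd->count, sizeof(*upd->saved), GFP_KERNEL);
	if (!upd->saved)
		return -ENOMEM;	/* propagated to the user, VM stays alive */
	stash_old_entries(upd);	/* remember state for a possible abort */
	return 0;
}

/* Phase 2a: point of no return; must not fail, so no allocations here. */
static void update_commit(struct update *upd)
{
	publish_new_entries(upd);
	kfree(upd->saved);
}

/* Phase 2b: undo a prepared-but-uncommitted update. */
static void update_abort(struct update *upd)
{
	restore_old_entries(upd);
	kfree(upd->saved);
}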
v5: - change return value invalidation_fence_init to void (Matthew Auld) v7: - Invert i,j usage in two places (Matthew Auld) - s/0/NULL (Matthew Auld) - Don't ignore return value of xe_pt_new_shared (Matthew Auld) - Don't check for NULL in xe_pt_entry (Matthew Auld) Signed-off-by: Matthew Brost Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-7-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_pt.c | 234 ++++++++++++++++++++++++++----------- 1 file changed, 168 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index f46f46d46819..44356903accb 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -846,19 +846,27 @@ xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *t } } -static void xe_pt_abort_bind(struct xe_vma *vma, - struct xe_vm_pgtable_update *entries, - u32 num_entries) +static void xe_pt_cancel_bind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries) { u32 i, j; for (i = 0; i < num_entries; i++) { - if (!entries[i].pt_entries) + struct xe_pt *pt = entries[i].pt; + + if (!pt) continue; - for (j = 0; j < entries[i].qwords; j++) - xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL); + if (pt->level) { + for (j = 0; j < entries[i].qwords; j++) + xe_pt_destroy(entries[i].pt_entries[j].pt, + xe_vma_vm(vma)->flags, NULL); + } + kfree(entries[i].pt_entries); + entries[i].pt_entries = NULL; + entries[i].qwords = 0; } } @@ -874,10 +882,61 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma) xe_vm_assert_held(vm); } -static void xe_pt_commit_bind(struct xe_vma *vma, - struct xe_vm_pgtable_update *entries, - u32 num_entries, bool rebind, - struct llist_head *deferred) +static void xe_pt_commit(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries, struct llist_head *deferred) +{ + u32 i, j; + + xe_pt_commit_locks_assert(vma); + + for (i = 0; i < num_entries; i++) { + struct xe_pt *pt = entries[i].pt; + + if (!pt->level) + continue; + + for (j = 0; j < entries[i].qwords; j++) { + struct xe_pt *oldpte = entries[i].pt_entries[j].pt; + + xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred); + } + } +} + +static void xe_pt_abort_bind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries, bool rebind) +{ + int i, j; + + xe_pt_commit_locks_assert(vma); + + for (i = num_entries - 1; i >= 0; --i) { + struct xe_pt *pt = entries[i].pt; + struct xe_pt_dir *pt_dir; + + if (!rebind) + pt->num_live -= entries[i].qwords; + + if (!pt->level) + continue; + + pt_dir = as_xe_pt_dir(pt); + for (j = 0; j < entries[i].qwords; j++) { + u32 j_ = j + entries[i].ofs; + struct xe_pt *newpte = xe_pt_entry(pt_dir, j_); + struct xe_pt *oldpte = entries[i].pt_entries[j].pt; + + pt_dir->children[j_] = oldpte ? 
&oldpte->base : 0; + xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL); + } + } +} + +static void xe_pt_commit_prepare_bind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries, bool rebind) { u32 i, j; @@ -897,12 +956,13 @@ static void xe_pt_commit_bind(struct xe_vma *vma, for (j = 0; j < entries[i].qwords; j++) { u32 j_ = j + entries[i].ofs; struct xe_pt *newpte = entries[i].pt_entries[j].pt; + struct xe_pt *oldpte = NULL; if (xe_pt_entry(pt_dir, j_)) - xe_pt_destroy(xe_pt_entry(pt_dir, j_), - xe_vma_vm(vma)->flags, deferred); + oldpte = xe_pt_entry(pt_dir, j_); pt_dir->children[j_] = &newpte->base; + entries[i].pt_entries[j].pt = oldpte; } } } @@ -926,8 +986,6 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, err = xe_pt_stage_bind(tile, vma, entries, num_entries); if (!err) xe_tile_assert(tile, *num_entries); - else /* abort! */ - xe_pt_abort_bind(vma, entries, *num_entries); return err; } @@ -1305,10 +1363,10 @@ static void invalidation_fence_work_func(struct work_struct *w) ifence->end, ifence->asid); } -static int invalidation_fence_init(struct xe_gt *gt, - struct invalidation_fence *ifence, - struct dma_fence *fence, - u64 start, u64 end, u32 asid) +static void invalidation_fence_init(struct xe_gt *gt, + struct invalidation_fence *ifence, + struct dma_fence *fence, + u64 start, u64 end, u32 asid) { int ret; @@ -1340,8 +1398,6 @@ static int invalidation_fence_init(struct xe_gt *gt, } xe_gt_assert(gt, !ret || ret == -ENOENT); - - return ret && ret != -ENOENT ? ret : 0; } struct xe_pt_stage_unbind_walk { @@ -1426,6 +1482,7 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset, struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base); pgoff_t end_offset; u64 size = 1ull << walk->shifts[--level]; + int err; if (!IS_ALIGNED(addr, size)) addr = xe_walk->modified_start; @@ -1441,7 +1498,10 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset, &end_offset)) return 0; - (void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false); + err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true); + if (err) + return err; + xe_walk->wupd.updates[level].update->qwords = end_offset - offset; return 0; @@ -1509,32 +1569,54 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update, memset64(ptr, empty, num_qwords); } -static void -xe_pt_commit_unbind(struct xe_vma *vma, - struct xe_vm_pgtable_update *entries, u32 num_entries, - struct llist_head *deferred) +static void xe_pt_abort_unbind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries) { - u32 j; + int i, j; xe_pt_commit_locks_assert(vma); - for (j = 0; j < num_entries; ++j) { - struct xe_vm_pgtable_update *entry = &entries[j]; + for (i = num_entries - 1; i >= 0; --i) { + struct xe_vm_pgtable_update *entry = &entries[i]; struct xe_pt *pt = entry->pt; + struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt); + + pt->num_live += entry->qwords; + + if (!pt->level) + continue; + + for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) + pt_dir->children[i] = + entries[i].pt_entries[j - entry->ofs].pt ? 
+ &entries[i].pt_entries[j - entry->ofs].pt->base : NULL; + } +} + +static void +xe_pt_commit_prepare_unbind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries) +{ + int i, j; + + xe_pt_commit_locks_assert(vma); + + for (i = 0; i < num_entries; ++i) { + struct xe_vm_pgtable_update *entry = &entries[i]; + struct xe_pt *pt = entry->pt; + struct xe_pt_dir *pt_dir; pt->num_live -= entry->qwords; - if (pt->level) { - struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt); - u32 i; + if (!pt->level) + continue; - for (i = entry->ofs; i < entry->ofs + entry->qwords; - i++) { - if (xe_pt_entry(pt_dir, i)) - xe_pt_destroy(xe_pt_entry(pt_dir, i), - xe_vma_vm(vma)->flags, deferred); - - pt_dir->children[i] = NULL; - } + pt_dir = as_xe_pt_dir(pt); + for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) { + entry->pt_entries[j - entry->ofs].pt = + xe_pt_entry(pt_dir, j); + pt_dir->children[j] = NULL; } } } @@ -1580,7 +1662,6 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile, { u32 current_op = pt_update_ops->current_op; struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; - struct llist_head *deferred = &pt_update_ops->deferred; int err; xe_bo_assert_held(xe_vma_bo(vma)); @@ -1628,11 +1709,12 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile, /* We bump also if batch_invalidate_tlb is true */ vm->tlb_flush_seqno++; - /* FIXME: Don't commit right away */ vma->tile_staged |= BIT(tile->id); pt_op->vma = vma; - xe_pt_commit_bind(vma, pt_op->entries, pt_op->num_entries, - pt_op->rebind, deferred); + xe_pt_commit_prepare_bind(vma, pt_op->entries, + pt_op->num_entries, pt_op->rebind); + } else { + xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries); } return err; @@ -1644,7 +1726,6 @@ static int unbind_op_prepare(struct xe_tile *tile, { u32 current_op = pt_update_ops->current_op; struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; - struct llist_head *deferred = &pt_update_ops->deferred; int err; if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id))) @@ -1680,9 +1761,7 @@ static int unbind_op_prepare(struct xe_tile *tile, pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma); pt_update_ops->needs_invalidation = true; - /* FIXME: Don't commit right away */ - xe_pt_commit_unbind(vma, pt_op->entries, pt_op->num_entries, - deferred); + xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries); return 0; } @@ -1903,7 +1982,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) struct invalidation_fence *ifence = NULL; struct xe_range_fence *rfence; struct xe_vma_op *op; - int err = 0; + int err = 0, i; struct xe_migrate_pt_update update = { .ops = pt_update_ops->needs_userptr_lock ? 
&userptr_migrate_ops : @@ -1923,8 +2002,10 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) if (pt_update_ops->needs_invalidation) { ifence = kzalloc(sizeof(*ifence), GFP_KERNEL); - if (!ifence) - return ERR_PTR(-ENOMEM); + if (!ifence) { + err = -ENOMEM; + goto kill_vm_tile1; + } } rfence = kzalloc(sizeof(*rfence), GFP_KERNEL); @@ -1939,6 +2020,15 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) goto free_rfence; } + /* Point of no return - VM killed if failure after this */ + for (i = 0; i < pt_update_ops->current_op; ++i) { + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i]; + + xe_pt_commit(pt_op->vma, pt_op->entries, + pt_op->num_entries, &pt_update_ops->deferred); + pt_op->vma = NULL; /* skip in xe_pt_update_ops_abort */ + } + if (xe_range_fence_insert(&vm->rftree[tile->id], rfence, &xe_range_fence_kfree_ops, pt_update_ops->start, @@ -1947,12 +2037,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) /* tlb invalidation must be done before signaling rebind */ if (ifence) { - err = invalidation_fence_init(tile->primary_gt, ifence, fence, - pt_update_ops->start, - pt_update_ops->last, - vm->usm.asid); - if (err) - goto put_fence; + invalidation_fence_init(tile->primary_gt, ifence, fence, + pt_update_ops->start, + pt_update_ops->last, vm->usm.asid); fence = &ifence->base.base; } @@ -1969,14 +2056,13 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) return fence; -put_fence: - if (pt_update_ops->needs_userptr_lock) - up_read(&vm->userptr.notifier_lock); - dma_fence_put(fence); free_rfence: kfree(rfence); free_ifence: kfree(ifence); +kill_vm_tile1: + if (err != -EAGAIN && tile->id) + xe_vm_kill(vops->vm, false); return ERR_PTR(err); } @@ -1997,12 +2083,10 @@ void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops) lockdep_assert_held(&vops->vm->lock); xe_vm_assert_held(vops->vm); - /* FIXME: Not 100% correct */ - for (i = 0; i < pt_update_ops->num_ops; ++i) { + for (i = 0; i < pt_update_ops->current_op; ++i) { struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i]; - if (pt_op->bind) - xe_pt_free_bind(pt_op->entries, pt_op->num_entries); + xe_pt_free_bind(pt_op->entries, pt_op->num_entries); } xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred); } @@ -2016,10 +2100,28 @@ void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops) */ void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops) { + struct xe_vm_pgtable_update_ops *pt_update_ops = + &vops->pt_update_ops[tile->id]; + int i; + lockdep_assert_held(&vops->vm->lock); xe_vm_assert_held(vops->vm); - /* FIXME: Just kill VM for now + cleanup PTs */ + for (i = pt_update_ops->num_ops - 1; i >= 0; --i) { + struct xe_vm_pgtable_update_op *pt_op = + &pt_update_ops->ops[i]; + + if (!pt_op->vma || i >= pt_update_ops->current_op) + continue; + + if (pt_op->bind) + xe_pt_abort_bind(pt_op->vma, pt_op->entries, + pt_op->num_entries, + pt_op->rebind); + else + xe_pt_abort_unbind(pt_op->vma, pt_op->entries, + pt_op->num_entries); + } + xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred); - xe_vm_kill(vops->vm, false); } From 04e9c0ce19ac68afd8be7fd54772db3b0356cf75 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Wed, 3 Jul 2024 21:16:52 -0700 Subject: [PATCH 10/95] drm/xe: Add VM bind IOCTL error injection Add VM bind IOCTL error injection which steals MSB of the bind flags field which if set injects errors at various points in the VM bind IOCTL. Intended to validate error paths. 
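For example, an IGT-style test could request injection on an otherwise
valid bind by setting the stolen bit. A sketch (setup, sync arguments and
most fields elided; vm_id/bo_handle are assumed to have been created
earlier in the test):

	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.bind.obj = bo_handle,
		.bind.range = bo_size,
		.bind.addr = 0x1a0000,
		.bind.op = DRM_XE_VM_BIND_OP_MAP,
		.bind.flags = 1u << 31,	/* FORCE_OP_ERROR, debug builds only */
	};

	/*
	 * Each injected failure advances vm_inject_error_position
	 * round-robin, so repeating the call walks the LOCK, PREPARE and
	 * RUN error points in turn.
	 */
	igt_assert(ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind) < 0);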
Enabled by CONFIG_DRM_XE_DEBUG. v4: - Change define layout (Jonathan) Signed-off-by: Matthew Brost Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-8-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_device_types.h | 12 ++++++++++++ drivers/gpu/drm/xe/xe_pt.c | 12 ++++++++++++ drivers/gpu/drm/xe/xe_vm.c | 24 +++++++++++++++++++++++- drivers/gpu/drm/xe/xe_vm_types.h | 14 ++++++++++++++ 4 files changed, 61 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 3bca6d344744..f0cf9020e463 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -23,6 +23,10 @@ #include "xe_sriov_types.h" #include "xe_step_types.h" +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +#define TEST_VM_OPS_ERROR +#endif + #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) #include "soc/intel_pch.h" #include "intel_display_core.h" @@ -477,6 +481,14 @@ struct xe_device { int mode; } wedged; +#ifdef TEST_VM_OPS_ERROR + /** + * @vm_inject_error_position: inject errors at different places in VM + * bind IOCTL based on this value + */ + u8 vm_inject_error_position; +#endif + /* private: */ #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index 44356903accb..f391de908033 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -1860,6 +1860,12 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops) xe_tile_assert(tile, pt_update_ops->current_op <= pt_update_ops->num_ops); +#ifdef TEST_VM_OPS_ERROR + if (vops->inject_error && + vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE) + return -ENOSPC; +#endif + return 0; } @@ -2000,6 +2006,12 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) return dma_fence_get_stub(); } +#ifdef TEST_VM_OPS_ERROR + if (vops->inject_error && + vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN) + return ERR_PTR(-ENOSPC); +#endif + if (pt_update_ops->needs_invalidation) { ifence = kzalloc(sizeof(*ifence), GFP_KERNEL); if (!ifence) { diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 5232856cc3fb..cf3aea5d8cdc 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -2478,6 +2478,12 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec, return err; } +#ifdef TEST_VM_OPS_ERROR + if (vops->inject_error && + vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK) + return -ENOSPC; +#endif + return 0; } @@ -2714,11 +2720,18 @@ unlock: return err; } -#define SUPPORTED_FLAGS \ +#define SUPPORTED_FLAGS_STUB \ (DRM_XE_VM_BIND_FLAG_READONLY | \ DRM_XE_VM_BIND_FLAG_IMMEDIATE | \ DRM_XE_VM_BIND_FLAG_NULL | \ DRM_XE_VM_BIND_FLAG_DUMPABLE) + +#ifdef TEST_VM_OPS_ERROR +#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR) +#else +#define SUPPORTED_FLAGS SUPPORTED_FLAGS_STUB +#endif + #define XE_64K_PAGE_MASK 0xffffull #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP) @@ -3066,6 +3079,15 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); if (err) goto unwind_ops; + +#ifdef TEST_VM_OPS_ERROR + if (flags & FORCE_OP_ERROR) { + vops.inject_error = true; + vm->xe->vm_inject_error_position = + (vm->xe->vm_inject_error_position + 1) % + FORCE_OP_ERROR_COUNT; + } +#endif } /* Nothing to do */ diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 
27d651093d30..7f9a303e51d8 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -23,6 +23,16 @@ struct xe_user_fence;
 struct xe_vm;
 struct xe_vm_pgtable_update_op;

+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define TEST_VM_OPS_ERROR
+#define FORCE_OP_ERROR		BIT(31)
+
+#define FORCE_OP_ERROR_LOCK	0
+#define FORCE_OP_ERROR_PREPARE	1
+#define FORCE_OP_ERROR_RUN	2
+#define FORCE_OP_ERROR_COUNT	3
+#endif
+
 #define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
 #define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
 #define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
@@ -359,6 +369,10 @@ struct xe_vma_ops {
 	u32 num_syncs;
 	/** @pt_update_ops: page table update operations */
 	struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+#ifdef TEST_VM_OPS_ERROR
+	/** @inject_error: inject error to test error handling */
+	bool inject_error;
+#endif
 };

 #endif

From 9dae9751c7b0086963f5cbb82424b5e4cf58f123 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Tue, 2 Jul 2024 20:37:02 +0200
Subject: [PATCH 11/95] drm/xe: Fix register definition order in xe_regs.h

Swap XEHP_CLOCK_GATE_DIS(0x101014) with GU_DEBUG(0x101018).

Signed-off-by: Michal Wajdeczko
Reviewed-by: Matt Roper
Reviewed-by: Himal Prasad Ghimiray
Link: https://patchwork.freedesktop.org/patch/msgid/20240702183704.1022-2-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/regs/xe_regs.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 23e33ec84902..23ecba38ed41 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -24,12 +24,12 @@
 #define LMEM_INIT			REG_BIT(7)
 #define DRIVERFLR			REG_BIT(31)

-#define GU_DEBUG			XE_REG(0x101018)
-#define DRIVERFLR_STATUS		REG_BIT(31)
-
 #define XEHP_CLOCK_GATE_DIS		XE_REG(0x101014)
 #define SGSI_SIDECLK_DIS		REG_BIT(17)

+#define GU_DEBUG			XE_REG(0x101018)
+#define DRIVERFLR_STATUS		REG_BIT(31)
+
 #define XEHP_MTCFG_ADDR			XE_REG(0x101800)
 #define TILE_COUNT			REG_GENMASK(15, 8)

From 466a6c3855cf00653c14a92a6e9f8ae50077b77d Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Tue, 2 Jul 2024 20:37:03 +0200
Subject: [PATCH 12/95] drm/xe: Kill regs/xe_sriov_regs.h

There is no real benefit in maintaining a separate file. The register
definitions related to SR-IOV can be placed in existing headers.
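The moved definitions keep the driver's usual register/field macro
convention, so call sites read them as before. A small usage sketch
(setup_lmtt() is a hypothetical consumer; the accessors are the ones used
throughout the driver):

	u32 lmem_cfg = xe_mmio_read32(gt, LMEM_CFG);

	if (lmem_cfg & LMEM_EN) {
		/* LMTT_DIR_PTR holds the directory pointer in 64KB units */
		u64 lmtt_base = (u64)REG_FIELD_GET(LMTT_DIR_PTR, lmem_cfg) * SZ_64K;

		setup_lmtt(lmtt_base);
	}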
Signed-off-by: Michal Wajdeczko Reviewed-by: Matt Roper Reviewed-by: Himal Prasad Ghimiray Link: https://patchwork.freedesktop.org/patch/msgid/20240702183704.1022-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/regs/xe_gt_regs.h | 6 ++++++ drivers/gpu/drm/xe/regs/xe_regs.h | 6 ++++++ drivers/gpu/drm/xe/regs/xe_sriov_regs.h | 23 ----------------------- drivers/gpu/drm/xe/xe_gt_sriov_pf.c | 2 +- drivers/gpu/drm/xe/xe_lmtt.c | 2 +- drivers/gpu/drm/xe/xe_sriov.c | 2 +- 6 files changed, 15 insertions(+), 26 deletions(-) delete mode 100644 drivers/gpu/drm/xe/regs/xe_sriov_regs.h diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index d44564bad009..141d0e0faa21 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -88,6 +88,8 @@ #define VE1_AUX_INV XE_REG(0x42b8) #define AUX_INV REG_BIT(0) +#define XE2_LMEM_CFG XE_REG(0x48b0) + #define XEHP_TILE_ADDR_RANGE(_idx) XE_REG_MCR(0x4900 + (_idx) * 4) #define XEHP_FLAT_CCS_BASE_ADDR XE_REG_MCR(0x4910) #define XEHP_FLAT_CCS_PTR REG_GENMASK(31, 8) @@ -395,6 +397,10 @@ #define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12) #define GLOBAL_INVALIDATION_MODE REG_BIT(2) +#define LMEM_CFG XE_REG(0xcf58) +#define LMEM_EN REG_BIT(31) +#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */ + #define HALF_SLICE_CHICKEN5 XE_REG_MCR(0xe188, XE_REG_OPTION_MASKED) #define DISABLE_SAMPLE_G_PERFORMANCE REG_BIT(0) diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h index 23ecba38ed41..55bf47c99016 100644 --- a/drivers/gpu/drm/xe/regs/xe_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_regs.h @@ -30,6 +30,9 @@ #define GU_DEBUG XE_REG(0x101018) #define DRIVERFLR_STATUS REG_BIT(31) +#define VIRTUAL_CTRL_REG XE_REG(0x10108c) +#define GUEST_GTT_UPDATE_EN REG_BIT(8) + #define XEHP_MTCFG_ADDR XE_REG(0x101800) #define TILE_COUNT REG_GENMASK(15, 8) @@ -66,6 +69,9 @@ #define DISPLAY_IRQ REG_BIT(16) #define GT_DW_IRQ(x) REG_BIT(x) +#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF) +#define VF_CAP REG_BIT(0) + #define PVC_RP_STATE_CAP XE_REG(0x281014) #endif diff --git a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h deleted file mode 100644 index 017b4ddd1ecf..000000000000 --- a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef _REGS_XE_SRIOV_REGS_H_ -#define _REGS_XE_SRIOV_REGS_H_ - -#include "regs/xe_reg_defs.h" - -#define XE2_LMEM_CFG XE_REG(0x48b0) - -#define LMEM_CFG XE_REG(0xcf58) -#define LMEM_EN REG_BIT(31) -#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */ - -#define VIRTUAL_CTRL_REG XE_REG(0x10108c) -#define GUEST_GTT_UPDATE_EN REG_BIT(8) - -#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF) -#define VF_CAP REG_BIT(0) - -#endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c index 9dbba9ab7a9a..ef239440963c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c @@ -5,7 +5,7 @@ #include -#include "regs/xe_sriov_regs.h" +#include "regs/xe_regs.h" #include "xe_gt_sriov_pf.h" #include "xe_gt_sriov_pf_config.h" diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 418661a88918..c5fdb36b6d33 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -7,7 +7,7 @@ #include -#include "regs/xe_sriov_regs.h" +#include "regs/xe_gt_regs.h" #include "xe_assert.h" #include "xe_bo.h" 
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index a274a5fb1401..5a1d65e4f19f 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -5,7 +5,7 @@

 #include

-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_regs.h"
 #include "xe_assert.h"
 #include "xe_device.h"

From 3078d9c8b6a0939bc732fd1c36ef86c0178127dd Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Tue, 2 Jul 2024 20:37:04 +0200
Subject: [PATCH 13/95] drm/xe: Use VF_CAP_REG for device wmb

To force a write barrier on the device memory, we write to the
SOFTWARE_FLAGS_SPR33 register, but this particular register was selected
because it was one of the writable and unused registers. Since a write
barrier should also work if we use a read-only register, switch to the
VF_CAP_REG register that is also marked as accessible for VFs.

While at it, add a simple kernel-doc for the xe_device_wmb() function.

Signed-off-by: Michal Wajdeczko
Cc: Matt Roper
Reviewed-by: Matt Roper
Reviewed-by: Himal Prasad Ghimiray
Link: https://patchwork.freedesktop.org/patch/msgid/20240702183704.1022-4-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_device.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 03492fbcb8fb..db513175b29e 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -744,13 +744,22 @@ void xe_device_shutdown(struct xe_device *xe)
 {
 }

+/**
+ * xe_device_wmb() - Device specific write memory barrier
+ * @xe: the &xe_device
+ *
+ * While wmb() is sufficient for a barrier if we use system memory, on discrete
+ * platforms with device memory we additionally need to issue a register write.
+ * Since it doesn't matter which register we write to, use the read-only VF_CAP
+ * register that is also marked as accessible by the VFs.
+ */
 void xe_device_wmb(struct xe_device *xe)
 {
 	struct xe_gt *gt = xe_root_mmio_gt(xe);

 	wmb();
 	if (IS_DGFX(xe))
-		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
+		xe_mmio_write32(gt, VF_CAP_REG, 0);
 }

 /**

From 01570b446939c3538b1aa3d059837f49fa14a3ae Mon Sep 17 00:00:00 2001
From: Matthew Auld
Date: Wed, 3 Jul 2024 13:43:38 +0100
Subject: [PATCH 14/95] drm/xe/bmg: implement Wa_16023588340

This involves enabling l2 caching of host-side memory access to VRAM
through the CPU BAR. The main fallout here is with display, since VRAM
writes from the CPU can now be cached in GPU l2, and display is never
coherent with caches, so it needs various manual flushing. In the case of
fbc we disable it due to complications in getting this to work correctly
(in a later patch).
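The rule the rest of the series follows is that any CPU write to VRAM
that display will consume must be chased by an explicit L2 flush. A
sketch of the pattern (mirroring the xe_dsb_buffer.c hunks below;
iosys_map_wr() is the regular iosys-map accessor):

	/* CPU write may land in GPU L2, which display never snoops ... */
	iosys_map_wr(&bo->vmap, idx * sizeof(u32), u32, val);

	/* ... so flush; this is a no-op on platforms without the WA */
	xe_device_l2_flush(xe);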
Signed-off-by: Matthew Auld Cc: Jonathan Cavitt Cc: Matt Roper Cc: Lucas De Marchi Cc: Vinod Govindapillai Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240703124338.208220-3-matthew.auld@intel.com --- drivers/gpu/drm/xe/Makefile | 2 + drivers/gpu/drm/xe/display/xe_dsb_buffer.c | 8 ++++ drivers/gpu/drm/xe/display/xe_fb_pin.c | 3 ++ drivers/gpu/drm/xe/regs/xe_gt_regs.h | 8 ++++ drivers/gpu/drm/xe/xe_device.c | 30 ++++++++++++ drivers/gpu/drm/xe/xe_device.h | 1 + drivers/gpu/drm/xe/xe_gt.c | 54 ++++++++++++++++++++++ drivers/gpu/drm/xe/xe_pat.c | 11 ++++- drivers/gpu/drm/xe/xe_wa_oob.rules | 1 + 9 files changed, 117 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 628c245c4822..e97c9da451b3 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -25,12 +25,14 @@ $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \ uses_generated_oob := \ $(obj)/xe_ggtt.o \ + $(obj)/xe_device.o \ $(obj)/xe_gsc.o \ $(obj)/xe_gt.o \ $(obj)/xe_guc.o \ $(obj)/xe_guc_ads.o \ $(obj)/xe_guc_pc.o \ $(obj)/xe_migrate.o \ + $(obj)/xe_pat.o \ $(obj)/xe_ring_ops.o \ $(obj)/xe_vm.o \ $(obj)/xe_wa.o \ diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c index 9e860c61f4b3..ccd0d87d438a 100644 --- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c +++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c @@ -7,6 +7,8 @@ #include "intel_display_types.h" #include "intel_dsb_buffer.h" #include "xe_bo.h" +#include "xe_device.h" +#include "xe_device_types.h" #include "xe_gt.h" u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf) @@ -16,7 +18,10 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val) { + struct xe_device *xe = dsb_buf->vma->bo->tile->xe; + iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val); + xe_device_l2_flush(xe); } u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) @@ -26,9 +31,12 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size) { + struct xe_device *xe = dsb_buf->vma->bo->tile->xe; + WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf)); iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size); + xe_device_l2_flush(xe); } bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size) diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index d270bcd11686..ea83d180ab70 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -10,6 +10,7 @@ #include "intel_fb.h" #include "intel_fb_pin.h" #include "xe_bo.h" +#include "xe_device.h" #include "xe_ggtt.h" #include "xe_gt.h" #include "xe_pm.h" @@ -304,6 +305,8 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, if (ret) goto err_unpin; + /* Ensure DPT writes are flushed */ + xe_device_l2_flush(xe); return vma; err_unpin: diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 141d0e0faa21..8a94a94d2267 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -80,6 +80,9 @@ #define LE_CACHEABILITY_MASK REG_GENMASK(1, 0) #define LE_CACHEABILITY(value) REG_FIELD_PREP(LE_CACHEABILITY_MASK, value) +#define 
XE2_GAMREQSTRM_CTRL XE_REG(0x4194) +#define CG_DIS_CNTLBUS REG_BIT(6) + #define CCS_AUX_INV XE_REG(0x4208) #define VD0_AUX_INV XE_REG(0x4218) @@ -374,6 +377,11 @@ #define XEHPC_L3CLOS_MASK(i) XE_REG_MCR(0xb194 + (i) * 8) +#define XE2_GLOBAL_INVAL XE_REG(0xb404) + +#define SCRATCH1LPFC XE_REG(0xb474) +#define EN_L3_RW_CCS_CACHE_FLUSH REG_BIT(0) + #define XE2LPM_L3SQCREG5 XE_REG_MCR(0xb658) #define XE2_TDF_CTRL XE_REG(0xb418) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index db513175b29e..64aea962afd5 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -54,6 +54,9 @@ #include "xe_vm.h" #include "xe_vram.h" #include "xe_wait_user_fence.h" +#include "xe_wa.h" + +#include static int xe_file_open(struct drm_device *dev, struct drm_file *file) { @@ -788,6 +791,11 @@ void xe_device_td_flush(struct xe_device *xe) if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) return; + if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) { + xe_device_l2_flush(xe); + return; + } + for_each_gt(gt, xe, id) { if (xe_gt_is_media_type(gt)) continue; @@ -811,6 +819,28 @@ void xe_device_td_flush(struct xe_device *xe) } } +void xe_device_l2_flush(struct xe_device *xe) +{ + struct xe_gt *gt; + int err; + + gt = xe_root_mmio_gt(xe); + + if (!XE_WA(gt, 16023588340)) + return; + + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (err) + return; + + xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1); + + if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true)) + xe_gt_err_once(gt, "Global invalidation timeout\n"); + + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); +} + u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size) { return xe_device_has_flat_ccs(xe) ? diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index bb07f5669dbb..0a2a3e7fd402 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -162,6 +162,7 @@ u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address); u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address); void xe_device_td_flush(struct xe_device *xe); +void xe_device_l2_flush(struct xe_device *xe); static inline bool xe_device_wedged(struct xe_device *xe) { diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 0ba2e2d0289b..ce8994b808fe 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -11,6 +11,8 @@ #include #include +#include + #include "instructions/xe_gfxpipe_commands.h" #include "instructions/xe_mi_commands.h" #include "regs/xe_gt_regs.h" @@ -95,6 +97,51 @@ void xe_gt_sanitize(struct xe_gt *gt) gt->uc.guc.submission_state.enabled = false; } +static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) +{ + u32 reg; + int err; + + if (!XE_WA(gt, 16023588340)) + return; + + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (WARN_ON(err)) + return; + + if (!xe_gt_is_media_type(gt)) { + xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH); + reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL); + reg |= CG_DIS_CNTLBUS; + xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg); + } + + xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3); + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); +} + +static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) +{ + u32 reg; + int err; + + if (!XE_WA(gt, 16023588340)) + return; + + if (xe_gt_is_media_type(gt)) + return; + + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (WARN_ON(err)) + return; + + reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL); + reg &= ~CG_DIS_CNTLBUS; + xe_mmio_write32(gt, 
XE2_GAMREQSTRM_CTRL, reg);
+
+	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
+
 /**
  * xe_gt_remove() - Clean up the GT structures before driver removal
  * @gt: the GT object
@@ -111,6 +158,8 @@ void xe_gt_remove(struct xe_gt *gt)

 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+
+	xe_gt_disable_host_l2_vram(gt);
 }

 static void gt_reset_worker(struct work_struct *w);
@@ -508,6 +557,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)

 	xe_gt_mcr_init_early(gt);
 	xe_pat_init(gt);
+	xe_gt_enable_host_l2_vram(gt);

 	err = xe_uc_init(&gt->uc);
 	if (err)
@@ -643,6 +693,8 @@ static int do_gt_restart(struct xe_gt *gt)

 	xe_pat_init(gt);

+	xe_gt_enable_host_l2_vram(gt);
+
 	xe_gt_mcr_set_implicit_defaults(gt);
 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
@@ -796,6 +848,8 @@ int xe_gt_suspend(struct xe_gt *gt)

 	xe_gt_idle_disable_pg(gt);

+	xe_gt_disable_host_l2_vram(gt);
+
 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));

 	xe_gt_dbg(gt, "suspended\n");
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 4ee32ee1cc88..722278cc23fc 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -7,6 +7,8 @@

 #include

+#include
+
 #include "regs/xe_reg_defs.h"
 #include "xe_assert.h"
 #include "xe_device.h"
@@ -15,6 +17,7 @@
 #include "xe_gt_mcr.h"
 #include "xe_mmio.h"
 #include "xe_sriov.h"
+#include "xe_wa.h"

 #define _PAT_ATS 0x47fc
 #define _PAT_INDEX(index) _PICK_EVEN_2RANGES(index, 8, \
@@ -382,7 +385,13 @@ void xe_pat_init_early(struct xe_device *xe)
 	if (GRAPHICS_VER(xe) == 20) {
 		xe->pat.ops = &xe2_pat_ops;
 		xe->pat.table = xe2_pat_table;
-		xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);
+
+		/* Wa_16023588340. XXX: Should use XE_WA */
+		if (GRAPHICS_VERx100(xe) == 2001)
+			xe->pat.n_entries = 28; /* Disable CLOS3 */
+		else
+			xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);
+
 		xe->pat.idx[XE_CACHE_NONE] = 3;
 		xe->pat.idx[XE_CACHE_WT] = 15;
 		xe->pat.idx[XE_CACHE_WB] = 2;
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 26066beb4f6f..08f7336881e3 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -29,3 +29,4 @@
 13011645652	GRAPHICS_VERSION(2004)
 22019338487	MEDIA_VERSION(2000)
 		GRAPHICS_VERSION(2001)
+16023588340	GRAPHICS_VERSION(2001)

From c55f79f317ab428ae6d005965bc07e37496f209f Mon Sep 17 00:00:00 2001
From: Matthew Auld
Date: Wed, 3 Jul 2024 13:43:39 +0100
Subject: [PATCH 15/95] drm/i915: disable fbc due to Wa_16023588340

On BMG-G21 we need to disable fbc due to complications around the WA.

v2:
- Try to handle with i915_drv.h and compat layer. (Rodrigo)
v3:
- For simplicity retreat to the original design for now.
- Drop the extra \ from the Makefile (Jani) Signed-off-by: Matthew Auld Cc: Jonathan Cavitt Cc: Matt Roper Cc: Lucas De Marchi Cc: Vinod Govindapillai Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Reviewed-by: Jonathan Cavitt Acked-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20240703124338.208220-4-matthew.auld@intel.com --- drivers/gpu/drm/i915/display/intel_display_wa.h | 8 ++++++++ drivers/gpu/drm/i915/display/intel_fbc.c | 6 ++++++ drivers/gpu/drm/xe/Makefile | 4 +++- drivers/gpu/drm/xe/display/xe_display_wa.c | 16 ++++++++++++++++ 4 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/xe/display/xe_display_wa.c diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h index 63201d09852c..be644ab6ae00 100644 --- a/drivers/gpu/drm/i915/display/intel_display_wa.h +++ b/drivers/gpu/drm/i915/display/intel_display_wa.h @@ -6,8 +6,16 @@ #ifndef __INTEL_DISPLAY_WA_H__ #define __INTEL_DISPLAY_WA_H__ +#include + struct drm_i915_private; void intel_display_wa_apply(struct drm_i915_private *i915); +#ifdef I915 +static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; } +#else +bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915); +#endif + #endif diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 67116c9f1464..8488f82143a4 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -56,6 +56,7 @@ #include "intel_display_device.h" #include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_display_wa.h" #include "intel_fbc.h" #include "intel_fbc_regs.h" #include "intel_frontbuffer.h" @@ -1237,6 +1238,11 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } + if (intel_display_needs_wa_16023588340(i915)) { + plane_state->no_fbc_reason = "Wa_16023588340"; + return 0; + } + /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ if (i915_vtd_active(i915) && (IS_SKYLAKE(i915) || IS_BROXTON(i915))) { plane_state->no_fbc_reason = "VT-d enabled"; diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index e97c9da451b3..0eb0acc4f198 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -36,7 +36,8 @@ uses_generated_oob := \ $(obj)/xe_ring_ops.o \ $(obj)/xe_vm.o \ $(obj)/xe_wa.o \ - $(obj)/xe_ttm_stolen_mgr.o + $(obj)/xe_ttm_stolen_mgr.o \ + $(obj)/display/xe_display_wa.o $(uses_generated_oob): $(generated_oob) @@ -194,6 +195,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ display/xe_display.o \ display/xe_display_misc.o \ display/xe_display_rps.o \ + display/xe_display_wa.o \ display/xe_dsb_buffer.o \ display/xe_fb_pin.o \ display/xe_hdcp_gsc.o \ diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c new file mode 100644 index 000000000000..68e3d1959ad6 --- /dev/null +++ b/drivers/gpu/drm/xe/display/xe_display_wa.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include "intel_display_wa.h" + +#include "xe_device.h" +#include "xe_wa.h" + +#include + +bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) +{ + return XE_WA(xe_root_mmio_gt(i915), 16023588340); +} From 01e0cfc994be484ddcb9e121e353e51d8bb837c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Fri, 5 Jul 2024 15:28:28 +0200 Subject: [PATCH 16/95] drm/xe: Use 
write-back caching mode for system memory on DGFX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The caching mode for buffer objects with VRAM as a possible placement was forced to write-combined, regardless of placement. However, write-combined system memory is expensive to allocate and even though it is pooled, the pool is expensive to shrink, since it involves global CPU TLB flushes. Moreover write-combined system memory from TTM is only reliably available on x86 and DGFX doesn't have an x86 restriction. So regardless of the cpu caching mode selected for a bo, internally use write-back caching mode for system memory on DGFX. Coherency is maintained, but user-space clients may perceive a difference in cpu access speeds. v2: - Update RB- and Ack tags. - Rephrase wording in xe_drm.h (Matt Roper) v3: - Really rephrase wording. Signed-off-by: Thomas Hellström Fixes: 622f709ca629 ("drm/xe/uapi: Add support for CPU caching mode") Cc: Pallavi Mishra Cc: Matthew Auld Cc: dri-devel@lists.freedesktop.org Cc: Joonas Lahtinen Cc: Effie Yu Cc: Matthew Brost Cc: Maarten Lankhorst Cc: Jose Souza Cc: Michal Mrozek Cc: # v6.8+ Acked-by: Matthew Auld Acked-by: José Roberto de Souza Reviewed-by: Rodrigo Vivi Fixes: 622f709ca629 ("drm/xe/uapi: Add support for CPU caching mode") Acked-by: Michal Mrozek Acked-by: Effie Yu #On chat Link: https://patchwork.freedesktop.org/patch/msgid/20240705132828.27714-1-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/xe/xe_bo.c | 47 +++++++++++++++++++------------- drivers/gpu/drm/xe/xe_bo_types.h | 3 +- include/uapi/drm/xe_drm.h | 8 +++++- 3 files changed, 37 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 65c696966e96..31192d983d9e 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -343,7 +343,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, struct xe_device *xe = xe_bo_device(bo); struct xe_ttm_tt *tt; unsigned long extra_pages; - enum ttm_caching caching; + enum ttm_caching caching = ttm_cached; int err; tt = kzalloc(sizeof(*tt), GFP_KERNEL); @@ -357,26 +357,35 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), PAGE_SIZE); - switch (bo->cpu_caching) { - case DRM_XE_GEM_CPU_CACHING_WC: - caching = ttm_write_combined; - break; - default: - caching = ttm_cached; - break; - } - - WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); - /* - * Display scanout is always non-coherent with the CPU cache. - * - * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and - * require a CPU:WC mapping. + * DGFX system memory is always WB / ttm_cached, since + * other caching modes are only supported on x86. DGFX + * GPU system memory accesses are always coherent with the + * CPU. */ - if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || - (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE)) - caching = ttm_write_combined; + if (!IS_DGFX(xe)) { + switch (bo->cpu_caching) { + case DRM_XE_GEM_CPU_CACHING_WC: + caching = ttm_write_combined; + break; + default: + caching = ttm_cached; + break; + } + + WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); + + /* + * Display scanout is always non-coherent with the CPU cache. + * + * For Xe_LPG and beyond, PPGTT PTE lookups are also + * non-coherent and require a CPU:WC mapping. 
+	 */
+	if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
+	    (xe->info.graphics_verx100 >= 1270 &&
+	     bo->flags & XE_BO_FLAG_PAGETABLE))
+		caching = ttm_write_combined;
+	}

 	if (bo->flags & XE_BO_FLAG_NEEDS_UC) {
 		/*
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 02d68873558a..ebc8abf7930a 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -68,7 +68,8 @@ struct xe_bo {

 	/**
 	 * @cpu_caching: CPU caching mode. Currently only used for userspace
-	 * objects.
+	 * objects. Exceptions are system memory on DGFX, which is always
+	 * WB.
 	 */
 	u16 cpu_caching;

diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 33544ef78d3e..19619d4952a8 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -783,7 +783,13 @@ struct drm_xe_gem_create {
 #define DRM_XE_GEM_CPU_CACHING_WC                      2
 	/**
 	 * @cpu_caching: The CPU caching mode to select for this object. If
-	 * mmaping the object the mode selected here will also be used.
+	 * mmaping the object the mode selected here will also be used. The
+	 * exception is when mapping system memory (including data evicted
+	 * to system) on discrete GPUs. The caching mode selected will
+	 * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency
+	 * between GPU- and CPU is guaranteed. The caching mode of
+	 * existing CPU-mappings will be updated transparently to
+	 * user-space clients.
 	 */
 	__u16 cpu_caching;
 	/** @pad: MBZ */

From 74e3076800067c6dc0dcff5b75344cec064c20eb Mon Sep 17 00:00:00 2001
From: Ngai-Mint Kwan
Date: Mon, 1 Jul 2024 11:46:37 -0700
Subject: [PATCH 17/95] drm/xe/xe2lpm: Extend Wa_16021639441

Wa_16021639441 applies to Xe2_LPM.

Signed-off-by: Ngai-Mint Kwan
Reviewed-by: Matt Roper
Signed-off-by: Matt Roper
Link: https://patchwork.freedesktop.org/patch/msgid/20240701184637.531794-1-ngai-mint.kwan@linux.intel.com
---
 drivers/gpu/drm/xe/xe_wa.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index c7bf0862b231..6c52d9d02b5f 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -539,6 +539,16 @@ static const struct xe_rtp_entry_sr engine_was[] = {
 		XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
 	},

+	/* Xe2_LPM */
+
+	{ XE_RTP_NAME("16021639441"),
+	  XE_RTP_RULES(MEDIA_VERSION(2000)),
+	  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
+			     GHWSP_CSB_REPORT_DIS |
+			     PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
+			     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+	},
+
 	/* Xe2_HPM */

 	{ XE_RTP_NAME("16021639441"),

From caaf1f44a6a27bae33eee189842c4d8fc21c3b02 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Mon, 8 Jul 2024 14:10:08 -0700
Subject: [PATCH 18/95] drm/xe: Drop trace_xe_hw_fence_free
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

fence->ctx may be stale memory when trace_xe_hw_fence_free is called,
resulting in a UAF bug when deriving the device name. This tracepoint is
not all that useful, so just drop it.
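In outline, the problem is a release-path use-after-free (a simplified
sketch; the lifetimes are compressed for illustration):

static void xe_hw_fence_release(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);

	/*
	 * Nothing guarantees fence->ctx is still alive here: the fence does
	 * not hold a reference to its context, and the final dma_fence_put()
	 * can happen long after the context (and the device name the
	 * tracepoint derived from it) has been freed.
	 */
	trace_xe_hw_fence_free(fence);	/* <-- potential UAF, removed below */
	XE_WARN_ON(!list_empty(&fence->irq_link));
	call_rcu(&dma_fence->rcu, fence_free);
}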
Fixes: 501c4255c409 ("drm/xe/trace: Print device_id in xe_trace events") Cc: Ville Syrjälä Cc: Lucas De Marchi Cc: Gustavo Sousa Cc: Radhakrishna Sripada Cc: Matt Roper Signed-off-by: Matthew Brost Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20240708211008.956384-1-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_hw_fence.c | 1 - drivers/gpu/drm/xe/xe_trace.h | 5 ----- 2 files changed, 6 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c index 35c0063a831a..45a9789cf501 100644 --- a/drivers/gpu/drm/xe/xe_hw_fence.c +++ b/drivers/gpu/drm/xe/xe_hw_fence.c @@ -187,7 +187,6 @@ static void xe_hw_fence_release(struct dma_fence *dma_fence) { struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence); - trace_xe_hw_fence_free(fence); XE_WARN_ON(!list_empty(&fence->irq_link)); call_rcu(&dma_fence->rcu, fence_free); } diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h index 09ca1ad057b0..baba14fb1e32 100644 --- a/drivers/gpu/drm/xe/xe_trace.h +++ b/drivers/gpu/drm/xe/xe_trace.h @@ -341,11 +341,6 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal, TP_ARGS(fence) ); -DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free, - TP_PROTO(struct xe_hw_fence *fence), - TP_ARGS(fence) -); - TRACE_EVENT(xe_reg_rw, TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len), From 56ab6986992ba143aee0bda33e15a764343e271d Mon Sep 17 00:00:00 2001 From: Bommu Krishnaiah Date: Wed, 3 Jul 2024 14:37:54 +0530 Subject: [PATCH 19/95] drm/xe/xe2lpg: Extend workaround 14021402888 workaround 14021402888 also applies to Xe2_LPG. Replicate the existing entry to one specific for Xe2_LPG. Signed-off-by: Bommu Krishnaiah Cc: Tejas Upadhyay Cc: Matt Roper Cc: Himal Prasad Ghimiray Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240703090754.1323647-1-krishnaiah.bommu@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_wa.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 6c52d9d02b5f..fd009b2c68fa 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -486,6 +486,10 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE)) }, + { XE_RTP_NAME("14021402888"), + XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) + }, /* Xe2_HPG */ From 3d122660dc70029d9cccb4e8670125f0affa959e Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 8 Jul 2024 10:33:01 -0700 Subject: [PATCH 20/95] drm/xe/gt: Remove double include The header generated/xe_wa_oob.h is included twice. Remove one. 
Fixes: 01570b446939 ("drm/xe/bmg: implement Wa_16023588340")
Reported-by: kernel test robot
Closes: https://lore.kernel.org/r/202407052122.AzuWSPuo-lkp@intel.com/
Reviewed-by: Michal Wajdeczko
Link: https://patchwork.freedesktop.org/patch/msgid/20240708173301.1543871-1-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi
---
 drivers/gpu/drm/xe/xe_gt.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index ce8994b808fe..b04e47186f5b 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 #include

From ea74bf9ccba9ae80fc0766c07c4abaef927e9e63 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi
Date: Mon, 8 Jul 2024 14:29:06 -0700
Subject: [PATCH 21/95] drm/xe: Generate oob before compiling anything

Instead of adding more and more dependencies as WAs are needed in
different places of the driver, just add a rule with all the objects so
the code generation happens before anything else.

While at it, group lines related to wa_oob in the Makefile.

v2: Prefix $(obj) when declaring dependency

Reviewed-by: Rodrigo Vivi
Link: https://patchwork.freedesktop.org/patch/msgid/20240708213041.1734028-1-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi
---
 drivers/gpu/drm/xe/Makefile | 25 ++++---------------------
 1 file changed, 4 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 0eb0acc4f198..1ff9602a52f6 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -12,35 +12,15 @@ subdir-ccflags-$(CONFIG_DRM_XE_WERROR) += -Werror
 subdir-ccflags-y += -I$(obj) -I$(src)

 # generated sources
+
 hostprogs := xe_gen_wa_oob
-
 generated_oob := $(obj)/generated/xe_wa_oob.c $(obj)/generated/xe_wa_oob.h
-
 quiet_cmd_wa_oob = GEN     $(notdir $(generated_oob))
       cmd_wa_oob = mkdir -p $(@D); $^ $(generated_oob)
-
 $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
 		$(src)/xe_wa_oob.rules
 	$(call cmd,wa_oob)

-uses_generated_oob := \
-	$(obj)/xe_ggtt.o \
-	$(obj)/xe_device.o \
-	$(obj)/xe_gsc.o \
-	$(obj)/xe_gt.o \
-	$(obj)/xe_guc.o \
-	$(obj)/xe_guc_ads.o \
-	$(obj)/xe_guc_pc.o \
-	$(obj)/xe_migrate.o \
-	$(obj)/xe_pat.o \
-	$(obj)/xe_ring_ops.o \
-	$(obj)/xe_vm.o \
-	$(obj)/xe_wa.o \
-	$(obj)/xe_ttm_stolen_mgr.o \
-	$(obj)/display/xe_display_wa.o
-
-$(uses_generated_oob): $(generated_oob)
-
 # Please keep these build lists sorted!

 # core driver code
@@ -324,3 +304,6 @@ quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)

 $(obj)/%.hdrtest: $(src)/%.h FORCE
 	$(call if_changed_dep,hdrtest)
+
+uses_generated_oob := $(addprefix $(obj)/, $(xe-y))
+$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h

From 33891539f9d6f245e93a76e3fb5791338180374f Mon Sep 17 00:00:00 2001
From: Nirmoy Das
Date: Mon, 8 Jul 2024 14:59:18 +0200
Subject: [PATCH 22/95] drm/xe/display/xe_hdcp_gsc: Free arbiter on driver removal

Free the arbiter allocated in intel_hdcp_gsc_init().
Fixes: 152f2df954d8 ("drm/xe/hdcp: Enable HDCP for XE") Cc: Suraj Kandpal Cc: Arun R Murthy Cc: Lucas De Marchi Cc: Rodrigo Vivi Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20240708125918.23573-1-nirmoy.das@intel.com Signed-off-by: Nirmoy Das --- drivers/gpu/drm/xe/display/xe_hdcp_gsc.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c index 14b8b4278317..990285aa9b26 100644 --- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c +++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c @@ -160,12 +160,16 @@ void intel_hdcp_gsc_fini(struct xe_device *xe) { struct intel_hdcp_gsc_message *hdcp_message = xe->display.hdcp.hdcp_message; + struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter; - if (!hdcp_message) - return; + if (hdcp_message) { + xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); + kfree(hdcp_message); + xe->display.hdcp.hdcp_message = NULL; + } - xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); - kfree(hdcp_message); + kfree(arb); + xe->display.hdcp.arbiter = NULL; } static int xe_gsc_send_sync(struct xe_device *xe, From 71733b8d7f50b61403f940c6c9745fb3a9b98dcb Mon Sep 17 00:00:00 2001 From: Tejas Upadhyay Date: Wed, 10 Jul 2024 10:57:50 +0530 Subject: [PATCH 23/95] drm/xe/xe2: Make subsequent L2 flush sequential Issuing the flush on top of an ongoing flush is not desirable. Let's use a lock to make it sequential. Reviewed-by: Nirmoy Das Signed-off-by: Tejas Upadhyay Link: https://patchwork.freedesktop.org/patch/msgid/20240710052750.3031586-1-tejas.upadhyay@intel.com Signed-off-by: Nirmoy Das --- drivers/gpu/drm/xe/xe_device.c | 2 ++ drivers/gpu/drm/xe/xe_gt.c | 1 + drivers/gpu/drm/xe/xe_gt_types.h | 6 ++++++ 3 files changed, 9 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 64aea962afd5..06cebaffb451 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -833,10 +833,12 @@ void xe_device_l2_flush(struct xe_device *xe) if (err) return; + spin_lock(&gt->global_invl_lock); xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1); if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true)) xe_gt_err_once(gt, "Global invalidation timeout\n"); + spin_unlock(&gt->global_invl_lock); xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); } diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index b04e47186f5b..85f974441d50 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -387,6 +387,7 @@ int xe_gt_init_early(struct xe_gt *gt) xe_force_wake_init_gt(gt, gt_to_fw(gt)); xe_pcode_init(gt); + spin_lock_init(&gt->global_invl_lock); return 0; } diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index 6b5e0b45efb0..38a0d0e178c8 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -362,6 +362,12 @@ struct xe_gt { */ spinlock_t mcr_lock; + /** + * @global_invl_lock: protects the register for the duration + * of a global invalidation of l2 cache + */ + spinlock_t global_invl_lock; + /** @wa_active: keep track of active workarounds */ struct { /** @wa_active.gt: bitmap with active GT workarounds */ From f6ca930d974e473fd608fc9aa1759fbe731fe44d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Wed, 10 Jul 2024 14:31:49 -0700 Subject: [PATCH 24/95] drm/xe: Add process name and PID to job timedout message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit This will be very helpful for Mesa CI, which uses the PID to match the exact test that caused the timeout/GPU hang and mark that test as failing. Also print the process name as it might be relevant for human readers. Cc: Rodrigo Vivi Signed-off-by: José Roberto de Souza Reviewed-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20240710213149.57662-1-jose.souza@intel.com --- drivers/gpu/drm/xe/xe_guc_submit.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 6392381e8e69..860405527115 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1060,7 +1060,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) struct xe_exec_queue *q = job->q; struct xe_gpu_scheduler *sched = &q->guc->sched; struct xe_guc *guc = exec_queue_to_guc(q); + const char *process_name = "no process"; + struct task_struct *task = NULL; int err = -ETIME; + pid_t pid = -1; int i = 0; bool wedged, skip_timeout_check; @@ -1157,9 +1160,19 @@ trigger_reset: goto sched_enable; } - xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx", + if (q->vm && q->vm->xef) { + task = get_pid_task(q->vm->xef->drm->pid, PIDTYPE_PID); + if (task) { + process_name = task->comm; + pid = task->pid; + } + } + xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]", xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job), - q->guc->id, q->flags); + q->guc->id, q->flags, process_name, pid); + if (task) + put_task_struct(task); + trace_xe_sched_job_timedout(job); if (!exec_queue_killed(q)) From bd85e00fa489f5374c2bad0eac15842d2ec68045 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 5 Jul 2024 21:10:56 +0200 Subject: [PATCH 25/95] drm/xe/kunit: Kill xe_cur_kunit() We shouldn't use a custom helper if there is an official one. 
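For reference, the official helper does the same pointer access, but behind a static key so that it compiles to almost nothing when no KUnit test is running. A simplified sketch of include/kunit/test-bug.h follows; the exact implementation may differ by kernel version:

static inline struct kunit *kunit_get_current_test(void)
{
	/* patched-out branch unless a KUnit test is actually active */
	if (!static_branch_unlikely(&kunit_running))
		return NULL;

	return current->kunit_test;
}

So dropping the driver-local xe_cur_kunit() loses nothing and picks up the static-key fast path for free.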
Signed-off-by: Michal Wajdeczko Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240705191057.1110-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/xe_bo.c | 4 ++-- drivers/gpu/drm/xe/tests/xe_dma_buf.c | 4 ++-- drivers/gpu/drm/xe/tests/xe_migrate.c | 2 +- drivers/gpu/drm/xe/tests/xe_mocs.c | 8 ++++---- drivers/gpu/drm/xe/tests/xe_pci_test.c | 4 ++-- drivers/gpu/drm/xe/tests/xe_test.h | 8 +++----- 6 files changed, 14 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 9f3c02826464..263e0afa8de0 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -154,7 +154,7 @@ out_unlock: static int ccs_test_run_device(struct xe_device *xe) { - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); struct xe_tile *tile; int id; @@ -325,7 +325,7 @@ cleanup_bo: static int evict_test_run_device(struct xe_device *xe) { - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); struct xe_tile *tile; int id; diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c index e7f9b531c465..b56013963911 100644 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c @@ -107,7 +107,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported, static void xe_test_dmabuf_import_same_driver(struct xe_device *xe) { - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv); struct drm_gem_object *import; struct dma_buf *dmabuf; @@ -258,7 +258,7 @@ static const struct dma_buf_test_params test_params[] = { static int dma_buf_run_device(struct xe_device *xe) { const struct dma_buf_test_params *params; - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); xe_pm_runtime_get(xe); for (params = test_params; params->mem_mask; ++params) { diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index 962f6438e219..d277a21ccf91 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -334,7 +334,7 @@ vunmap: static int migrate_test_run_device(struct xe_device *xe) { - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); struct xe_tile *tile; int id; diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index 67c65e88c384..4fff5de92dea 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -23,7 +23,7 @@ struct live_mocs { static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt) { unsigned int flags; - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); memset(arg, 0, sizeof(*arg)); @@ -41,7 +41,7 @@ static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt) static void read_l3cc_table(struct xe_gt *gt, const struct xe_mocs_info *info) { - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); u32 l3cc, l3cc_expected; unsigned int i; u32 reg_val; @@ -78,7 +78,7 @@ static void read_l3cc_table(struct xe_gt *gt, static void read_mocs_table(struct xe_gt *gt, const struct xe_mocs_info *info) { - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); u32 mocs, mocs_expected; unsigned int i; u32 reg_val; @@ -148,7 +148,7 @@ static int 
mocs_reset_test_run_device(struct xe_device *xe) struct xe_gt *gt; unsigned int flags; int id; - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); xe_pm_runtime_get(xe); diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c index a6705a536391..744a37583d2d 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.c +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c @@ -16,7 +16,7 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics) { - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); u64 mask = graphics->hw_engine_mask; /* RCS, CCS, and BCS engines are allowed on the graphics IP */ @@ -30,7 +30,7 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics) static void check_media_ip(const struct xe_media_desc *media) { - struct kunit *test = xe_cur_kunit(); + struct kunit *test = kunit_get_current_test(); u64 mask = media->hw_engine_mask; /* VCS, VECS and GSCCS engines are allowed on the media IP */ diff --git a/drivers/gpu/drm/xe/tests/xe_test.h b/drivers/gpu/drm/xe/tests/xe_test.h index 7a1ae213e750..55e5b5bedccc 100644 --- a/drivers/gpu/drm/xe/tests/xe_test.h +++ b/drivers/gpu/drm/xe/tests/xe_test.h @@ -9,8 +9,8 @@ #include #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) -#include #include +#include /* * Each test that provides a kunit private test structure, place a test id @@ -32,7 +32,6 @@ struct xe_test_priv { #define XE_TEST_DECLARE(x) x #define XE_TEST_ONLY(x) unlikely(x) #define XE_TEST_EXPORT -#define xe_cur_kunit() current->kunit_test /** * xe_cur_kunit_priv - Obtain the struct xe_test_priv pointed to by @@ -48,10 +47,10 @@ xe_cur_kunit_priv(enum xe_test_priv_id id) { struct xe_test_priv *priv; - if (!xe_cur_kunit()) + if (!kunit_get_current_test()) return NULL; - priv = xe_cur_kunit()->priv; + priv = kunit_get_current_test()->priv; return priv->id == id ? priv : NULL; } @@ -60,7 +59,6 @@ xe_cur_kunit_priv(enum xe_test_priv_id id) #define XE_TEST_DECLARE(x) #define XE_TEST_ONLY(x) 0 #define XE_TEST_EXPORT static -#define xe_cur_kunit() NULL #define xe_cur_kunit_priv(_id) NULL #endif From 57c2b3e684ba3e82e9944bc1975e047bd1630537 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 5 Jul 2024 21:10:57 +0200 Subject: [PATCH 26/95] drm/xe/kunit: Drop XE_TEST_EXPORT It's unused and can be replaced with VISIBLE_IF_KUNIT if needed. 
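For context, the replacement macros from <kunit/visibility.h> look roughly like this (a sketch, not verbatim; details may differ by kernel version):

#if IS_ENABLED(CONFIG_KUNIT)
#define VISIBLE_IF_KUNIT			/* symbol stays non-static */
#define EXPORT_SYMBOL_IF_KUNIT(symbol) \
	EXPORT_SYMBOL_NS(symbol, EXPORTED_FOR_KUNIT_TESTING)
#else
#define VISIBLE_IF_KUNIT		static
#define EXPORT_SYMBOL_IF_KUNIT(symbol)
#endif

which covers the same conditional visibility that XE_TEST_EXPORT provided locally.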
Signed-off-by: Michal Wajdeczko Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240705191057.1110-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/xe_test.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_test.h b/drivers/gpu/drm/xe/tests/xe_test.h index 55e5b5bedccc..9c23ad9dba8d 100644 --- a/drivers/gpu/drm/xe/tests/xe_test.h +++ b/drivers/gpu/drm/xe/tests/xe_test.h @@ -31,7 +31,6 @@ struct xe_test_priv { #define XE_TEST_DECLARE(x) x #define XE_TEST_ONLY(x) unlikely(x) -#define XE_TEST_EXPORT /** * xe_cur_kunit_priv - Obtain the struct xe_test_priv pointed to by @@ -58,7 +57,6 @@ xe_cur_kunit_priv(enum xe_test_priv_id id) #define XE_TEST_DECLARE(x) #define XE_TEST_ONLY(x) 0 -#define XE_TEST_EXPORT static #define xe_cur_kunit_priv(_id) NULL #endif From d6e850acc716d0fad756f09488d198db2077141e Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 8 Jul 2024 13:12:07 +0200 Subject: [PATCH 27/95] drm/xe/kunit: Simplify xe_bo live tests code layout The test case logic is implemented by the functions compiled as part of the core Xe driver module and then exported to build and register the test suite in the live test module. But we don't need to export individual test case functions, we may just export the entire test suite. And we don't need to register this test suite in a separate file, it can be done in the main file of the live test module. Signed-off-by: Michal Wajdeczko Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240708111210.1154-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/Makefile | 1 - drivers/gpu/drm/xe/tests/xe_bo.c | 20 +++++++++++++++----- drivers/gpu/drm/xe/tests/xe_bo_test.c | 21 --------------------- drivers/gpu/drm/xe/tests/xe_bo_test.h | 14 -------------- drivers/gpu/drm/xe/tests/xe_live_test_mod.c | 5 +++++ 5 files changed, 20 insertions(+), 41 deletions(-) delete mode 100644 drivers/gpu/drm/xe/tests/xe_bo_test.c delete mode 100644 drivers/gpu/drm/xe/tests/xe_bo_test.h diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile index 6e58931fddd4..77331b0a04ad 100644 --- a/drivers/gpu/drm/xe/tests/Makefile +++ b/drivers/gpu/drm/xe/tests/Makefile @@ -3,7 +3,6 @@ # "live" kunit tests obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o xe_live_test-y = xe_live_test_mod.o \ - xe_bo_test.o \ xe_dma_buf_test.o \ xe_migrate_test.o \ xe_mocs_test.o diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 263e0afa8de0..692e1b46b9cf 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -6,7 +6,6 @@ #include #include -#include "tests/xe_bo_test.h" #include "tests/xe_pci_test.h" #include "tests/xe_test.h" @@ -177,11 +176,10 @@ static int ccs_test_run_device(struct xe_device *xe) return 0; } -void xe_ccs_migrate_kunit(struct kunit *test) +static void xe_ccs_migrate_kunit(struct kunit *test) { xe_call_for_each_device(ccs_test_run_device); } -EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit); static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test) { @@ -345,8 +343,20 @@ static int evict_test_run_device(struct xe_device *xe) return 0; } -void xe_bo_evict_kunit(struct kunit *test) +static void xe_bo_evict_kunit(struct kunit *test) { xe_call_for_each_device(evict_test_run_device); } -EXPORT_SYMBOL_IF_KUNIT(xe_bo_evict_kunit); + +static struct kunit_case xe_bo_tests[] = { + KUNIT_CASE(xe_ccs_migrate_kunit), + KUNIT_CASE(xe_bo_evict_kunit), + {} 
+}; + +VISIBLE_IF_KUNIT +struct kunit_suite xe_bo_test_suite = { + .name = "xe_bo", + .test_cases = xe_bo_tests, +}; +EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c deleted file mode 100644 index a324cde77db8..000000000000 --- a/drivers/gpu/drm/xe/tests/xe_bo_test.c +++ /dev/null @@ -1,21 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright © 2022 Intel Corporation - */ - -#include "xe_bo_test.h" - -#include - -static struct kunit_case xe_bo_tests[] = { - KUNIT_CASE(xe_ccs_migrate_kunit), - KUNIT_CASE(xe_bo_evict_kunit), - {} -}; - -static struct kunit_suite xe_bo_test_suite = { - .name = "xe_bo", - .test_cases = xe_bo_tests, -}; - -kunit_test_suite(xe_bo_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.h b/drivers/gpu/drm/xe/tests/xe_bo_test.h deleted file mode 100644 index 0113ab45066a..000000000000 --- a/drivers/gpu/drm/xe/tests/xe_bo_test.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 AND MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef _XE_BO_TEST_H_ -#define _XE_BO_TEST_H_ - -struct kunit; - -void xe_ccs_migrate_kunit(struct kunit *test); -void xe_bo_evict_kunit(struct kunit *test); - -#endif diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c index eb1ea99a5a8b..3bffcbd233b2 100644 --- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c +++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c @@ -3,6 +3,11 @@ * Copyright © 2023 Intel Corporation */ #include +#include + +extern struct kunit_suite xe_bo_test_suite; + +kunit_test_suite(xe_bo_test_suite); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL"); From ff10c99ab1e644fed578dce13e94e372d2c688c3 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 8 Jul 2024 13:12:08 +0200 Subject: [PATCH 28/95] drm/xe/kunit: Simplify xe_dma_buf live tests code layout The test case logic is implemented by the functions compiled as part of the core Xe driver module and then exported to build and register the test suite in the live test module. But we don't need to export individual test case functions, we may just export the entire test suite. And we don't need to register this test suite in a separate file, it can be done in the main file of the live test module. 
Signed-off-by: Michal Wajdeczko Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240708111210.1154-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/Makefile | 1 - drivers/gpu/drm/xe/tests/xe_dma_buf.c | 16 +++++++++++++--- drivers/gpu/drm/xe/tests/xe_dma_buf_test.c | 20 -------------------- drivers/gpu/drm/xe/tests/xe_dma_buf_test.h | 13 ------------- drivers/gpu/drm/xe/tests/xe_live_test_mod.c | 2 ++ 5 files changed, 15 insertions(+), 37 deletions(-) delete mode 100644 drivers/gpu/drm/xe/tests/xe_dma_buf_test.c delete mode 100644 drivers/gpu/drm/xe/tests/xe_dma_buf_test.h diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile index 77331b0a04ad..c77a5882d094 100644 --- a/drivers/gpu/drm/xe/tests/Makefile +++ b/drivers/gpu/drm/xe/tests/Makefile @@ -3,7 +3,6 @@ # "live" kunit tests obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o xe_live_test-y = xe_live_test_mod.o \ - xe_dma_buf_test.o \ xe_migrate_test.o \ xe_mocs_test.o diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c index b56013963911..4f9dc41e13de 100644 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c @@ -8,7 +8,6 @@ #include #include -#include "tests/xe_dma_buf_test.h" #include "tests/xe_pci_test.h" #include "xe_pci.h" @@ -274,8 +273,19 @@ static int dma_buf_run_device(struct xe_device *xe) return 0; } -void xe_dma_buf_kunit(struct kunit *test) +static void xe_dma_buf_kunit(struct kunit *test) { xe_call_for_each_device(dma_buf_run_device); } -EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit); + +static struct kunit_case xe_dma_buf_tests[] = { + KUNIT_CASE(xe_dma_buf_kunit), + {} +}; + +VISIBLE_IF_KUNIT +struct kunit_suite xe_dma_buf_test_suite = { + .name = "xe_dma_buf", + .test_cases = xe_dma_buf_tests, +}; +EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c deleted file mode 100644 index 99cdb718b6c6..000000000000 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright © 2022 Intel Corporation - */ - -#include "xe_dma_buf_test.h" - -#include - -static struct kunit_case xe_dma_buf_tests[] = { - KUNIT_CASE(xe_dma_buf_kunit), - {} -}; - -static struct kunit_suite xe_dma_buf_test_suite = { - .name = "xe_dma_buf", - .test_cases = xe_dma_buf_tests, -}; - -kunit_test_suite(xe_dma_buf_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h deleted file mode 100644 index e6b464ddd526..000000000000 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 AND MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef _XE_DMA_BUF_TEST_H_ -#define _XE_DMA_BUF_TEST_H_ - -struct kunit; - -void xe_dma_buf_kunit(struct kunit *test); - -#endif diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c index 3bffcbd233b2..d9da15d9fe3f 100644 --- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c +++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c @@ -6,8 +6,10 @@ #include extern struct kunit_suite xe_bo_test_suite; +extern struct kunit_suite xe_dma_buf_test_suite; kunit_test_suite(xe_bo_test_suite); +kunit_test_suite(xe_dma_buf_test_suite); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL"); From 0237368193e897aadeea9801126c101e33047354 Mon Sep 17 00:00:00 2001 From: 
Michal Wajdeczko Date: Mon, 8 Jul 2024 13:12:09 +0200 Subject: [PATCH 29/95] drm/xe/kunit: Simplify xe_migrate live tests code layout The test case logic is implemented by the functions compiled as part of the core Xe driver module and then exported to build and register the test suite in the live test module. But we don't need to export individual test case functions, we may just export the entire test suite. And we don't need to register this test suite in a separate file, it can be done in the main file of the live test module. Signed-off-by: Michal Wajdeczko Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240708111210.1154-4-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/Makefile | 1 - drivers/gpu/drm/xe/tests/xe_live_test_mod.c | 2 ++ drivers/gpu/drm/xe/tests/xe_migrate.c | 16 +++++++++++++--- drivers/gpu/drm/xe/tests/xe_migrate_test.c | 20 -------------------- drivers/gpu/drm/xe/tests/xe_migrate_test.h | 13 ------------- 5 files changed, 15 insertions(+), 37 deletions(-) delete mode 100644 drivers/gpu/drm/xe/tests/xe_migrate_test.c delete mode 100644 drivers/gpu/drm/xe/tests/xe_migrate_test.h diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile index c77a5882d094..32ce1d6df0fa 100644 --- a/drivers/gpu/drm/xe/tests/Makefile +++ b/drivers/gpu/drm/xe/tests/Makefile @@ -3,7 +3,6 @@ # "live" kunit tests obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o xe_live_test-y = xe_live_test_mod.o \ - xe_migrate_test.o \ xe_mocs_test.o # Normal kunit tests diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c index d9da15d9fe3f..4c1e07a0d477 100644 --- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c +++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c @@ -7,9 +7,11 @@ extern struct kunit_suite xe_bo_test_suite; extern struct kunit_suite xe_dma_buf_test_suite; +extern struct kunit_suite xe_migrate_test_suite; kunit_test_suite(xe_bo_test_suite); kunit_test_suite(xe_dma_buf_test_suite); +kunit_test_suite(xe_migrate_test_suite); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index d277a21ccf91..0de0e0c66623 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -6,7 +6,6 @@ #include #include -#include "tests/xe_migrate_test.h" #include "tests/xe_pci_test.h" #include "xe_pci.h" @@ -354,8 +353,19 @@ static int migrate_test_run_device(struct xe_device *xe) return 0; } -void xe_migrate_sanity_kunit(struct kunit *test) +static void xe_migrate_sanity_kunit(struct kunit *test) { xe_call_for_each_device(migrate_test_run_device); } -EXPORT_SYMBOL_IF_KUNIT(xe_migrate_sanity_kunit); + +static struct kunit_case xe_migrate_tests[] = { + KUNIT_CASE(xe_migrate_sanity_kunit), + {} +}; + +VISIBLE_IF_KUNIT +struct kunit_suite xe_migrate_test_suite = { + .name = "xe_migrate", + .test_cases = xe_migrate_tests, +}; +EXPORT_SYMBOL_IF_KUNIT(xe_migrate_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c deleted file mode 100644 index eb0d8963419c..000000000000 --- a/drivers/gpu/drm/xe/tests/xe_migrate_test.c +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright © 2022 Intel Corporation - */ - -#include "xe_migrate_test.h" - -#include - -static struct kunit_case xe_migrate_tests[] = { - KUNIT_CASE(xe_migrate_sanity_kunit), - {} -}; - -static struct kunit_suite 
xe_migrate_test_suite = { - .name = "xe_migrate", - .test_cases = xe_migrate_tests, -}; - -kunit_test_suite(xe_migrate_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.h b/drivers/gpu/drm/xe/tests/xe_migrate_test.h deleted file mode 100644 index 7c645c66824f..000000000000 --- a/drivers/gpu/drm/xe/tests/xe_migrate_test.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 AND MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef _XE_MIGRATE_TEST_H_ -#define _XE_MIGRATE_TEST_H_ - -struct kunit; - -void xe_migrate_sanity_kunit(struct kunit *test); - -#endif From e97701a069612ba2fa1d92b56a720b108049df4e Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 8 Jul 2024 13:12:10 +0200 Subject: [PATCH 30/95] drm/xe/kunit: Simplify xe_mocs live tests code layout The test case logic is implemented by the functions compiled as part of the core Xe driver module and then exported to build and register the test suite in the live test module. But we don't need to export individual test case functions, we may just export the entire test suite. And we don't need to register this test suite in a separate file, it can be done in the main file of the live test module. Signed-off-by: Michal Wajdeczko Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240708111210.1154-5-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/Makefile | 3 +-- drivers/gpu/drm/xe/tests/xe_live_test_mod.c | 2 ++ drivers/gpu/drm/xe/tests/xe_mocs.c | 20 +++++++++++++++----- drivers/gpu/drm/xe/tests/xe_mocs_test.c | 21 --------------------- drivers/gpu/drm/xe/tests/xe_mocs_test.h | 14 -------------- 5 files changed, 18 insertions(+), 42 deletions(-) delete mode 100644 drivers/gpu/drm/xe/tests/xe_mocs_test.c delete mode 100644 drivers/gpu/drm/xe/tests/xe_mocs_test.h diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile index 32ce1d6df0fa..0e3408f4952c 100644 --- a/drivers/gpu/drm/xe/tests/Makefile +++ b/drivers/gpu/drm/xe/tests/Makefile @@ -2,8 +2,7 @@ # "live" kunit tests obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o -xe_live_test-y = xe_live_test_mod.o \ - xe_mocs_test.o +xe_live_test-y = xe_live_test_mod.o # Normal kunit tests obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_test.o diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c index 4c1e07a0d477..5f14737c8210 100644 --- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c +++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c @@ -8,10 +8,12 @@ extern struct kunit_suite xe_bo_test_suite; extern struct kunit_suite xe_dma_buf_test_suite; extern struct kunit_suite xe_migrate_test_suite; +extern struct kunit_suite xe_mocs_test_suite; kunit_test_suite(xe_bo_test_suite); kunit_test_suite(xe_dma_buf_test_suite); kunit_test_suite(xe_migrate_test_suite); +kunit_test_suite(xe_mocs_test_suite); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index 4fff5de92dea..febc1d967850 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -6,7 +6,6 @@ #include #include -#include "tests/xe_mocs_test.h" #include "tests/xe_pci_test.h" #include "tests/xe_test.h" @@ -134,11 +133,10 @@ static int mocs_kernel_test_run_device(struct xe_device *xe) return 0; } -void xe_live_mocs_kernel_kunit(struct kunit *test) +static void xe_live_mocs_kernel_kunit(struct kunit *test) { xe_call_for_each_device(mocs_kernel_test_run_device); } 
-EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit); static int mocs_reset_test_run_device(struct xe_device *xe) { @@ -175,8 +173,20 @@ static int mocs_reset_test_run_device(struct xe_device *xe) return 0; } -void xe_live_mocs_reset_kunit(struct kunit *test) +static void xe_live_mocs_reset_kunit(struct kunit *test) { xe_call_for_each_device(mocs_reset_test_run_device); } -EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_reset_kunit); + +static struct kunit_case xe_mocs_tests[] = { + KUNIT_CASE(xe_live_mocs_kernel_kunit), + KUNIT_CASE(xe_live_mocs_reset_kunit), + {} +}; + +VISIBLE_IF_KUNIT +struct kunit_suite xe_mocs_test_suite = { + .name = "xe_mocs", + .test_cases = xe_mocs_tests, +}; +EXPORT_SYMBOL_IF_KUNIT(xe_mocs_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c deleted file mode 100644 index 6315886b659e..000000000000 --- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c +++ /dev/null @@ -1,21 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright © 2022 Intel Corporation - */ - -#include "xe_mocs_test.h" - -#include - -static struct kunit_case xe_mocs_tests[] = { - KUNIT_CASE(xe_live_mocs_kernel_kunit), - KUNIT_CASE(xe_live_mocs_reset_kunit), - {} -}; - -static struct kunit_suite xe_mocs_test_suite = { - .name = "xe_mocs", - .test_cases = xe_mocs_tests, -}; - -kunit_test_suite(xe_mocs_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h b/drivers/gpu/drm/xe/tests/xe_mocs_test.h deleted file mode 100644 index e7699d495411..000000000000 --- a/drivers/gpu/drm/xe/tests/xe_mocs_test.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 AND MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef _XE_MOCS_TEST_H_ -#define _XE_MOCS_TEST_H_ - -struct kunit; - -void xe_live_mocs_kernel_kunit(struct kunit *test); -void xe_live_mocs_reset_kunit(struct kunit *test); - -#endif From 43a6faa6d9b5e9139758200a79fe9c8f4aaa0c8d Mon Sep 17 00:00:00 2001 From: Ashutosh Dixit Date: Thu, 11 Jul 2024 14:12:03 -0700 Subject: [PATCH 31/95] drm/xe/exec: Fix minor bug related to xe_sync_entry_cleanup Increment num_syncs after xe_sync_entry_parse() is successful to ensure the xe_sync_entry_cleanup() logic under "err_syncs" label works correctly. 
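The pattern, reduced to its essentials (an illustrative sketch with hypothetical parse_entry()/cleanup_entry() helpers, not the driver code itself): the counter only advances once an entry has been fully initialized, so the unwind loop can never touch an uninitialized slot:

	for (n = 0; n < count; n++) {
		err = parse_entry(&entries[n]);
		if (err)
			goto err_entries;	/* entries[n] was never initialized */
	}
	return 0;

err_entries:
	while (n--)	/* unwind only the entries that parsed successfully */
		cleanup_entry(&entries[n]);
	return err;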
v2: Use the same pattern as that in xe_vm.c (Matt Brost) Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs") Signed-off-by: Ashutosh Dixit Reviewed-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20240711211203.3728180-1-ashutosh.dixit@intel.com --- drivers/gpu/drm/xe/xe_exec.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index 2d72cdec3a0b..f36980aa26e6 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -118,7 +118,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) u64 addresses[XE_HW_ENGINE_MAX_INSTANCE]; struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn}; struct drm_exec *exec = &vm_exec.exec; - u32 i, num_syncs = 0, num_ufence = 0; + u32 i, num_syncs, num_ufence = 0; struct xe_sched_job *job; struct xe_vm *vm; bool write_locked, skip_retry = false; @@ -156,15 +156,15 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) vm = q->vm; - for (i = 0; i < args->num_syncs; i++) { - err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++], - &syncs_user[i], SYNC_PARSE_FLAG_EXEC | + for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) { + err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs], + &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC | (xe_vm_in_lr_mode(vm) ? SYNC_PARSE_FLAG_LR_MODE : 0)); if (err) goto err_syncs; - if (xe_sync_is_ufence(&syncs[i])) + if (xe_sync_is_ufence(&syncs[num_syncs])) num_ufence++; } @@ -325,8 +325,8 @@ err_unlock_list: if (err == -EAGAIN && !skip_retry) goto retry; err_syncs: - for (i = 0; i < num_syncs; i++) - xe_sync_entry_cleanup(&syncs[i]); + while (num_syncs--) + xe_sync_entry_cleanup(&syncs[num_syncs]); kfree(syncs); err_exec_queue: xe_exec_queue_put(q); From 4c3fe5eae46b92e2fd961b19f7779608352e5368 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Thu, 11 Jul 2024 21:23:19 +0200 Subject: [PATCH 32/95] drm/xe/pf: Limit fair VF LMEM provisioning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Due to the current design of the BO and VRAM manager, any object with the XE_BO_FLAG_PINNED flag, which the PF driver uses during VF LMEM provisioning, is created with the TTM_PL_FLAG_CONTIGUOUS flag, which may cause VRAM fragmentation that prevents subsequent allocations of larger objects, like fair VF LMEM provisioning. To avoid such failures, round down the fair VF LMEM provisioning size to the next power of two, to compensate for what xe_ttm_vram_mgr is doing to achieve contiguous allocations. 
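As a worked example (numbers purely illustrative): three VFs sharing 10 GiB of usable LMEM would each be assigned div_u64(10 GiB, 3), about 3.33 GiB; rounddown_pow_of_two() trims that to 2 GiB, a single power-of-two block a buddy allocator can hand out contiguously; the existing ALIGN_DOWN() then keeps whatever chunk alignment the platform requires. In code shape, a sketch mirroring pf_estimate_fair_lmem() with made-up values:

	u64 available = 10ull * SZ_1G;		/* hypothetical usable LMEM */
	u64 fair = div_u64(available, 3);	/* ~3.33 GiB per VF */

	fair = rounddown_pow_of_two(fair);	/* -> 2 GiB */
	fair = ALIGN_DOWN(fair, SZ_64M);	/* hypothetical alignment */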
Fixes: ac6598aed1b3 ("drm/xe/pf: Add support to configure SR-IOV VFs") Signed-off-by: Michal Wajdeczko Reviewed-by: Piotr Piórkowski Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240711192320.1198-2-michal.wajdeczko@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index db6c213da847..4699b7836001 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1543,6 +1543,7 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs) u64 fair; fair = div_u64(available, num_vfs); + fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */ fair = ALIGN_DOWN(fair, alignment); #ifdef MAX_FAIR_LMEM fair = min_t(u64, MAX_FAIR_LMEM, fair); From 86c5b70a9c0c3f05f7002ef8b789460c96b54e27 Mon Sep 17 00:00:00 2001 From: Tejas Upadhyay Date: Tue, 9 Jul 2024 21:26:06 +0530 Subject: [PATCH 33/95] drm/xe/xe2: Add Wa_15015404425 Wa_15015404425 asks us to perform four "dummy" writes to a non-existent register offset before every real register read. Although the specific offset of the writes doesn't directly matter, the workaround suggests offset 0x130030 as a good target so that these writes will be easy to recognize and filter out in debugging traces. V5(MattR): - Avoid negating an equality comparison V4(MattR): - Use writel and remove xe_reg usage V3(MattR): - Define dummy reg local to function - Avoid tracing dummy writes - Update commit message V2: - Add WA to 8/16/32bit reads also - MattR - Corrected dummy reg address - MattR - Use for loop to avoid mental pause - JaniN Reviewed-by: Matt Roper Signed-off-by: Tejas Upadhyay Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240709155606.2998941-1-tejas.upadhyay@intel.com --- drivers/gpu/drm/xe/xe_mmio.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index f92faad4b96d..cf622d07e190 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -121,12 +121,29 @@ int xe_mmio_init(struct xe_device *xe) return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe); } +static void mmio_flush_pending_writes(struct xe_gt *gt) +{ +#define DUMMY_REG_OFFSET 0x130030 + struct xe_tile *tile = gt_to_tile(gt); + int i; + + if (tile->xe->info.platform != XE_LUNARLAKE) + return; + + /* 4 dummy writes */ + for (i = 0; i < 4; i++) + writel(0, tile->mmio.regs + DUMMY_REG_OFFSET); +} + u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg) { struct xe_tile *tile = gt_to_tile(gt); u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); u8 val; + /* Wa_15015404425 */ + mmio_flush_pending_writes(gt); + val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); @@ -139,6 +156,9 @@ u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg) u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); u16 val; + /* Wa_15015404425 */ + mmio_flush_pending_writes(gt); + val = readw((reg.ext ? 
tile->mmio_ext.regs : tile->mmio.regs) + addr); trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); @@ -160,6 +180,9 @@ u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg) u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); u32 val; + /* Wa_15015404425 */ + mmio_flush_pending_writes(gt); + if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt))) val = xe_gt_sriov_vf_read32(gt, reg); else From 45d30c828c613441bfbe5c97ca766854152b7181 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sat, 13 Jul 2024 16:26:43 +0200 Subject: [PATCH 34/95] drm/xe/vf: Track writes to inaccessible registers from VF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only a limited set of registers is accessible to the VF driver, and the hardware will silently drop writes to inaccessible registers. To improve our VF driver, let's intercept all such writes so we can warn about these unexpected writes on debug builds, or optionally provide some substitution (as a potential future extension). Signed-off-by: Michal Wajdeczko Cc: Gustavo Sousa Cc: Piotr Piórkowski Reviewed-by: Piotr Piórkowski Link: https://patchwork.freedesktop.org/patch/msgid/20240713142643.1242-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 26 ++++++++++++++++++++++++++ drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 1 + drivers/gpu/drm/xe/xe_mmio.c | 6 +++++- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index 41e46a00c01e..6a87d31c44e6 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -892,6 +892,32 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg) return rr->value; } +/** + * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register. + * @gt: the &xe_gt + * @reg: the register to write + * @val: value to write + * + * This function is for VF use only. + * Currently it will trigger a WARN if running on debug build. + */ +void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val) +{ + u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + xe_gt_assert(gt, !reg.vf); + + /* + * In the future, we may want to handle selected writes to inaccessible + * registers in some custom way, but for now let's just log a warning + * about such attempt, as likely we might be doing something wrong. + */ + xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG), + "VF is trying to write %#x to an inaccessible register %#x+%#x\n", + val, reg.addr, addr - reg.addr); +} + /** * xe_gt_sriov_vf_print_config - Print VF self config. 
* @gt: the &xe_gt diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h index 0de7f8cbcfa6..e541ce57bec2 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h @@ -22,6 +22,7 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt); u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt); u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt); u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg); +void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val); void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p); void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p); diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index cf622d07e190..ea3c37d3e13f 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -171,7 +171,11 @@ void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val) u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); trace_xe_reg_rw(gt, true, addr, val, sizeof(val)); - writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); + + if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt))) + xe_gt_sriov_vf_write32(gt, reg, val); + else + writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); } u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg) From e02cea83d32d3a616c9ef8b6b50a83444a6bbcbf Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Mon, 8 Jul 2024 11:49:06 +0300 Subject: [PATCH 35/95] drm/xe/gsc: add Battlemage support Add a heci_cscfi support bit for the new CSC engine type. It has the same MMIO offsets as the DG2 GSC but a separate interrupt flow. Signed-off-by: Alexander Usyskin Reviewed-by: Rodrigo Vivi Signed-off-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20240708084906.2827024-1-alexander.usyskin@intel.com --- drivers/gpu/drm/xe/xe_device_types.h | 3 +++ drivers/gpu/drm/xe/xe_heci_gsc.c | 28 +++++++++++++++++++++++++--- drivers/gpu/drm/xe/xe_heci_gsc.h | 10 ++++++++-- drivers/gpu/drm/xe/xe_irq.c | 2 ++ drivers/gpu/drm/xe/xe_pci.c | 7 +++++-- 5 files changed, 43 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index f0cf9020e463..8e81ade7279b 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -44,6 +44,7 @@ struct xe_pat_ops; #define MEDIA_VERx100(xe) ((xe)->info.media_verx100) #define IS_DGFX(xe) ((xe)->info.is_dgfx) #define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi) +#define HAS_HECI_CSCFI(xe) ((xe)->info.has_heci_cscfi) #define XE_VRAM_FLAGS_NEED64K BIT(0) @@ -289,6 +290,8 @@ struct xe_device { u8 skip_pcode:1; /** @info.has_heci_gscfi: device has heci gscfi */ u8 has_heci_gscfi:1; + /** @info.has_heci_cscfi: device has heci cscfi */ + u8 has_heci_cscfi:1; /** @info.skip_guc_pc: Skip GuC based PM feature init */ u8 skip_guc_pc:1; /** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */ diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c index 1c9d38b6f5f1..65b2e147c4b9 100644 --- a/drivers/gpu/drm/xe/xe_heci_gsc.c +++ b/drivers/gpu/drm/xe/xe_heci_gsc.c @@ -92,7 +92,7 @@ void xe_heci_gsc_fini(struct xe_device *xe) { struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; - if (!HAS_HECI_GSCFI(xe)) + if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe)) return; if (heci_gsc->adev) { @@ -177,12 +177,14 @@ void xe_heci_gsc_init(struct xe_device *xe) const struct heci_gsc_def *def; int ret; - if 
(!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe)) return; heci_gsc->irq = -1; - if (xe->info.platform == XE_PVC) { + if (xe->info.platform == XE_BATTLEMAGE) { + def = &heci_gsc_def_dg2; + } else if (xe->info.platform == XE_PVC) { def = &heci_gsc_def_pvc; } else if (xe->info.platform == XE_DG2) { def = &heci_gsc_def_dg2; @@ -232,3 +234,23 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir) if (ret) drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret); } + +void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir) +{ + int ret; + + if ((iir & CSC_IRQ_INTF(1)) == 0) + return; + + if (!HAS_HECI_CSCFI(xe)) { + drm_warn_once(&xe->drm, "CSC irq: not supported"); + return; + } + + if (xe->heci_gsc.irq < 0) + return; + + ret = generic_handle_irq(xe->heci_gsc.irq); + if (ret) + drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret); +} diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.h b/drivers/gpu/drm/xe/xe_heci_gsc.h index 9db454478fae..48b3b1838045 100644 --- a/drivers/gpu/drm/xe/xe_heci_gsc.h +++ b/drivers/gpu/drm/xe/xe_heci_gsc.h @@ -11,10 +11,15 @@ struct xe_device; struct mei_aux_device; /* - * The HECI1 bit corresponds to bit15 and HECI2 to bit14. + * GSC HECI1 bit corresponds to bit15 and HECI2 to bit14. * The reason for this is to allow growth for more interfaces in the future. */ -#define GSC_IRQ_INTF(_x) BIT(15 - (_x)) +#define GSC_IRQ_INTF(_x) BIT(15 - (_x)) + +/* + * CSC HECI1 bit corresponds to bit9 and HECI2 to bit10. + */ +#define CSC_IRQ_INTF(_x) BIT(9 + (_x)) /** * struct xe_heci_gsc - graphics security controller for xe, HECI interface @@ -31,5 +36,6 @@ struct xe_heci_gsc { void xe_heci_gsc_init(struct xe_device *xe); void xe_heci_gsc_fini(struct xe_device *xe); void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir); +void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir); #endif /* __XE_HECI_GSC_DEV_H__ */ diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 85733f993d09..5f2c368c35ad 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -459,6 +459,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) * the primary tile. 
*/ if (id == 0) { + if (HAS_HECI_CSCFI(xe)) + xe_heci_csc_irq_handler(xe, master_ctl); xe_display_irq_handler(xe, master_ctl); gu_misc_iir = gu_misc_irq_ack(xe, master_ctl); } diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 732ee0d02124..3c4a3c91377a 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -59,6 +59,7 @@ struct xe_device_desc { u8 has_display:1; u8 has_heci_gscfi:1; + u8 has_heci_cscfi:1; u8 has_llc:1; u8 has_mmio_ext:1; u8 has_sriov:1; @@ -345,6 +346,7 @@ static const struct xe_device_desc bmg_desc = { PLATFORM(BATTLEMAGE), .has_display = true, .require_force_probe = true, + .has_heci_cscfi = 1, }; #undef PLATFORM @@ -606,6 +608,7 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.is_dgfx = desc->is_dgfx; xe->info.has_heci_gscfi = desc->has_heci_gscfi; + xe->info.has_heci_cscfi = desc->has_heci_cscfi; xe->info.has_llc = desc->has_llc; xe->info.has_mmio_ext = desc->has_mmio_ext; xe->info.has_sriov = desc->has_sriov; @@ -815,7 +818,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d", + drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d", desc->platform_name, subplatform_desc ? subplatform_desc->name : "", xe->info.devid, xe->info.revid, @@ -828,7 +831,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) xe->info.media_verx100 % 100, str_yes_no(xe->info.enable_display), xe->info.dma_mask_size, xe->info.tile_count, - xe->info.has_heci_gscfi); + xe->info.has_heci_gscfi, xe->info.has_heci_cscfi); drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, D:%s, B:%s)\n", xe_step_name(xe->info.step.graphics), From 7dbe8af13c189f5937e87e9fb924d5bbc49e6f71 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 15 Jul 2024 23:39:01 -0700 Subject: [PATCH 36/95] drm/xe: Wedge the entire device Wedge the entire device, not just the GT which may have triggered the wedge. To implement this, clean up the layering so xe_device_declare_wedged() calls into the lower layers (GT) to ensure the entire device is wedged. While we are here, also signal any pending GT TLB invalidations upon wedging the device. Lastly, short-circuit the reset wait if the device is wedged. 
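The resulting layering, summarized from the diff below:

	xe_device_declare_wedged(xe)
	    for_each_gt() -> xe_gt_declare_wedged(gt)
	        xe_uc_declare_wedged(&gt->uc)
	            xe_guc_declare_wedged(&uc->guc)
	                xe_guc_reset_prepare(guc)
	                xe_guc_ct_stop(&guc->ct)
	                xe_guc_submit_wedge(guc)
	        xe_gt_tlb_invalidation_reset(gt)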
v2: - Short circuit reset wait if device is wedged (Local testing) Fixes: 8ed9aaae39f3 ("drm/xe: Force wedged state and block GT reset upon any GPU hang") Cc: Rodrigo Vivi Signed-off-by: Matthew Brost Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240716063902.1390130-1-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_device.c | 6 ++++ drivers/gpu/drm/xe/xe_gt.c | 15 ++++++++++ drivers/gpu/drm/xe/xe_gt.h | 1 + drivers/gpu/drm/xe/xe_guc.c | 16 ++++++++++ drivers/gpu/drm/xe/xe_guc.h | 1 + drivers/gpu/drm/xe/xe_guc_submit.c | 48 +++++++++++++++++++----------- drivers/gpu/drm/xe/xe_guc_submit.h | 1 + drivers/gpu/drm/xe/xe_uc.c | 14 +++++++++ drivers/gpu/drm/xe/xe_uc.h | 1 + 9 files changed, 85 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 06cebaffb451..8b9c191940df 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -911,6 +911,9 @@ u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address) */ void xe_device_declare_wedged(struct xe_device *xe) { + struct xe_gt *gt; + u8 id; + if (xe->wedged.mode == 0) { drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n"); return; @@ -924,4 +927,7 @@ void xe_device_declare_wedged(struct xe_device *xe) "Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n", dev_name(xe->drm.dev)); } + + for_each_gt(gt, xe, id) + xe_gt_declare_wedged(gt); } diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 85f974441d50..58895ed22f6e 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -958,3 +958,18 @@ struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt) return NULL; } + +/** + * xe_gt_declare_wedged() - Declare GT wedged + * @gt: the GT object + * + * Wedge the GT which stops all submission, saves desired debug state, and + * cleans up anything which could timeout. + */ +void xe_gt_declare_wedged(struct xe_gt *gt) +{ + xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode); + + xe_uc_declare_wedged(&gt->uc); + xe_gt_tlb_invalidation_reset(gt); +} diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h index 1123fdfc4ebc..8b1a5027dcf2 100644 --- a/drivers/gpu/drm/xe/xe_gt.h +++ b/drivers/gpu/drm/xe/xe_gt.h @@ -37,6 +37,7 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile); int xe_gt_init_hwconfig(struct xe_gt *gt); int xe_gt_init_early(struct xe_gt *gt); int xe_gt_init(struct xe_gt *gt); +void xe_gt_declare_wedged(struct xe_gt *gt); int xe_gt_record_default_lrcs(struct xe_gt *gt); /** diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index eb655cee19f7..de0fe9e65746 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -1178,3 +1178,19 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) xe_guc_ct_print(&guc->ct, p, false); xe_guc_submit_print(guc, p); } + +/** + * xe_guc_declare_wedged() - Declare GuC wedged + * @guc: the GuC object + * + * Wedge the GuC which stops all submission, saves desired debug state, and + * cleans up anything which could timeout. 
+ */ +void xe_guc_declare_wedged(struct xe_guc *guc) +{ + xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); + + xe_guc_reset_prepare(guc); + xe_guc_ct_stop(&guc->ct); + xe_guc_submit_wedge(guc); +} diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h index af59c9545753..e0bbf98f849d 100644 --- a/drivers/gpu/drm/xe/xe_guc.h +++ b/drivers/gpu/drm/xe/xe_guc.h @@ -37,6 +37,7 @@ void xe_guc_reset_wait(struct xe_guc *guc); void xe_guc_stop_prepare(struct xe_guc *guc); void xe_guc_stop(struct xe_guc *guc); int xe_guc_start(struct xe_guc *guc); +void xe_guc_declare_wedged(struct xe_guc *guc); static inline u16 xe_engine_class_to_guc_class(enum xe_engine_class class) { diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 860405527115..26f8c6a4f665 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -861,13 +861,40 @@ static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q) xe_sched_tdr_queue_imm(&q->guc->sched); } -static bool guc_submit_hint_wedged(struct xe_guc *guc) +/** + * xe_guc_submit_wedge() - Wedge GuC submission + * @guc: the GuC object + * + * Save exec queue's registered with GuC state by taking a ref to each queue. + * Register a DRMM handler to drop refs upon driver unload. + */ +void xe_guc_submit_wedge(struct xe_guc *guc) { struct xe_device *xe = guc_to_xe(guc); struct xe_exec_queue *q; unsigned long index; int err; + xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); + + err = drmm_add_action_or_reset(&guc_to_xe(guc)->drm, + guc_submit_wedged_fini, guc); + if (err) { + drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n"); + return; + } + + mutex_lock(&guc->submission_state.lock); + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) + if (xe_exec_queue_get_unless_zero(q)) + set_exec_queue_wedged(q); + mutex_unlock(&guc->submission_state.lock); +} + +static bool guc_submit_hint_wedged(struct xe_guc *guc) +{ + struct xe_device *xe = guc_to_xe(guc); + if (xe->wedged.mode != 2) return false; @@ -876,22 +903,6 @@ static bool guc_submit_hint_wedged(struct xe_guc *guc) xe_device_declare_wedged(xe); - xe_guc_submit_reset_prepare(guc); - xe_guc_ct_stop(&guc->ct); - - err = drmm_add_action_or_reset(&guc_to_xe(guc)->drm, - guc_submit_wedged_fini, guc); - if (err) { - drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. 
Although device is wedged.\n"); - return true; /* Device is wedged anyway */ - } - - mutex_lock(&guc->submission_state.lock); - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) - if (xe_exec_queue_get_unless_zero(q)) - set_exec_queue_wedged(q); - mutex_unlock(&guc->submission_state.lock); - return true; } @@ -1717,7 +1728,8 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc) void xe_guc_submit_reset_wait(struct xe_guc *guc) { - wait_event(guc->ct.wq, !guc_read_stopped(guc)); + wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) || + !guc_read_stopped(guc)); } void xe_guc_submit_stop(struct xe_guc *guc) diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h index 4ad5f4c1b084..bdf8c9f3d24a 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.h +++ b/drivers/gpu/drm/xe/xe_guc_submit.h @@ -18,6 +18,7 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc); void xe_guc_submit_reset_wait(struct xe_guc *guc); void xe_guc_submit_stop(struct xe_guc *guc); int xe_guc_submit_start(struct xe_guc *guc); +void xe_guc_submit_wedge(struct xe_guc *guc); int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len); diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c index 0f240534fb72..0d073a9987c2 100644 --- a/drivers/gpu/drm/xe/xe_uc.c +++ b/drivers/gpu/drm/xe/xe_uc.c @@ -300,3 +300,17 @@ void xe_uc_remove(struct xe_uc *uc) { xe_gsc_remove(&uc->gsc); } + +/** + * xe_uc_declare_wedged() - Declare UC wedged + * @uc: the UC object + * + * Wedge the UC which stops all submission, saves desired debug state, and + * cleans up anything which could timeout. + */ +void xe_uc_declare_wedged(struct xe_uc *uc) +{ + xe_gt_assert(uc_to_gt(uc), uc_to_xe(uc)->wedged.mode); + + xe_guc_declare_wedged(&uc->guc); +} diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h index 11856f24e6f9..506517c11333 100644 --- a/drivers/gpu/drm/xe/xe_uc.h +++ b/drivers/gpu/drm/xe/xe_uc.h @@ -21,5 +21,6 @@ int xe_uc_start(struct xe_uc *uc); int xe_uc_suspend(struct xe_uc *uc); int xe_uc_sanitize_reset(struct xe_uc *uc); void xe_uc_remove(struct xe_uc *uc); +void xe_uc_declare_wedged(struct xe_uc *uc); #endif From 452bca0edbd0764ca0284239d5438b3edd305ab3 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 15 Jul 2024 23:39:02 -0700 Subject: [PATCH 37/95] drm/xe: Don't suspend device upon wedge When wedging a device we shouldn't be suspending device as state for debug will be lost. Also this appears to not work as the below stack trace pops upon trying to resume a wedged device: [ 304.245044] INFO: task cat:12115 blocked for more than 151 seconds. [ 304.251333] Tainted: G W 6.10.0-rc7-xe+ #3518 [ 304.257617] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 304.265459] task:cat state:D stack:13384 pid:12115 tgid:12115 ppid:3986 flags:0x00000006 [ 304.265465] Call Trace: [ 304.265467] [ 304.265469] __schedule+0x3c4/0xdf0 [ 304.265478] schedule+0x3c/0x140 [ 304.265481] rpm_resume+0x1cc/0x740 [ 304.265484] ? __pfx_autoremove_wake_function+0x10/0x10 [ 304.265489] __pm_runtime_resume+0x49/0x80 [ 304.265494] guc_info+0x6b/0xb0 [xe] [ 304.265538] ? __pfx___drm_printfn_seq_file+0x10/0x10 [ 304.265541] ? __pfx___drm_puts_seq_file+0x10/0x10 [ 304.265545] seq_read_iter+0x111/0x4c0 [ 304.265551] seq_read+0xfc/0x140 [ 304.265556] full_proxy_read+0x58/0x80 [ 304.265560] vfs_read+0xa7/0x360 [ 304.265563] ? 
find_held_lock+0x2b/0x80 [ 304.265568] ksys_read+0x64/0xe0 [ 304.265571] do_syscall_64+0x68/0x140 [ 304.265575] entry_SYSCALL_64_after_hwframe+0x76/0x7e [ 304.265578] RIP: 0033:0x7f4254d14992 [ 304.265580] RSP: 002b:00007ffc558666f8 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 [ 304.265583] RAX: ffffffffffffffda RBX: 0000000000020000 RCX: 00007f4254d14992 [ 304.265584] RDX: 0000000000020000 RSI: 00007f4254ebb000 RDI: 0000000000000003 [ 304.265586] RBP: 00007f4254ebb000 R08: 00007f4254eba010 R09: 00007f4254eba010 [ 304.265587] R10: 0000000000000022 R11: 0000000000000246 R12: 0000000000022000 [ 304.265588] R13: 0000000000000003 R14: 0000000000020000 R15: 0000000000020000 [ 304.265593] [ 304.265594] Showing all locks held in the system: [ 304.265598] 1 lock held by khungtaskd/57: [ 304.265599] #0: ffffffff8273b860 (rcu_read_lock){....}-{1:2}, at: debug_show_all_locks+0x36/0x1c0 [ 304.265607] 3 locks held by kworker/6:1/90: [ 304.265610] 1 lock held by in:imklog/547: [ 304.265611] #0: ffff88810498cd88 (&f->f_pos_lock){+.+.}-{3:3}, at: __fdget_pos+0x76/0xc0 [ 304.265620] 1 lock held by dmesg/1310: v2: Drop local 'err' variable (Jonathan) Fixes: 8ed9aaae39f3 ("drm/xe: Force wedged state and block GT reset upon any GPU hang") Cc: Rodrigo Vivi Signed-off-by: Matthew Brost Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240716063902.1390130-2-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_device.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 8b9c191940df..f51d456d15f7 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -895,6 +895,13 @@ u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address) return address & GENMASK_ULL(xe->info.va_bits - 1, 0); } +static void xe_device_wedged_fini(struct drm_device *drm, void *arg) +{ + struct xe_device *xe = arg; + + xe_pm_runtime_put(xe); +} + /** * xe_device_declare_wedged - Declare device wedged * @xe: xe device instance @@ -919,6 +926,13 @@ void xe_device_declare_wedged(struct xe_device *xe) return; } + if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { + drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n"); + return; + } + + xe_pm_runtime_get_noresume(xe); + if (!atomic_xchg(&xe->wedged.flag, 1)) { xe->needs_flr_on_fini = true; drm_err(&xe->drm, From 108c972a11c5f6e37be58207460d9bcac06698db Mon Sep 17 00:00:00 2001 From: Akshata Jahagirdar Date: Wed, 17 Jul 2024 21:10:02 +0000 Subject: [PATCH 38/95] drm/xe/migrate: Handle clear ccs logic for xe2 dgfx For Xe2 dGPU, we clear the BO by modifying the VRAM using an uncompressed PAT index, which then indirectly updates the compression status as uncompressed, i.e. zeroed CCS. So xe_migrate_clear() should be updated for BMG to not emit CCS surf copy commands. 
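The effect of the new helper (shown in the diff below) can be tabulated as:

	flat CCS   Xe2+ dGPU (GRAPHICS_VER >= 20 && IS_DGFX)   emit CCS surf copy?
	no         -                                            no
	yes        no                                           yes
	yes        yes                                          no (the PAT-based clear already zeroes the CCS)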
v2: Moved xe_device_needs_ccs_emit() to xe_migrate.c and changed name
    to xe_migrate_needs_ccs_emit() since it's very specific to
    migration. (Matt)

Signed-off-by: Akshata Jahagirdar
Reviewed-by: Matthew Auld
Reviewed-by: Himal Prasad Ghimiray
Signed-off-by: Matt Roper
Link: https://patchwork.freedesktop.org/patch/msgid/8dd869dd8dda5e17ace28c04f1a48675f5540874.1721250309.git.akshata.jahagirdar@intel.com
---
 drivers/gpu/drm/xe/xe_migrate.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index fa23a7e7ec43..85eec95c9bc2 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -347,6 +347,11 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
 	return logical_mask;
 }
 
+static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
+{
+	return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
+}
+
 /**
  * xe_migrate_init() - Initialize a migrate context
  * @tile: Back-pointer to the tile we're initializing for.
@@ -420,7 +425,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 		return ERR_PTR(err);
 
 	if (IS_DGFX(xe)) {
-		if (xe_device_has_flat_ccs(xe))
+		if (xe_migrate_needs_ccs_emit(xe))
 			/* min chunk size corresponds to 4K of CCS Metadata */
 			m->min_chunk_size = SZ_4K * SZ_64K /
 				xe_device_ccs_bytes(xe, SZ_64K);
@@ -1034,7 +1039,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 				  clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
 				  avail_pts);
 
-	if (xe_device_has_flat_ccs(xe))
+	if (xe_migrate_needs_ccs_emit(xe))
 		batch_size += EMIT_COPY_CCS_DW;
 
 	/* Clear commands */
@@ -1062,7 +1067,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 		if (!clear_system_ccs)
 			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
 
-		if (xe_device_has_flat_ccs(xe)) {
+		if (xe_migrate_needs_ccs_emit(xe)) {
 			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
 				      m->cleared_mem_ofs, false, clear_L0);
 			flush_flags = MI_FLUSH_DW_CCS;

From 54f07cfc016226c3959e0b3b7ed306124d986ce4 Mon Sep 17 00:00:00 2001
From: Akshata Jahagirdar
Date: Wed, 17 Jul 2024 21:10:03 +0000
Subject: [PATCH 39/95] drm/xe/migrate: Add kunit to test clear functionality

This test verifies that the main and CCS data are cleared during bo
creation. The motivation to use KUnit instead of IGT is that, although
we can verify whether the data is zero following bo creation, we cannot
confirm whether the zero value after bo creation is the result of our
clear function or simply because the initial data present was zero.

v2: Updated the mutex_lock and unlock logic,
    Changed out_unlock to out_put. (Matt)

v3: Added missing dma_fence_put(). (Nirmoy)

v4: Rebase.

v5: Add missing bo_put(), bo_unlock() calls.
(Matt Auld) Signed-off-by: Akshata Jahagirdar Reviewed-by: Himal Prasad Ghimiray Acked-by: Nirmoy Das Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/c07603439b88cfc99e78c0e2069327e65d5aa87d.1721250309.git.akshata.jahagirdar@intel.com --- drivers/gpu/drm/xe/tests/xe_migrate.c | 276 ++++++++++++++++++++++++++ 1 file changed, 276 insertions(+) diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index 0de0e0c66623..353b908845f7 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -358,8 +358,284 @@ static void xe_migrate_sanity_kunit(struct kunit *test) xe_call_for_each_device(migrate_test_run_device); } +static struct dma_fence *blt_copy(struct xe_tile *tile, + struct xe_bo *src_bo, struct xe_bo *dst_bo, + bool copy_only_ccs, const char *str, struct kunit *test) +{ + struct xe_gt *gt = tile->primary_gt; + struct xe_migrate *m = tile->migrate; + struct xe_device *xe = gt_to_xe(gt); + struct dma_fence *fence = NULL; + u64 size = src_bo->size; + struct xe_res_cursor src_it, dst_it; + struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource; + u64 src_L0_ofs, dst_L0_ofs; + u32 src_L0_pt, dst_L0_pt; + u64 src_L0, dst_L0; + int err; + bool src_is_vram = mem_type_is_vram(src->mem_type); + bool dst_is_vram = mem_type_is_vram(dst->mem_type); + + if (!src_is_vram) + xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it); + else + xe_res_first(src, 0, size, &src_it); + + if (!dst_is_vram) + xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it); + else + xe_res_first(dst, 0, size, &dst_it); + + while (size) { + u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */ + struct xe_sched_job *job; + struct xe_bb *bb; + u32 flush_flags = 0; + u32 update_idx; + u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; + + src_L0 = xe_migrate_res_sizes(m, &src_it); + dst_L0 = xe_migrate_res_sizes(m, &dst_it); + + src_L0 = min(src_L0, dst_L0); + + batch_size += pte_update_size(m, src_is_vram, src_is_vram, src, &src_it, &src_L0, + &src_L0_ofs, &src_L0_pt, 0, 0, + avail_pts); + + batch_size += pte_update_size(m, dst_is_vram, dst_is_vram, dst, &dst_it, &src_L0, + &dst_L0_ofs, &dst_L0_pt, 0, + avail_pts, avail_pts); + + /* Add copy commands size here */ + batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) + + ((xe_device_has_flat_ccs(xe) && copy_only_ccs) ? 
EMIT_COPY_CCS_DW : 0); + + bb = xe_bb_new(gt, batch_size, xe->info.has_usm); + if (IS_ERR(bb)) { + err = PTR_ERR(bb); + goto err_sync; + } + + if (src_is_vram) + xe_res_next(&src_it, src_L0); + else + emit_pte(m, bb, src_L0_pt, src_is_vram, false, + &src_it, src_L0, src); + + if (dst_is_vram) + xe_res_next(&dst_it, src_L0); + else + emit_pte(m, bb, dst_L0_pt, dst_is_vram, false, + &dst_it, src_L0, dst); + + bb->cs[bb->len++] = MI_BATCH_BUFFER_END; + update_idx = bb->len; + if (!copy_only_ccs) + emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE); + + if (copy_only_ccs) + flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, + src_is_vram, dst_L0_ofs, + dst_is_vram, src_L0, dst_L0_ofs, + copy_only_ccs); + + job = xe_bb_create_migration_job(m->q, bb, + xe_migrate_batch_base(m, xe->info.has_usm), + update_idx); + if (IS_ERR(job)) { + err = PTR_ERR(job); + goto err; + } + + xe_sched_job_add_migrate_flush(job, flush_flags); + + mutex_lock(&m->job_mutex); + xe_sched_job_arm(job); + dma_fence_put(fence); + fence = dma_fence_get(&job->drm.s_fence->finished); + xe_sched_job_push(job); + + dma_fence_put(m->fence); + m->fence = dma_fence_get(fence); + + mutex_unlock(&m->job_mutex); + + xe_bb_free(bb, fence); + size -= src_L0; + continue; + +err: + xe_bb_free(bb, NULL); + +err_sync: + if (fence) { + dma_fence_wait(fence, false); + dma_fence_put(fence); + } + return ERR_PTR(err); + } + + return fence; +} + +static void test_clear(struct xe_device *xe, struct xe_tile *tile, + struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct kunit *test) +{ + struct dma_fence *fence; + u64 expected, retval; + + expected = 0xd0d0d0d0d0d0d0d0; + xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size); + + fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test); + if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) { + retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64); + if (retval == expected) + KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n"); + } + dma_fence_put(fence); + + fence = blt_copy(tile, vram_bo, sys_bo, false, "Blit copy from vram to sysmem", test); + if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) { + retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64); + check(retval, expected, "Decompressed value must be equal to initial value", test); + retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64); + check(retval, expected, "Decompressed value must be equal to initial value", test); + } + dma_fence_put(fence); + + kunit_info(test, "Clear vram buffer object\n"); + expected = 0x0000000000000000; + fence = xe_migrate_clear(tile->migrate, vram_bo, vram_bo->ttm.resource); + if (sanity_fence_failed(xe, fence, "Clear vram_bo", test)) + return; + dma_fence_put(fence); + + fence = blt_copy(tile, vram_bo, sys_bo, + false, "Blit copy from vram to sysmem", test); + if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) { + retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64); + check(retval, expected, "Clear main buffer first value", test); + retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64); + check(retval, expected, "Clear main buffer last value", test); + } + dma_fence_put(fence); + + fence = blt_copy(tile, vram_bo, sys_bo, + true, "Blit surf copy from vram to sysmem", test); + if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) { + retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64); + check(retval, expected, "Clear ccs data first value", test); + retval = xe_map_rd(xe, 
&sys_bo->vmap, sys_bo->size - 8, u64);
+		check(retval, expected, "Clear ccs data last value", test);
+	}
+	dma_fence_put(fence);
+}
+
+static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
+				       struct kunit *test)
+{
+	struct xe_bo *sys_bo, *vram_bo;
+	unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+	long ret;
+
+	sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+				   DRM_XE_GEM_CPU_CACHING_WC, ttm_bo_type_device,
+				   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+
+	if (IS_ERR(sys_bo)) {
+		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+			   PTR_ERR(sys_bo));
+		return;
+	}
+
+	xe_bo_lock(sys_bo, false);
+	ret = xe_bo_validate(sys_bo, NULL, false);
+	if (ret) {
+		KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+		goto free_sysbo;
+	}
+
+	ret = xe_bo_vmap(sys_bo);
+	if (ret) {
+		KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+		goto free_sysbo;
+	}
+	xe_bo_unlock(sys_bo);
+
+	vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M, DRM_XE_GEM_CPU_CACHING_WC,
+				    ttm_bo_type_device, bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+	if (IS_ERR(vram_bo)) {
+		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+			   PTR_ERR(vram_bo));
+		return;
+	}
+
+	xe_bo_lock(vram_bo, false);
+	ret = xe_bo_validate(vram_bo, NULL, false);
+	if (ret) {
+		KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
+		goto free_vrambo;
+	}
+
+	ret = xe_bo_vmap(vram_bo);
+	if (ret) {
+		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+		goto free_vrambo;
+	}
+
+	test_clear(xe, tile, sys_bo, vram_bo, test);
+	xe_bo_unlock(vram_bo);
+
+	xe_bo_lock(vram_bo, false);
+	xe_bo_vunmap(vram_bo);
+	xe_bo_unlock(vram_bo);
+
+	xe_bo_lock(sys_bo, false);
+	xe_bo_vunmap(sys_bo);
+	xe_bo_unlock(sys_bo);
+free_vrambo:
+	xe_bo_put(vram_bo);
+free_sysbo:
+	xe_bo_put(sys_bo);
+}
+
+static int validate_ccs_test_run_device(struct xe_device *xe)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct xe_tile *tile;
+	int id;
+
+	if (!xe_device_has_flat_ccs(xe)) {
+		kunit_info(test, "Skipping non-flat-ccs device.\n");
+		return 0;
+	}
+
+	if (!(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))) {
+		kunit_info(test, "Skipping non-xe2 discrete device %s.\n",
+			   dev_name(xe->drm.dev));
+		return 0;
+	}
+
+	xe_pm_runtime_get(xe);
+
+	for_each_tile(tile, xe, id)
+		validate_ccs_test_run_tile(xe, tile, test);
+
+	xe_pm_runtime_put(xe);
+
+	return 0;
+}
+
+static void xe_validate_ccs_kunit(struct kunit *test)
+{
+	xe_call_for_each_device(validate_ccs_test_run_device);
+}
+
 static struct kunit_case xe_migrate_tests[] = {
 	KUNIT_CASE(xe_migrate_sanity_kunit),
+	KUNIT_CASE(xe_validate_ccs_kunit),
 	{}
 };

From 8d79acd567db183e675cccc6cc737d2959e2a2d9 Mon Sep 17 00:00:00 2001
From: Akshata Jahagirdar
Date: Wed, 17 Jul 2024 21:10:04 +0000
Subject: [PATCH 40/95] drm/xe/migrate: Add helper function to program identity map

Add a helper function to program the identity map.
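In outline, the factored-out loop programs one 1 GiB entry per step and
switches the last chunk to 2 MiB pages, since mixing reserved memory
(stolen, WOCPM) into a single 1 GiB mapping is not allowed on some
platforms. A simplified sketch of the logic being moved follows;
pde_to_2m_pt is a stand-in name for the encoded PDE value and the
encode callbacks are elided:

  for (pos = xe->mem.vram.dpa_base; pos < vram_limit; pos += SZ_1G, ofs += 8) {
  	if (pos + SZ_1G >= vram_limit) {
  		/* last chunk: point a PDE at a 2M page table, then fill it */
  		xe_map_wr(xe, &bo->vmap, ofs, u64, pde_to_2m_pt);
  		for (ofs = pt_2m_ofs; pos < vram_limit; pos += SZ_2M, ofs += 8)
  			xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
  		break;
  	}
  	xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); /* 1G entry */
  }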
v2: Formatting nits Signed-off-by: Akshata Jahagirdar Reviewed-by: Matthew Brost Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/91dc05f05bd33076fb9a9f74f8495b48d2abff53.1721250309.git.akshata.jahagirdar@intel.com --- drivers/gpu/drm/xe/xe_migrate.c | 88 ++++++++++++++++++--------------- 1 file changed, 48 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 85eec95c9bc2..49ad5d8443cf 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -130,6 +130,51 @@ static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr) return addr + (256ULL << xe_pt_shift(2)); } +static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo, + u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs) +{ + u64 pos, ofs, flags; + u64 entry; + /* XXX: Unclear if this should be usable_size? */ + u64 vram_limit = xe->mem.vram.actual_physical_size + + xe->mem.vram.dpa_base; + u32 level = 2; + + ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8; + flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, + true, 0); + + xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M)); + + /* + * Use 1GB pages when possible, last chunk always use 2M + * pages as mixing reserved memory (stolen, WOCPM) with a single + * mapping is not allowed on certain platforms. + */ + for (pos = xe->mem.vram.dpa_base; pos < vram_limit; + pos += SZ_1G, ofs += 8) { + if (pos + SZ_1G >= vram_limit) { + entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs, + pat_index); + xe_map_wr(xe, &bo->vmap, ofs, u64, entry); + + flags = vm->pt_ops->pte_encode_addr(xe, 0, + pat_index, + level - 1, + true, 0); + + for (ofs = pt_2m_ofs; pos < vram_limit; + pos += SZ_2M, ofs += 8) + xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); + break; /* Ensure pos == vram_limit assert correct */ + } + + xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); + } + + xe_assert(xe, pos == vram_limit); +} + static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm) { @@ -253,47 +298,10 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, /* Identity map the entire vram at 256GiB offset */ if (IS_DGFX(xe)) { - u64 pos, ofs, flags; - /* XXX: Unclear if this should be usable_size? */ - u64 vram_limit = xe->mem.vram.actual_physical_size + - xe->mem.vram.dpa_base; + u64 pt31_ofs = bo->size - XE_PAGE_SIZE; - level = 2; - ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8; - flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, - true, 0); - - xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M)); - - /* - * Use 1GB pages when possible, last chunk always use 2M - * pages as mixing reserved memory (stolen, WOCPM) with a single - * mapping is not allowed on certain platforms. 
- */ - for (pos = xe->mem.vram.dpa_base; pos < vram_limit; - pos += SZ_1G, ofs += 8) { - if (pos + SZ_1G >= vram_limit) { - u64 pt31_ofs = bo->size - XE_PAGE_SIZE; - - entry = vm->pt_ops->pde_encode_bo(bo, pt31_ofs, - pat_index); - xe_map_wr(xe, &bo->vmap, ofs, u64, entry); - - flags = vm->pt_ops->pte_encode_addr(xe, 0, - pat_index, - level - 1, - true, 0); - - for (ofs = pt31_ofs; pos < vram_limit; - pos += SZ_2M, ofs += 8) - xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); - break; /* Ensure pos == vram_limit assert correct */ - } - - xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); - } - - xe_assert(xe, pos == vram_limit); + xe_migrate_program_identity(xe, vm, bo, map_ofs, 256, pat_index, pt31_ofs); + xe_assert(xe, (xe->mem.vram.actual_physical_size <= SZ_256G)); } /* From 2b808d6b2919cb2fe92901e5087da7b4ed4b9e07 Mon Sep 17 00:00:00 2001 From: Akshata Jahagirdar Date: Wed, 17 Jul 2024 21:10:05 +0000 Subject: [PATCH 41/95] drm/xe/xe2: Introduce identity map for compressed pat for vram MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Xe2+ has unified compression (exactly one compression mode/format), where compression is now controlled via PAT at PTE level. This simplifies KMD operations, as it can now decompress freely without concern for the buffer's original compression format—unlike DG2, which had multiple compression formats and thus required copying the raw CCS state during VRAM eviction. In addition mixed VRAM and system memory buffers were not supported with compression enabled. On Xe2 dGPU compression is still only supported with VRAM, however we can now support compression with VRAM and system memory buffers, with GPU access being seamless underneath. So long as when doing VRAM -> system memory the KMD uses compressed -> uncompressed, to decompress it. This also allows CPU access to such buffers, assuming that userspace first decompress the corresponding pages being accessed. If the pages are already in system memory then KMD would have already decompressed them. When restoring such buffers with sysmem -> VRAM the KMD can't easily know which pages were originally compressed, so we always use uncompressed -> uncompressed here. With this it also means we can drop all the raw CCS handling on such platforms (including needing to allocate extra CCS storage). In order to support this we now need to have two different identity mappings for compressed and uncompressed VRAM. In this patch, we set up the additional identity map for the VRAM with compressed pat_index. We then select the appropriate mapping during migration/clear. During eviction (vram->sysmem), we use the mapping from compressed -> uncompressed. During restore (sysmem->vram), we need the mapping from uncompressed -> uncompressed. Therefore, we need to have two different mappings for compressed and uncompressed vram. We set up an additional identity map for the vram with compressed pat_index. We then select the appropriate mapping during migration/clear. v2: Formatting nits, Updated code to match recent changes in xe_migrate_prepare_vm(). (Matt) v3: Move identity map loop to a helper function. (Matt Brost) v4: Split helper function in different patch, and add asserts and nits. 
(Matt Brost) v5: Convert the 2 bool arguments of pte_update_size to flags argument (Matt Brost) v6: Formatting nits (Matt Brost) Signed-off-by: Akshata Jahagirdar Reviewed-by: Himal Prasad Ghimiray Reviewed-by: Matthew Brost Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/b00db5c7267e54260cb6183ba24b15c1e6ae52a3.1721250309.git.akshata.jahagirdar@intel.com --- drivers/gpu/drm/xe/tests/xe_migrate.c | 9 ++- drivers/gpu/drm/xe/xe_migrate.c | 81 +++++++++++++++++++-------- 2 files changed, 66 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index 353b908845f7..4af27847f3fd 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -393,17 +393,22 @@ static struct dma_fence *blt_copy(struct xe_tile *tile, u32 flush_flags = 0; u32 update_idx; u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; + u32 pte_flags; src_L0 = xe_migrate_res_sizes(m, &src_it); dst_L0 = xe_migrate_res_sizes(m, &dst_it); src_L0 = min(src_L0, dst_L0); - batch_size += pte_update_size(m, src_is_vram, src_is_vram, src, &src_it, &src_L0, + pte_flags = src_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM | + PTE_UPDATE_FLAG_IS_COMP_PTE) : 0; + batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0, &src_L0_ofs, &src_L0_pt, 0, 0, avail_pts); - batch_size += pte_update_size(m, dst_is_vram, dst_is_vram, dst, &dst_it, &src_L0, + pte_flags = dst_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM | + PTE_UPDATE_FLAG_IS_COMP_PTE) : 0; + batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0, &dst_L0_ofs, &dst_L0_pt, 0, avail_pts, avail_pts); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 49ad5d8443cf..c1c751952ce8 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -73,6 +73,7 @@ struct xe_migrate { #define NUM_PT_SLOTS 32 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M #define MAX_NUM_PTE 512 +#define IDENTITY_OFFSET 256ULL /* * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest @@ -120,14 +121,19 @@ static u64 xe_migrate_vm_addr(u64 slot, u32 level) return (slot + 1ULL) << xe_pt_shift(level + 1); } -static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr) +static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte) { /* * Remove the DPA to get a correct offset into identity table for the * migrate offset */ + u64 identity_offset = IDENTITY_OFFSET; + + if (GRAPHICS_VER(xe) >= 20 && is_comp_pte) + identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G); + addr -= xe->mem.vram.dpa_base; - return addr + (256ULL << xe_pt_shift(2)); + return addr + (identity_offset << xe_pt_shift(2)); } static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo, @@ -181,11 +187,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_device *xe = tile_to_xe(tile); u16 pat_index = xe->pat.idx[XE_CACHE_WB]; u8 id = tile->id; - u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level, - num_setup = num_level + 1; + u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level; +#define VRAM_IDENTITY_MAP_COUNT 2 + u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT; +#undef VRAM_IDENTITY_MAP_COUNT u32 map_ofs, level, i; struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo; - u64 entry, pt30_ofs; + u64 entry, pt29_ofs; /* Can't bump NUM_PT_SLOTS too high */ 
BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE); @@ -205,9 +213,9 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, if (IS_ERR(bo)) return PTR_ERR(bo); - /* PT31 reserved for 2M identity map */ - pt30_ofs = bo->size - 2 * XE_PAGE_SIZE; - entry = vm->pt_ops->pde_encode_bo(bo, pt30_ofs, pat_index); + /* PT30 & PT31 reserved for 2M identity map */ + pt29_ofs = bo->size - 3 * XE_PAGE_SIZE; + entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index); xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry); map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE; @@ -259,12 +267,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, } else { u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); - m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr); + m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false); if (xe->info.has_usm) { batch = tile->primary_gt->usm.bb_pool->bo; batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); - m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr); + m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false); } } @@ -298,18 +306,36 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, /* Identity map the entire vram at 256GiB offset */ if (IS_DGFX(xe)) { - u64 pt31_ofs = bo->size - XE_PAGE_SIZE; + u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE; - xe_migrate_program_identity(xe, vm, bo, map_ofs, 256, pat_index, pt31_ofs); - xe_assert(xe, (xe->mem.vram.actual_physical_size <= SZ_256G)); + xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET, + pat_index, pt30_ofs); + xe_assert(xe, xe->mem.vram.actual_physical_size <= + (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G); + + /* + * Identity map the entire vram for compressed pat_index for xe2+ + * if flat ccs is enabled. + */ + if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) { + u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION]; + u64 vram_offset = IDENTITY_OFFSET + + DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G); + u64 pt31_ofs = bo->size - XE_PAGE_SIZE; + + xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE - + IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G); + xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset, + comp_pat_index, pt31_ofs); + } } /* * Example layout created above, with root level = 3: * [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's - * [PT9...PT27]: Userspace PT's for VM_BIND, 4 KiB PTE's - * [PT28 = PDE 0] [PT29 = PDE 1] [PT30 = PDE 2] [PT31 = 2M vram identity map] + * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's + * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map] * * This makes the lowest part of the VM point to the pagetables. * Hence the lowest 2M in the vm should point to itself, with a few writes @@ -487,20 +513,26 @@ static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur) return cur->size >= size; } +#define PTE_UPDATE_FLAG_IS_VRAM BIT(0) +#define PTE_UPDATE_FLAG_IS_COMP_PTE BIT(1) + static u32 pte_update_size(struct xe_migrate *m, - bool is_vram, + u32 flags, struct ttm_resource *res, struct xe_res_cursor *cur, u64 *L0, u64 *L0_ofs, u32 *L0_pt, u32 cmd_size, u32 pt_ofs, u32 avail_pts) { u32 cmds = 0; + bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags; + bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags; *L0_pt = pt_ofs; if (is_vram && xe_migrate_allow_identity(*L0, cur)) { /* Offset into identity map. 
*/ *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile), - cur->start + vram_region_gpu_offset(res)); + cur->start + vram_region_gpu_offset(res), + is_comp_pte); cmds += cmd_size; } else { /* Clip L0 to available size */ @@ -779,6 +811,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, u32 update_idx; u64 ccs_ofs, ccs_size; u32 ccs_pt; + u32 pte_flags; bool usm = xe->info.has_usm; u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; @@ -791,17 +824,19 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, src_L0 = min(src_L0, dst_L0); - batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0, + pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; + batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0, &src_L0_ofs, &src_L0_pt, 0, 0, avail_pts); - batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0, + pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; + batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0, &dst_L0_ofs, &dst_L0_pt, 0, avail_pts, avail_pts); if (copy_system_ccs) { ccs_size = xe_device_ccs_bytes(xe, src_L0); - batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size, + batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs, &ccs_pt, 0, 2 * avail_pts, avail_pts); @@ -1034,6 +1069,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, struct xe_sched_job *job; struct xe_bb *bb; u32 batch_size, update_idx; + u32 pte_flags; bool usm = xe->info.has_usm; u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; @@ -1041,8 +1077,9 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, clear_L0 = xe_migrate_res_sizes(m, &src_it); /* Calculate final sizes and batch size.. */ + pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; batch_size = 2 + - pte_update_size(m, clear_vram, src, &src_it, + pte_update_size(m, pte_flags, src, &src_it, &clear_L0, &clear_L0_ofs, &clear_L0_pt, clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0, avail_pts); @@ -1159,7 +1196,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, if (!ppgtt_ofs) ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile), xe_bo_addr(update->pt_bo, 0, - XE_PAGE_SIZE)); + XE_PAGE_SIZE), false); do { u64 addr = ppgtt_ofs + ofs * 8; From 523f191cc0c728a02a7e5fd0ec26526c41f399ef Mon Sep 17 00:00:00 2001 From: Akshata Jahagirdar Date: Wed, 17 Jul 2024 21:10:06 +0000 Subject: [PATCH 42/95] drm/xe/xe_migrate: Handle migration logic for xe2+ dgfx During eviction (vram->sysmem), we use compressed -> uncompressed mapping. During restore (sysmem->vram), we need to use mapping from uncompressed -> uncompressed. Handle logic for selecting the compressed identity map for eviction, and selecting uncompressed map for restore operations. v2: Move check of xe_migrate_ccs_emit() before calling xe_migrate_ccs_copy(). 
(Nirmoy) Signed-off-by: Akshata Jahagirdar Reviewed-by: Matthew Auld Reviewed-by: Himal Prasad Ghimiray Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/79b3a016e686a662ae68c32b5fc7f0f2ac8043e9.1721250309.git.akshata.jahagirdar@intel.com --- drivers/gpu/drm/xe/xe_migrate.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index c1c751952ce8..c007f68503d4 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -705,7 +705,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m, struct xe_gt *gt = m->tile->primary_gt; u32 flush_flags = 0; - if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) { + if (!copy_ccs && dst_is_indirect) { /* * If the src is already in vram, then it should already * have been cleared by us, or has been populated by the @@ -781,6 +781,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, bool copy_ccs = xe_device_has_flat_ccs(xe) && xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo); bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram); + bool use_comp_pat = GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe) && src_is_vram && !dst_is_vram; /* Copying CCS between two different BOs is not supported yet. */ if (XE_WARN_ON(copy_ccs && src_bo != dst_bo)) @@ -807,7 +808,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */ struct xe_sched_job *job; struct xe_bb *bb; - u32 flush_flags; + u32 flush_flags = 0; u32 update_idx; u64 ccs_ofs, ccs_size; u32 ccs_pt; @@ -825,6 +826,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, src_L0 = min(src_L0, dst_L0); pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; + pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0; batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0, &src_L0_ofs, &src_L0_pt, 0, 0, avail_pts); @@ -845,7 +847,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, /* Add copy commands size here */ batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) + - ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0)); + ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0)); bb = xe_bb_new(gt, batch_size, usm); if (IS_ERR(bb)) { @@ -874,11 +876,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (!copy_only_ccs) emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE); - flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, - IS_DGFX(xe) ? src_is_vram : src_is_pltt, - dst_L0_ofs, - IS_DGFX(xe) ? dst_is_vram : dst_is_pltt, - src_L0, ccs_ofs, copy_ccs); + if (xe_migrate_needs_ccs_emit(xe)) + flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, + IS_DGFX(xe) ? src_is_vram : src_is_pltt, + dst_L0_ofs, + IS_DGFX(xe) ? dst_is_vram : dst_is_pltt, + src_L0, ccs_ofs, copy_ccs); job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm), From 8a92e2a67f627e69dd52aa9c2d3176be13aef2cc Mon Sep 17 00:00:00 2001 From: Akshata Jahagirdar Date: Wed, 17 Jul 2024 21:10:07 +0000 Subject: [PATCH 43/95] drm/xe/migrate: Add kunit to test migration functionality for BMG This part of kunit verifies that - main data is decompressed and ccs data is clear post bo eviction. - main data is raw copied and ccs data is clear post bo restore. 
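Each of these checks follows the same read-back pattern (a sketch built
from the helpers used in the test below):

  /* post-eviction: main data must decompress to the original pattern,
   * while a CCS surf-copy into ccs_bo must read back as zeroes */
  retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
  check(retval, expected, "Evicted main data matches initial value", test);
  retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
  check(retval, 0, "CCS data is clear after eviction", test);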
v2: Added missing bo_put()/bo_unlock() (Matt Auld) Signed-off-by: Akshata Jahagirdar Reviewed-by: Himal Prasad Ghimiray Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/1d36d4377c566508e42b3fb80d3fe4a588fd00ca.1721250309.git.akshata.jahagirdar@intel.com --- drivers/gpu/drm/xe/tests/xe_migrate.c | 120 +++++++++++++++++++++++++- 1 file changed, 119 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index 4af27847f3fd..48bf7e831014 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -484,6 +484,94 @@ err_sync: return fence; } +static void test_migrate(struct xe_device *xe, struct xe_tile *tile, + struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo, + struct kunit *test) +{ + struct dma_fence *fence; + u64 expected, retval; + long timeout; + long ret; + + expected = 0xd0d0d0d0d0d0d0d0; + xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size); + + fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test); + if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) { + retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64); + if (retval == expected) + KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n"); + } + dma_fence_put(fence); + + kunit_info(test, "Evict vram buffer object\n"); + ret = xe_bo_evict(vram_bo, true); + if (ret) { + KUNIT_FAIL(test, "Failed to evict bo.\n"); + return; + } + + ret = xe_bo_vmap(vram_bo); + if (ret) { + KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret); + return; + } + + retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64); + check(retval, expected, "Clear evicted vram data first value", test); + retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64); + check(retval, expected, "Clear evicted vram data last value", test); + + fence = blt_copy(tile, vram_bo, ccs_bo, + true, "Blit surf copy from vram to sysmem", test); + if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) { + retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64); + check(retval, 0, "Clear ccs data first value", test); + + retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64); + check(retval, 0, "Clear ccs data last value", test); + } + dma_fence_put(fence); + + kunit_info(test, "Restore vram buffer object\n"); + ret = xe_bo_validate(vram_bo, NULL, false); + if (ret) { + KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret); + return; + } + + /* Sync all migration blits */ + timeout = dma_resv_wait_timeout(vram_bo->ttm.base.resv, + DMA_RESV_USAGE_KERNEL, + true, + 5 * HZ); + if (timeout <= 0) { + KUNIT_FAIL(test, "Failed to sync bo eviction.\n"); + return; + } + + ret = xe_bo_vmap(vram_bo); + if (ret) { + KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret); + return; + } + + retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64); + check(retval, expected, "Restored value must be equal to initial value", test); + retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64); + check(retval, expected, "Restored value must be equal to initial value", test); + + fence = blt_copy(tile, vram_bo, ccs_bo, + true, "Blit surf copy from vram to sysmem", test); + if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) { + retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64); + check(retval, 0, "Clear ccs data first value", test); + retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64); + check(retval, 0, "Clear ccs data last value", test); + } + 
dma_fence_put(fence);
+}
+
 static void test_clear(struct xe_device *xe, struct xe_tile *tile,
 		       struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct kunit *test)
 {
@@ -541,7 +629,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile,
 static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
 				       struct kunit *test)
 {
-	struct xe_bo *sys_bo, *vram_bo;
+	struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
 	unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
 	long ret;
 
@@ -569,6 +657,29 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
 	}
 	xe_bo_unlock(sys_bo);
 
+	ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M, DRM_XE_GEM_CPU_CACHING_WC,
+				   ttm_bo_type_device, bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+
+	if (IS_ERR(ccs_bo)) {
+		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+			   PTR_ERR(ccs_bo));
+		return;
+	}
+
+	xe_bo_lock(ccs_bo, false);
+	ret = xe_bo_validate(ccs_bo, NULL, false);
+	if (ret) {
+		KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+		goto free_ccsbo;
+	}
+
+	ret = xe_bo_vmap(ccs_bo);
+	if (ret) {
+		KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+		goto free_ccsbo;
+	}
+	xe_bo_unlock(ccs_bo);
+
 	vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M, DRM_XE_GEM_CPU_CACHING_WC,
 				    ttm_bo_type_device, bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
 	if (IS_ERR(vram_bo)) {
@@ -591,17 +702,24 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
 	}
 
 	test_clear(xe, tile, sys_bo, vram_bo, test);
+	test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
 	xe_bo_unlock(vram_bo);
 
 	xe_bo_lock(vram_bo, false);
 	xe_bo_vunmap(vram_bo);
 	xe_bo_unlock(vram_bo);
 
+	xe_bo_lock(ccs_bo, false);
+	xe_bo_vunmap(ccs_bo);
+	xe_bo_unlock(ccs_bo);
+
 	xe_bo_lock(sys_bo, false);
 	xe_bo_vunmap(sys_bo);
 	xe_bo_unlock(sys_bo);
 free_vrambo:
 	xe_bo_put(vram_bo);
+free_ccsbo:
+	xe_bo_put(ccs_bo);
 free_sysbo:
 	xe_bo_put(sys_bo);
 }

From 3849c6ff3a1517e4c8a6d004d9cb31241c849713 Mon Sep 17 00:00:00 2001
From: Akshata Jahagirdar
Date: Wed, 17 Jul 2024 21:10:08 +0000
Subject: [PATCH 44/95] drm/xe/xe2: Do not run xe_bo_test for xe2+ dgfx

In xe2+ dgfx, we don't need to handle the copying of ccs metadata during
migration. This test validates the ccs data post clear and copy during
evict/restore operation. Thus, we can skip this test on xe2+ dgfx.

Signed-off-by: Akshata Jahagirdar
Reviewed-by: Himal Prasad Ghimiray
Signed-off-by: Matt Roper
Link: https://patchwork.freedesktop.org/patch/msgid/57d9df82ad02e53c9b0d2a7d40bb27acce57b927.1721250309.git.akshata.jahagirdar@intel.com
---
 drivers/gpu/drm/xe/tests/xe_bo.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 692e1b46b9cf..e2e0ea24757a 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -162,6 +162,12 @@ static int ccs_test_run_device(struct xe_device *xe)
 		return 0;
 	}
 
+	/* For xe2+ dgfx, we don't handle ccs metadata */
+	if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) {
+		kunit_info(test, "Skipping on xe2+ dgfx device.\n");
+		return 0;
+	}
+
 	xe_pm_runtime_get(xe);
 
 	for_each_tile(tile, xe, id) {

From 775d0adc01a55fe0458139330415d86bb3533efe Mon Sep 17 00:00:00 2001
From: Uma Shankar
Date: Wed, 17 Jul 2024 13:52:52 +0530
Subject: [PATCH 45/95] drm/xe/fbdev: Limit the usage of stolen for LNL+

As per recommendation in the workarounds:
WA_22019338487

There is an issue with accessing Stolen memory pages due to a hardware
limitation.
Limit the usage of stolen memory for fbdev for LNL+. Don't use the BIOS
FB from stolen on LNL+ and allocate it from system memory instead.

v2: Corrected the WA number, limited the WA to LNL and adopted the
    XE_WA framework as suggested by Lucas and Matt.

v3: Introduced the waxxx_display to implement the display side of the WA
    changes on Lunarlake. Used xe_root_mmio_gt and avoided the for loop
    (Suggested by Lucas)

v4: Fixed some nits (Luca)

Reviewed-by: Lucas De Marchi
Signed-off-by: Uma Shankar
Link: https://patchwork.freedesktop.org/patch/msgid/20240717082252.3875909-1-uma.shankar@intel.com
---
 drivers/gpu/drm/xe/display/intel_fbdev_fb.c   | 6 +++++-
 drivers/gpu/drm/xe/display/xe_plane_initial.c | 6 ++++++
 drivers/gpu/drm/xe/xe_wa_oob.rules            | 1 +
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 816ad13821a8..cd8948c08661 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -10,6 +10,9 @@
 #include "xe_bo.h"
 #include "xe_gt.h"
 #include "xe_ttm_stolen_mgr.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
 
 struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 					       struct drm_fb_helper_surface_size *sizes)
@@ -37,7 +40,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 	size = PAGE_ALIGN(size);
 	obj = ERR_PTR(-ENODEV);
 
-	if (!IS_DGFX(xe)) {
+	if (!IS_DGFX(xe) && !XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
 		obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
 					   NULL, size,
 					   ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
@@ -48,6 +51,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 		else
 			drm_info(&xe->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj));
 	}
+
 	if (IS_ERR(obj)) {
 		obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
 					   ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index e135b20962d9..ace101efd7d0 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -18,6 +18,9 @@
 #include "intel_frontbuffer.h"
 #include "intel_plane_initial.h"
 #include "xe_bo.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
 
 static bool
 intel_reuse_initial_plane_obj(struct intel_crtc *this,
@@ -104,6 +107,9 @@ initial_plane_bo(struct xe_device *xe,
 		phys_base = base;
 		flags |= XE_BO_FLAG_STOLEN;
 
+		if (XE_WA(xe_root_mmio_gt(xe), 22019338487_display))
+			return NULL;
+
 		/*
 		 * If the FB is too big, just don't use it since fbdev is not very
 		 * important and we should probably use that space with FBC or other
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 08f7336881e3..540d38603f32 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -29,4 +29,5 @@
 13011645652	GRAPHICS_VERSION(2004)
 22019338487	MEDIA_VERSION(2000)
 		GRAPHICS_VERSION(2001)
+22019338487_display	PLATFORM(LUNARLAKE)
 16023588340	GRAPHICS_VERSION(2001)

From 275aa53f59df769802f28a0a623019aadab2e05d Mon Sep 17 00:00:00 2001
From: Nirmoy Das
Date: Wed, 17 Jul 2024 14:59:50 +0200
Subject: [PATCH 46/95] drm/xe/pm: Add trace for pm functions

Add traces to the xe pm functions for better debuggability.
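As a usage note (standard tracefs paths, assuming the driver's trace
system is named xe; not part of this patch): the new events should
appear under /sys/kernel/tracing/events/xe/ once the driver is loaded,
so writing 1 to e.g. events/xe/xe_pm_runtime_get/enable and reading
trace_pipe prints each runtime-PM get together with the caller resolved
via %pS.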
v2: Fix indentation and add trace for xe_pm_runtime_get_ioctl Cc: Matthew Brost Cc: Rodrigo Vivi Reviewed-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20240717125950.9952-1-nirmoy.das@intel.com Signed-off-by: Nirmoy Das --- drivers/gpu/drm/xe/xe_pm.c | 8 ++++++ drivers/gpu/drm/xe/xe_trace.h | 52 +++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index de3b5df65e48..9f3c14fd9f33 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -20,6 +20,7 @@ #include "xe_guc.h" #include "xe_irq.h" #include "xe_pcode.h" +#include "xe_trace.h" #include "xe_wa.h" /** @@ -87,6 +88,7 @@ int xe_pm_suspend(struct xe_device *xe) int err; drm_dbg(&xe->drm, "Suspending device\n"); + trace_xe_pm_suspend(xe, __builtin_return_address(0)); for_each_gt(gt, xe, id) xe_gt_suspend_prepare(gt); @@ -131,6 +133,7 @@ int xe_pm_resume(struct xe_device *xe) int err; drm_dbg(&xe->drm, "Resuming device\n"); + trace_xe_pm_resume(xe, __builtin_return_address(0)); for_each_tile(tile, xe, id) xe_wa_apply_tile_workarounds(tile); @@ -326,6 +329,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe) u8 id; int err = 0; + trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0)); /* Disable access_ongoing asserts and prevent recursive pm calls */ xe_pm_write_callback_task(xe, current); @@ -399,6 +403,7 @@ int xe_pm_runtime_resume(struct xe_device *xe) u8 id; int err = 0; + trace_xe_pm_runtime_resume(xe, __builtin_return_address(0)); /* Disable access_ongoing asserts and prevent recursive pm calls */ xe_pm_write_callback_task(xe, current); @@ -463,6 +468,7 @@ static void pm_runtime_lockdep_prime(void) */ void xe_pm_runtime_get(struct xe_device *xe) { + trace_xe_pm_runtime_get(xe, __builtin_return_address(0)); pm_runtime_get_noresume(xe->drm.dev); if (xe_pm_read_callback_task(xe) == current) @@ -478,6 +484,7 @@ void xe_pm_runtime_get(struct xe_device *xe) */ void xe_pm_runtime_put(struct xe_device *xe) { + trace_xe_pm_runtime_put(xe, __builtin_return_address(0)); if (xe_pm_read_callback_task(xe) == current) { pm_runtime_put_noidle(xe->drm.dev); } else { @@ -495,6 +502,7 @@ void xe_pm_runtime_put(struct xe_device *xe) */ int xe_pm_runtime_get_ioctl(struct xe_device *xe) { + trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0)); if (WARN_ON(xe_pm_read_callback_task(xe) == current)) return -ELOOP; diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h index baba14fb1e32..1abdb30cb7ad 100644 --- a/drivers/gpu/drm/xe/xe_trace.h +++ b/drivers/gpu/drm/xe/xe_trace.h @@ -369,6 +369,58 @@ TRACE_EVENT(xe_reg_rw, (u32)(__entry->val >> 32)) ); +DECLARE_EVENT_CLASS(xe_pm_runtime, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller), + + TP_STRUCT__entry( + __string(dev, __dev_name_xe(xe)) + __field(void *, caller) + ), + + TP_fast_assign( + __assign_str(dev); + __entry->caller = caller; + ), + + TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_resume, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + 
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + #endif /* This part must be outside protection */ From 0fde907da2d5fd4da68845e96c6842497159c858 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Wed, 17 Jul 2024 07:04:28 -0700 Subject: [PATCH 47/95] drm/xe: Validate user fence during creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fail invalid addresses during user fence creation. Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs") Signed-off-by: Matthew Brost Reviewed-by: Thomas Hellström Reviewed-by: Nirmoy Das Link: https://patchwork.freedesktop.org/patch/msgid/20240717140429.1396820-1-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_sync.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index 2883d9aca404..f2e5e3c8c0cd 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -53,14 +53,18 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr, u64 value) { struct xe_user_fence *ufence; + u64 __user *ptr = u64_to_user_ptr(addr); + + if (!access_ok(ptr, sizeof(ptr))) + return ERR_PTR(-EFAULT); ufence = kmalloc(sizeof(*ufence), GFP_KERNEL); if (!ufence) - return NULL; + return ERR_PTR(-ENOMEM); ufence->xe = xe; kref_init(&ufence->refcount); - ufence->addr = u64_to_user_ptr(addr); + ufence->addr = ptr; ufence->value = value; ufence->mm = current->mm; mmgrab(ufence->mm); @@ -183,8 +187,8 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, } else { sync->ufence = user_fence_create(xe, sync_in.addr, sync_in.timeline_value); - if (XE_IOCTL_DBG(xe, !sync->ufence)) - return -ENOMEM; + if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence))) + return PTR_ERR(sync->ufence); } break; From 5e4e1ed6b87b6b3a3531cd8b5cfa9c3b09d841b2 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Wed, 17 Jul 2024 07:04:29 -0700 Subject: [PATCH 48/95] drm/xe: Remove unused xe_sync_entry_wait MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit xe_sync_entry_wait is no longer used, remove it. 
Signed-off-by: Matthew Brost Reviewed-by: Thomas Hellström Reviewed-by: Nirmoy Das Link: https://patchwork.freedesktop.org/patch/msgid/20240717140429.1396820-2-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_sync.c | 8 -------- drivers/gpu/drm/xe/xe_sync.h | 1 - 2 files changed, 9 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index f2e5e3c8c0cd..533246f42256 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -204,14 +204,6 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, return 0; } -int xe_sync_entry_wait(struct xe_sync_entry *sync) -{ - if (sync->fence) - dma_fence_wait(sync->fence, true); - - return 0; -} - int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job) { int err; diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h index 006dbf780793..256ffc1e54dc 100644 --- a/drivers/gpu/drm/xe/xe_sync.h +++ b/drivers/gpu/drm/xe/xe_sync.h @@ -22,7 +22,6 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, struct xe_sync_entry *sync, struct drm_xe_sync __user *sync_user, unsigned int flags); -int xe_sync_entry_wait(struct xe_sync_entry *sync); int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job); void xe_sync_entry_signal(struct xe_sync_entry *sync, From 7108b4a589cd6d3a2c1276fd610b3500f46de66a Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 10 Jul 2024 15:02:27 -0700 Subject: [PATCH 49/95] drm/xe/uapi: Expose SIMD16 EU mask in topology query MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PVC, Xe2 and later platforms have 16-wide EUs. We were implicitly reporting for PVC the number of 16-wide EUs without giving userspace any hint that they were different than for other platforms. Xe2 and later also have 16-wide, but in those cases the reported number would correspond to the 8-wide count. To avoid confusion and make sure the right number is used by userspace depending on the platform, add a new item to the topology query and drop the one that is not available. The new mask reported for both PVC and Xe2 should now match the numbers reported via hwconfig. v2: Use a different topo item with EU type in its name to report the new mask instead of adding the type itself as the item (Matt Roper) Reviewed-by: Matt Roper Acked-by: José Roberto de Souza Acked-by: Mateusz Jablonski Acked-by: Wenbin Lu Acked-by: Effie Yu Acked-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20240710220446.2169797-1-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_gt_topology.c | 27 ++++++++++++++++++++++----- drivers/gpu/drm/xe/xe_gt_types.h | 11 +++++++++++ drivers/gpu/drm/xe/xe_query.c | 4 +++- include/uapi/drm/xe_drm.h | 10 +++++++++- 4 files changed, 45 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c index 25ff03ab8448..5a1559edf3e9 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.c +++ b/drivers/gpu/drm/xe/xe_gt_topology.c @@ -6,6 +6,7 @@ #include "xe_gt_topology.h" #include +#include #include "regs/xe_gt_regs.h" #include "xe_assert.h" @@ -31,7 +32,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...) 
} static void -load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask) +load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type) { struct xe_device *xe = gt_to_xe(gt); u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE); @@ -47,11 +48,13 @@ load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask) if (GRAPHICS_VERx100(xe) < 1250) reg_val = ~reg_val & XELP_EU_MASK; - /* On PVC, one bit = one EU */ - if (GRAPHICS_VERx100(xe) == 1260) { + if (GRAPHICS_VERx100(xe) == 1260 || GRAPHICS_VER(xe) >= 20) { + /* SIMD16 EUs, one bit == one EU */ + *eu_type = XE_GT_EU_TYPE_SIMD16; val = reg_val; } else { - /* All other platforms, one bit = 2 EU */ + /* SIMD8 EUs, one bit == 2 EU */ + *eu_type = XE_GT_EU_TYPE_SIMD8; for (i = 0; i < fls(reg_val); i++) if (reg_val & BIT(i)) val |= 0x3 << 2 * i; @@ -213,7 +216,7 @@ xe_gt_topology_init(struct xe_gt *gt) XEHP_GT_COMPUTE_DSS_ENABLE, XEHPC_GT_COMPUTE_DSS_ENABLE_EXT, XE2_GT_COMPUTE_DSS_2); - load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss); + load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, >->fuse_topo.eu_type); load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask); p = drm_dbg_printer(>_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology"); @@ -221,6 +224,18 @@ xe_gt_topology_init(struct xe_gt *gt) xe_gt_topology_dump(gt, &p); } +static const char *eu_type_to_str(enum xe_gt_eu_type eu_type) +{ + switch (eu_type) { + case XE_GT_EU_TYPE_SIMD16: + return "simd16"; + case XE_GT_EU_TYPE_SIMD8: + return "simd8"; + } + + unreachable(); +} + void xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p) { @@ -231,6 +246,8 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p) drm_printf(p, "EU mask per DSS: %*pb\n", XE_MAX_EU_FUSE_BITS, gt->fuse_topo.eu_mask_per_dss); + drm_printf(p, "EU type: %s\n", + eu_type_to_str(gt->fuse_topo.eu_type)); drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS, gt->fuse_topo.l3_bank_mask); diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index 38a0d0e178c8..ef68c4a92972 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -27,6 +27,11 @@ enum xe_gt_type { XE_GT_TYPE_MEDIA, }; +enum xe_gt_eu_type { + XE_GT_EU_TYPE_SIMD8, + XE_GT_EU_TYPE_SIMD16, +}; + #define XE_MAX_DSS_FUSE_REGS 3 #define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS) #define XE_MAX_EU_FUSE_REGS 1 @@ -343,6 +348,12 @@ struct xe_gt { /** @fuse_topo.l3_bank_mask: L3 bank mask */ xe_l3_bank_mask_t l3_bank_mask; + + /** + * @fuse_topo.eu_type: type/width of EU stored in + * fuse_topo.eu_mask_per_dss + */ + enum xe_gt_eu_type eu_type; } fuse_topo; /** @steering: register steering for individual HW units */ diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 4e01df6b1b7a..73ef6e4c2dc9 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -518,7 +518,9 @@ static int query_gt_topology(struct xe_device *xe, if (err) return err; - topo.type = DRM_XE_TOPO_EU_PER_DSS; + topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ? + DRM_XE_TOPO_SIMD16_EU_PER_DSS : + DRM_XE_TOPO_EU_PER_DSS; err = copy_mask(&query_ptr, &topo, gt->fuse_topo.eu_mask_per_dss, sizeof(gt->fuse_topo.eu_mask_per_dss)); diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 19619d4952a8..29425d7fdc77 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -517,7 +517,14 @@ struct drm_xe_query_gt_list { * available per Dual Sub Slices (DSS). 
For example a query response * containing the following in mask: * ``EU_PER_DSS ff ff 00 00 00 00 00 00`` - * means each DSS has 16 EU. + * means each DSS has 16 SIMD8 EUs. This type may be omitted if device + * doesn't have SIMD8 EUs. + * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution + * Units (EU) available per Dual Sub Slices (DSS). For example a query + * response containing the following in mask: + * ``SIMD16_EU_PER_DSS ff ff 00 00 00 00 00 00`` + * means each DSS has 16 SIMD16 EUs. This type may be omitted if device + * doesn't have SIMD16 EUs. */ struct drm_xe_query_topology_mask { /** @gt_id: GT ID the mask is associated with */ @@ -527,6 +534,7 @@ struct drm_xe_query_topology_mask { #define DRM_XE_TOPO_DSS_COMPUTE 2 #define DRM_XE_TOPO_L3_BANK 3 #define DRM_XE_TOPO_EU_PER_DSS 4 +#define DRM_XE_TOPO_SIMD16_EU_PER_DSS 5 /** @type: type of mask */ __u16 type; From 3d0c4a62cc553c6ffde4cb11620eba991e770665 Mon Sep 17 00:00:00 2001 From: Umesh Nerlige Ramappa Date: Thu, 18 Jul 2024 14:05:45 -0700 Subject: [PATCH 50/95] drm/xe: Move part of xe_file cleanup to a helper In order to make xe_file ref counted, move destruction of xe_file members to a helper. v2: Move xe_vm_close_and_put back into xe_file_close (Matt) Signed-off-by: Umesh Nerlige Ramappa Reviewed-by: Matthew Brost Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240718210548.3580382-2-umesh.nerlige.ramappa@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_device.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index f51d456d15f7..0a7478d1ee63 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -93,9 +93,25 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) return 0; } +static void xe_file_destroy(struct xe_file *xef) +{ + struct xe_device *xe = xef->xe; + + xa_destroy(&xef->exec_queue.xa); + mutex_destroy(&xef->exec_queue.lock); + xa_destroy(&xef->vm.xa); + mutex_destroy(&xef->vm.lock); + + spin_lock(&xe->clients.lock); + xe->clients.count--; + spin_unlock(&xe->clients.lock); + + xe_drm_client_put(xef->client); + kfree(xef); +} + static void xe_file_close(struct drm_device *dev, struct drm_file *file) { - struct xe_device *xe = to_xe_device(dev); struct xe_file *xef = file->driver_priv; struct xe_vm *vm; struct xe_exec_queue *q; @@ -111,21 +127,12 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file) xe_exec_queue_kill(q); xe_exec_queue_put(q); } - xa_destroy(&xef->exec_queue.xa); - mutex_destroy(&xef->exec_queue.lock); mutex_lock(&xef->vm.lock); xa_for_each(&xef->vm.xa, idx, vm) xe_vm_close_and_put(vm); mutex_unlock(&xef->vm.lock); - xa_destroy(&xef->vm.xa); - mutex_destroy(&xef->vm.lock); - spin_lock(&xe->clients.lock); - xe->clients.count--; - spin_unlock(&xe->clients.lock); - - xe_drm_client_put(xef->client); - kfree(xef); + xe_file_destroy(xef); } static const struct drm_ioctl_desc xe_ioctls[] = { From ce8c161cbad43f4056451e541f7ae3471d0cca12 Mon Sep 17 00:00:00 2001 From: Umesh Nerlige Ramappa Date: Thu, 18 Jul 2024 14:05:46 -0700 Subject: [PATCH 51/95] drm/xe: Add ref counting for xe_file Add ref counting for xe_file. 
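Schematically, any component that caches an xef pointer is expected to
pin the file object for its own lifetime (a sketch of the intended
usage with a generic obj placeholder; the VM and exec queue call sites
follow in the next patches):

  obj->xef = xe_file_get(xef);	/* on create: the reference travels with the pointer */
  /* ... obj may use obj->xef safely, even after the file is closed ... */
  xe_file_put(obj->xef);	/* on teardown */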
v2: - Add kernel doc for exported functions (Matt) - Instead of xe_file_destroy, export the get/put helpers (Lucas) v3: Fixup the kernel-doc format and description (Matt, Lucas) Signed-off-by: Umesh Nerlige Ramappa Reviewed-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20240718210548.3580382-3-umesh.nerlige.ramappa@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_device.c | 33 ++++++++++++++++++++++++++-- drivers/gpu/drm/xe/xe_device.h | 3 +++ drivers/gpu/drm/xe/xe_device_types.h | 3 +++ 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 0a7478d1ee63..50c302cf3249 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -90,11 +90,14 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) spin_unlock(&xe->clients.lock); file->driver_priv = xef; + kref_init(&xef->refcount); + return 0; } -static void xe_file_destroy(struct xe_file *xef) +static void xe_file_destroy(struct kref *ref) { + struct xe_file *xef = container_of(ref, struct xe_file, refcount); struct xe_device *xe = xef->xe; xa_destroy(&xef->exec_queue.xa); @@ -110,6 +113,32 @@ static void xe_file_destroy(struct xe_file *xef) kfree(xef); } +/** + * xe_file_get() - Take a reference to the xe file object + * @xef: Pointer to the xe file + * + * Anyone with a pointer to xef must take a reference to the xe file + * object using this call. + * + * Return: xe file pointer + */ +struct xe_file *xe_file_get(struct xe_file *xef) +{ + kref_get(&xef->refcount); + return xef; +} + +/** + * xe_file_put() - Drop a reference to the xe file object + * @xef: Pointer to the xe file + * + * Used to drop reference to the xef object + */ +void xe_file_put(struct xe_file *xef) +{ + kref_put(&xef->refcount, xe_file_destroy); +} + static void xe_file_close(struct drm_device *dev, struct drm_file *file) { struct xe_file *xef = file->driver_priv; @@ -132,7 +161,7 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file) xe_vm_close_and_put(vm); mutex_unlock(&xef->vm.lock); - xe_file_destroy(xef); + xe_file_put(xef); } static const struct drm_ioctl_desc xe_ioctls[] = { diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 0a2a3e7fd402..533ccfb2567a 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -171,4 +171,7 @@ static inline bool xe_device_wedged(struct xe_device *xe) void xe_device_declare_wedged(struct xe_device *xe); +struct xe_file *xe_file_get(struct xe_file *xef); +void xe_file_put(struct xe_file *xef); + #endif diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 8e81ade7279b..36252d5b1663 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -581,6 +581,9 @@ struct xe_file { /** @client: drm client */ struct xe_drm_client *client; + + /** @refcount: ref count of this xe file */ + struct kref refcount; }; #endif From a2387e69493df3de706f14e4573ee123d23d5d34 Mon Sep 17 00:00:00 2001 From: Umesh Nerlige Ramappa Date: Thu, 18 Jul 2024 14:05:47 -0700 Subject: [PATCH 52/95] drm/xe: Take a ref to xe file when user creates a VM Take a reference to xef when user creates the VM and put the reference when user destroys the VM. 
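A plain borrowed pointer is not enough here because VM destruction finishes
asynchronously from a worker, which can run after the file has already been
closed. A simplified view of the lifetime this patch establishes (call names
taken from the diff below):

	xe_vm_create_ioctl()
		vm->xef = xe_file_get(xef);	/* VM pins the file */

	/* the file may be closed here; xef stays alive via the VM's ref */

	vm_destroy_work_func()
		xe_file_put(vm->xef);		/* possibly the final put */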
Signed-off-by: Umesh Nerlige Ramappa Reviewed-by: Matthew Brost Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240718210548.3580382-4-umesh.nerlige.ramappa@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_vm.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index cf3aea5d8cdc..6adb0ff09d40 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -1670,6 +1670,10 @@ static void vm_destroy_work_func(struct work_struct *w) XE_WARN_ON(vm->pt_root[id]); trace_xe_vm_free(vm); + + if (vm->xef) + xe_file_put(vm->xef); + kfree(vm); } @@ -1802,7 +1806,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, } args->vm_id = id; - vm->xef = xef; + vm->xef = xe_file_get(xef); /* Record BO memory for VM pagetable created against client */ for_each_tile(tile, xe, id) From 2149ded63079449b8dddf9da38392632f155e6b5 Mon Sep 17 00:00:00 2001 From: Umesh Nerlige Ramappa Date: Thu, 18 Jul 2024 14:05:48 -0700 Subject: [PATCH 53/95] drm/xe: Fix use after free when client stats are captured xe_file_close triggers an asynchronous queue cleanup and then frees up the xef object. Since queue cleanup flushes all pending jobs and the KMD stores client usage stats into the xef object after jobs are flushed, we see a use-after-free for the xef object. Resolve this by taking a reference to xef from xe_exec_queue. While at it, revert an earlier change that contained a partial work around for this issue. v2: - Take a ref to xef even for the VM bind queue (Matt) - Squash patches relevant to that fix and work around (Lucas) v3: Fix typo (Lucas) Fixes: ce62827bc294 ("drm/xe: Do not access xe file when updating exec queue run_ticks") Fixes: 6109f24f87d7 ("drm/xe: Add helper to accumulate exec queue runtime") Closes: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1908 Signed-off-by: Umesh Nerlige Ramappa Reviewed-by: Matthew Brost Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240718210548.3580382-5-umesh.nerlige.ramappa@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_drm_client.c | 5 +---- drivers/gpu/drm/xe/xe_exec_queue.c | 10 +++++++++- drivers/gpu/drm/xe/xe_exec_queue_types.h | 7 +++---- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c index 6a26923fa10e..7ddd59908334 100644 --- a/drivers/gpu/drm/xe/xe_drm_client.c +++ b/drivers/gpu/drm/xe/xe_drm_client.c @@ -251,11 +251,8 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file) /* Accumulate all the exec queues from this client */ mutex_lock(&xef->exec_queue.lock); - xa_for_each(&xef->exec_queue.xa, i, q) { + xa_for_each(&xef->exec_queue.xa, i, q) xe_exec_queue_update_run_ticks(q); - xef->run_ticks[q->class] += q->run_ticks - q->old_run_ticks; - q->old_run_ticks = q->run_ticks; - } mutex_unlock(&xef->exec_queue.lock); /* Get the total GPU cycles */ diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index 3336a01a1006..69867a7b7c77 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -37,6 +37,10 @@ static void __xe_exec_queue_free(struct xe_exec_queue *q) { if (q->vm) xe_vm_put(q->vm); + + if (q->xef) + xe_file_put(q->xef); + kfree(q); } @@ -649,6 +653,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, goto kill_exec_queue; args->exec_queue_id = id; + q->xef = 
xe_file_get(xef); return 0; @@ -762,6 +767,7 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q) */ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q) { + struct xe_file *xef; struct xe_lrc *lrc; u32 old_ts, new_ts; @@ -773,6 +779,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q) if (!q->vm || !q->vm->xef) return; + xef = q->vm->xef; + /* * Only sample the first LRC. For parallel submission, all of them are * scheduled together and we compensate that below by multiplying by @@ -783,7 +791,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q) */ lrc = q->lrc[0]; new_ts = xe_lrc_update_timestamp(lrc, &old_ts); - q->run_ticks += (new_ts - old_ts) * q->width; + xef->run_ticks[q->class] += (new_ts - old_ts) * q->width; } void xe_exec_queue_kill(struct xe_exec_queue *q) diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index ded9f9396429..1408b02eea53 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -38,6 +38,9 @@ enum xe_exec_queue_priority { * a kernel object. */ struct xe_exec_queue { + /** @xef: Back pointer to xe file if this is user created exec queue */ + struct xe_file *xef; + /** @gt: graphics tile this exec queue can submit to */ struct xe_gt *gt; /** @@ -139,10 +142,6 @@ struct xe_exec_queue { * Protected by @vm's resv. Unused if @vm == NULL. */ u64 tlb_flush_seqno; - /** @old_run_ticks: prior hw engine class run time in ticks for this exec queue */ - u64 old_run_ticks; - /** @run_ticks: hw engine class run time in ticks for this exec queue */ - u64 run_ticks; /** @lrc: logical ring context for this exec queue */ struct xe_lrc *lrc[]; }; From ad16682db18f4414e53bba1ce0db75b08bdc4dff Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Thu, 18 Jul 2024 22:31:55 +0200 Subject: [PATCH 54/95] drm/xe/vf: Fix register value lookup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We should use the number of actual entries stored in the runtime register buffer, not the maximum number of entries that this buffer can hold, otherwise bsearch() may fail and we may miss the data and wrongly report unexpected access to some registers. Fixes: 4edadc41a3a4 ("drm/xe/vf: Use register values obtained from the PF") Signed-off-by: Michal Wajdeczko Cc: Piotr Piórkowski Cc: Matt Roper Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240718203155.486-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index 6a87d31c44e6..47222bd9988d 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -850,7 +850,7 @@ static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr) xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); - return bsearch(&key, runtime->regs, runtime->regs_size, sizeof(key), + return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key), vf_runtime_reg_cmp); } From a522b285c6b4b611406d59612a8d7241714d2e31 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Fri, 19 Jul 2024 10:29:02 -0700 Subject: [PATCH 55/95] drm/xe: Add xe_gt_tlb_invalidation_fence_init helper Other layers should not be touching struct xe_gt_tlb_invalidation_fence directly, add helper for initialization. 
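A caller that previously open-coded dma_fence_init() plus the list setup then
reduces to something like this sketch (the helper also takes an extra fence
reference for the caller, dropped with a regular dma_fence_put()):

	xe_gt_tlb_invalidation_fence_init(gt, &ifence->base);

	/* ... issue the invalidation that will signal the fence ... */

	dma_fence_put(&ifence->base.base);	/* drop the caller's reference */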
v2: - Add dma_fence_get and list init to xe_gt_tlb_invalidation_fence_init Signed-off-by: Matthew Brost Reviewed-by: Nirmoy Das Link: https://patchwork.freedesktop.org/patch/msgid/20240719172905.1527927-2-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 36 +++++++++++++++++++++ drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 3 ++ drivers/gpu/drm/xe/xe_pt.c | 26 +-------------- 3 files changed, 40 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index d9359976ab8b..92a18a0e4acd 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -508,3 +508,39 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len) return 0; } + +static const char * +invalidation_fence_get_driver_name(struct dma_fence *dma_fence) +{ + return "xe"; +} + +static const char * +invalidation_fence_get_timeline_name(struct dma_fence *dma_fence) +{ + return "invalidation_fence"; +} + +static const struct dma_fence_ops invalidation_fence_ops = { + .get_driver_name = invalidation_fence_get_driver_name, + .get_timeline_name = invalidation_fence_get_timeline_name, +}; + +/** + * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence + * @gt: GT + * @fence: TLB invalidation fence to initialize + * + * Initialize TLB invalidation fence for use + */ +void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, + struct xe_gt_tlb_invalidation_fence *fence) +{ + spin_lock_irq(>->tlb_invalidation.lock); + dma_fence_init(&fence->base, &invalidation_fence_ops, + >->tlb_invalidation.lock, + dma_fence_context_alloc(1), 1); + spin_unlock_irq(>->tlb_invalidation.lock); + INIT_LIST_HEAD(&fence->link); + dma_fence_get(&fence->base); +} diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h index bf3bebd9f985..948f4a2f5214 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h @@ -26,4 +26,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt, int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno); int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len); +void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, + struct xe_gt_tlb_invalidation_fence *fence); + #endif /* _XE_GT_TLB_INVALIDATION_ */ diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index f391de908033..1caa99b22c73 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -1317,23 +1317,6 @@ struct invalidation_fence { u32 asid; }; -static const char * -invalidation_fence_get_driver_name(struct dma_fence *dma_fence) -{ - return "xe"; -} - -static const char * -invalidation_fence_get_timeline_name(struct dma_fence *dma_fence) -{ - return "invalidation_fence"; -} - -static const struct dma_fence_ops invalidation_fence_ops = { - .get_driver_name = invalidation_fence_get_driver_name, - .get_timeline_name = invalidation_fence_get_timeline_name, -}; - static void invalidation_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { @@ -1372,15 +1355,8 @@ static void invalidation_fence_init(struct xe_gt *gt, trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base); - spin_lock_irq(>->tlb_invalidation.lock); - dma_fence_init(&ifence->base.base, &invalidation_fence_ops, - >->tlb_invalidation.lock, - dma_fence_context_alloc(1), 1); - spin_unlock_irq(>->tlb_invalidation.lock); + 
xe_gt_tlb_invalidation_fence_init(gt, &ifence->base);
 
-	INIT_LIST_HEAD(&ifence->base.link);
-
-	dma_fence_get(&ifence->base.base);	/* Ref for caller */
 	ifence->fence = fence;
 	ifence->gt = gt;
 	ifence->start = start;

From 61ac035361ae555ee5a17a7667fe96afdde3d59a Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Fri, 19 Jul 2024 10:29:03 -0700
Subject: [PATCH 56/95] drm/xe: Drop xe_gt_tlb_invalidation_wait

Having two methods to wait on GT TLB invalidations is not ideal. Remove
xe_gt_tlb_invalidation_wait and only use GT TLB invalidation fences.

In addition to two methods being less than ideal, once GT TLB
invalidations are coalesced the seqno cannot be assigned during
xe_gt_tlb_invalidation_ggtt/range. Thus xe_gt_tlb_invalidation_wait
would not have a seqno to wait on. A fence, however, can be armed and
later signaled.

v3:
 - Add explanation about coalescing to commit message
v4:
 - Don't put dma fence if defined on stack (CI)
v5:
 - Initialize ret to zero (CI)
v6:
 - Use invalidation_fence_signal helper in tlb timeout (Matthew Auld)

Signed-off-by: Matthew Brost
Reviewed-by: Nirmoy Das
Link: https://patchwork.freedesktop.org/patch/msgid/20240719172905.1527927-3-matthew.brost@intel.com
---
 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 148 ++++++++------------
 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h |  10 +-
 drivers/gpu/drm/xe/xe_pt.c                  |   2 +-
 drivers/gpu/drm/xe/xe_vm.c                  |  30 ++--
 4 files changed, 80 insertions(+), 110 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 92a18a0e4acd..c3419d4412ce 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -17,6 +17,8 @@
 #include "xe_trace.h"
 #include "regs/xe_guc_regs.h"
 
+#define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS
+
 /*
  * TLB inval depends on pending commands in the CT queue and then the real
  * invalidation time.
Double up the time to process full CT queue @@ -33,6 +35,23 @@ static long tlb_timeout_jiffies(struct xe_gt *gt) return hw_tlb_timeout + 2 * delay; } +static void +__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence) +{ + bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags); + + trace_xe_gt_tlb_invalidation_fence_signal(xe, fence); + dma_fence_signal(&fence->base); + if (!stack) + dma_fence_put(&fence->base); +} + +static void +invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence) +{ + list_del(&fence->link); + __invalidation_fence_signal(xe, fence); +} static void xe_gt_tlb_fence_timeout(struct work_struct *work) { @@ -54,10 +73,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work) xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d", fence->seqno, gt->tlb_invalidation.seqno_recv); - list_del(&fence->link); fence->base.error = -ETIME; - dma_fence_signal(&fence->base); - dma_fence_put(&fence->base); + invalidation_fence_signal(xe, fence); } if (!list_empty(>->tlb_invalidation.pending_fences)) queue_delayed_work(system_wq, @@ -87,21 +104,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt) return 0; } -static void -__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence) -{ - trace_xe_gt_tlb_invalidation_fence_signal(xe, fence); - dma_fence_signal(&fence->base); - dma_fence_put(&fence->base); -} - -static void -invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence) -{ - list_del(&fence->link); - __invalidation_fence_signal(xe, fence); -} - /** * xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset * @gt: graphics tile @@ -111,7 +113,6 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe void xe_gt_tlb_invalidation_reset(struct xe_gt *gt) { struct xe_gt_tlb_invalidation_fence *fence, *next; - struct xe_guc *guc = >->uc.guc; int pending_seqno; /* @@ -134,7 +135,6 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt) else pending_seqno = gt->tlb_invalidation.seqno - 1; WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno); - wake_up_all(&guc->ct.wq); list_for_each_entry_safe(fence, next, >->tlb_invalidation.pending_fences, link) @@ -165,6 +165,8 @@ static int send_tlb_invalidation(struct xe_guc *guc, int seqno; int ret; + xe_gt_assert(gt, fence); + /* * XXX: The seqno algorithm relies on TLB invalidation being processed * in order which they currently are, if that changes the algorithm will @@ -173,10 +175,8 @@ static int send_tlb_invalidation(struct xe_guc *guc, mutex_lock(&guc->ct.lock); seqno = gt->tlb_invalidation.seqno; - if (fence) { - fence->seqno = seqno; - trace_xe_gt_tlb_invalidation_fence_send(xe, fence); - } + fence->seqno = seqno; + trace_xe_gt_tlb_invalidation_fence_send(xe, fence); action[1] = seqno; ret = xe_guc_ct_send_locked(&guc->ct, action, len, G2H_LEN_DW_TLB_INVALIDATE, 1); @@ -209,7 +209,6 @@ static int send_tlb_invalidation(struct xe_guc *guc, TLB_INVALIDATION_SEQNO_MAX; if (!gt->tlb_invalidation.seqno) gt->tlb_invalidation.seqno = 1; - ret = seqno; } mutex_unlock(&guc->ct.lock); @@ -223,14 +222,16 @@ static int send_tlb_invalidation(struct xe_guc *guc, /** * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC * @gt: graphics tile + * @fence: invalidation fence which will be signal on TLB invalidation + * completion * * Issue a TLB invalidation for the GuC. 
Completion of TLB is asynchronous and - * caller can use seqno + xe_gt_tlb_invalidation_wait to wait for completion. + * caller can use the invalidation fence to wait for completion. * - * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success, - * negative error code on error. + * Return: 0 on success, negative error code on error */ -static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt) +static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt, + struct xe_gt_tlb_invalidation_fence *fence) { u32 action[] = { XE_GUC_ACTION_TLB_INVALIDATION, @@ -238,7 +239,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt) MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC), }; - return send_tlb_invalidation(>->uc.guc, NULL, action, + return send_tlb_invalidation(>->uc.guc, fence, action, ARRAY_SIZE(action)); } @@ -257,13 +258,15 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) if (xe_guc_ct_enabled(>->uc.guc.ct) && gt->uc.guc.submission_state.enabled) { - int seqno; + struct xe_gt_tlb_invalidation_fence fence; + int ret; - seqno = xe_gt_tlb_invalidation_guc(gt); - if (seqno <= 0) - return seqno; + xe_gt_tlb_invalidation_fence_init(gt, &fence, true); + ret = xe_gt_tlb_invalidation_guc(gt, &fence); + if (ret < 0) + return ret; - xe_gt_tlb_invalidation_wait(gt, seqno); + xe_gt_tlb_invalidation_fence_wait(&fence); } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) { if (IS_SRIOV_VF(xe)) return 0; @@ -290,18 +293,16 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) * * @gt: graphics tile * @fence: invalidation fence which will be signal on TLB invalidation - * completion, can be NULL + * completion * @start: start address * @end: end address * @asid: address space id * * Issue a range based TLB invalidation if supported, if not fallback to a full - * TLB invalidation. Completion of TLB is asynchronous and caller can either use - * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for - * completion. + * TLB invalidation. Completion of TLB is asynchronous and caller can use + * the invalidation fence to wait for completion. * - * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success, - * negative error code on error. + * Return: Negative error code on error, 0 on success */ int xe_gt_tlb_invalidation_range(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, @@ -312,11 +313,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt, u32 action[MAX_TLB_INVALIDATION_LEN]; int len = 0; + xe_gt_assert(gt, fence); + /* Execlists not supported */ if (gt_to_xe(gt)->info.force_execlist) { - if (fence) - __invalidation_fence_signal(xe, fence); - + __invalidation_fence_signal(xe, fence); return 0; } @@ -382,12 +383,10 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt, * @vma: VMA to invalidate * * Issue a range based TLB invalidation if supported, if not fallback to a full - * TLB invalidation. Completion of TLB is asynchronous and caller can either use - * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for - * completion. + * TLB invalidation. Completion of TLB is asynchronous and caller can use + * the invalidation fence to wait for completion. * - * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success, - * negative error code on error. 
+ * Return: Negative error code on error, 0 on success */ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, @@ -400,43 +399,6 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, xe_vma_vm(vma)->usm.asid); } -/** - * xe_gt_tlb_invalidation_wait - Wait for TLB to complete - * @gt: graphics tile - * @seqno: seqno to wait which was returned from xe_gt_tlb_invalidation - * - * Wait for tlb_timeout_jiffies() for a TLB invalidation to complete. - * - * Return: 0 on success, -ETIME on TLB invalidation timeout - */ -int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno) -{ - struct xe_guc *guc = >->uc.guc; - int ret; - - /* Execlists not supported */ - if (gt_to_xe(gt)->info.force_execlist) - return 0; - - /* - * XXX: See above, this algorithm only works if seqno are always in - * order - */ - ret = wait_event_timeout(guc->ct.wq, - tlb_invalidation_seqno_past(gt, seqno), - tlb_timeout_jiffies(gt)); - if (!ret) { - struct drm_printer p = xe_gt_err_printer(gt); - - xe_gt_err(gt, "TLB invalidation time'd out, seqno=%d, recv=%d\n", - seqno, gt->tlb_invalidation.seqno_recv); - xe_guc_ct_print(&guc->ct, &p, true); - return -ETIME; - } - - return 0; -} - /** * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler * @guc: guc @@ -480,12 +442,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len) return 0; } - /* - * wake_up_all() and wait_event_timeout() already have the correct - * barriers. - */ WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]); - wake_up_all(&guc->ct.wq); list_for_each_entry_safe(fence, next, >->tlb_invalidation.pending_fences, link) { @@ -530,11 +487,13 @@ static const struct dma_fence_ops invalidation_fence_ops = { * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence * @gt: GT * @fence: TLB invalidation fence to initialize + * @stack: fence is stack variable * * Initialize TLB invalidation fence for use */ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, - struct xe_gt_tlb_invalidation_fence *fence) + struct xe_gt_tlb_invalidation_fence *fence, + bool stack) { spin_lock_irq(>->tlb_invalidation.lock); dma_fence_init(&fence->base, &invalidation_fence_ops, @@ -542,5 +501,8 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, dma_fence_context_alloc(1), 1); spin_unlock_irq(>->tlb_invalidation.lock); INIT_LIST_HEAD(&fence->link); - dma_fence_get(&fence->base); + if (stack) + set_bit(FENCE_STACK_BIT, &fence->base.flags); + else + dma_fence_get(&fence->base); } diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h index 948f4a2f5214..f430d5797af7 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h @@ -23,10 +23,16 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, int xe_gt_tlb_invalidation_range(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, u64 start, u64 end, u32 asid); -int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno); int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len); void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, - struct xe_gt_tlb_invalidation_fence *fence); + struct xe_gt_tlb_invalidation_fence *fence, + bool stack); + +static inline void +xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence) +{ + dma_fence_wait(&fence->base, false); +} #endif /* _XE_GT_TLB_INVALIDATION_ */ diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index 
1caa99b22c73..c24e869b7eae 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -1355,7 +1355,7 @@ static void invalidation_fence_init(struct xe_gt *gt, trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base); - xe_gt_tlb_invalidation_fence_init(gt, &ifence->base); + xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false); ifence->fence = fence; ifence->gt = gt; diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 6adb0ff09d40..86e9e2f877e5 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -3180,10 +3180,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) { struct xe_device *xe = xe_vma_vm(vma)->xe; struct xe_tile *tile; + struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE]; u32 tile_needs_invalidate = 0; - int seqno[XE_MAX_TILES_PER_DEVICE]; u8 id; - int ret; + int ret = 0; xe_assert(xe, !xe_vma_is_null(vma)); trace_xe_vma_invalidate(vma); @@ -3208,29 +3208,31 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) for_each_tile(tile, xe, id) { if (xe_pt_zap_ptes(tile, vma)) { - tile_needs_invalidate |= BIT(id); xe_device_wmb(xe); + xe_gt_tlb_invalidation_fence_init(tile->primary_gt, + &fence[id], true); + /* * FIXME: We potentially need to invalidate multiple * GTs within the tile */ - seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma); - if (seqno[id] < 0) - return seqno[id]; + ret = xe_gt_tlb_invalidation_vma(tile->primary_gt, + &fence[id], vma); + if (ret < 0) + goto wait; + + tile_needs_invalidate |= BIT(id); } } - for_each_tile(tile, xe, id) { - if (tile_needs_invalidate & BIT(id)) { - ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]); - if (ret < 0) - return ret; - } - } +wait: + for_each_tile(tile, xe, id) + if (tile_needs_invalidate & BIT(id)) + xe_gt_tlb_invalidation_fence_wait(&fence[id]); vma->tile_invalidated = vma->tile_mask; - return 0; + return ret; } struct xe_vm_snapshot { From 0a382f9bc5dc4744a33970a5ed4df8f9c702ee9e Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Fri, 19 Jul 2024 10:29:04 -0700 Subject: [PATCH 57/95] drm/xe: Hold a PM ref when GT TLB invalidations are inflight Avoid GT TLB invalidation timeouts by holding a PM ref when invalidations are inflight. 
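The resulting pairing, as implemented below, is that
xe_gt_tlb_invalidation_fence_init() takes the PM reference and
xe_gt_tlb_invalidation_fence_fini() drops it, either from the signaling path
or explicitly by the caller when the invalidation was never issued. Sketched
from the xe_gt_tlb_invalidation_ggtt() hunk in this patch:

	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);	/* takes PM ref */
	ret = xe_gt_tlb_invalidation_guc(gt, &fence);
	if (ret < 0) {
		/* fence will never signal, drop the PM ref ourselves */
		xe_gt_tlb_invalidation_fence_fini(&fence);
		return ret;
	}
	xe_gt_tlb_invalidation_fence_wait(&fence);	/* fini runs on signal */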
v2:
 - Drop PM ref before signaling fence (CI)
v3:
 - Move invalidation_fence_signal helper in tlb timeout to previous
   patch (Matthew Auld)

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: Rodrigo Vivi
Cc: Nirmoy Das
Signed-off-by: Matthew Brost
Reviewed-by: Nirmoy Das
Link: https://patchwork.freedesktop.org/patch/msgid/20240719172905.1527927-4-matthew.brost@intel.com
---
 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c   | 23 +++++++++++++++++--
 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h   |  1 +
 .../gpu/drm/xe/xe_gt_tlb_invalidation_types.h |  4 ++++
 drivers/gpu/drm/xe/xe_vm.c                    |  4 +++-
 4 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index c3419d4412ce..481d83d07367 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -13,6 +13,7 @@
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
 #include "xe_mmio.h"
+#include "xe_pm.h"
 #include "xe_sriov.h"
 #include "xe_trace.h"
 #include "regs/xe_guc_regs.h"
@@ -41,6 +42,7 @@ __invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_
 	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
 
 	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
+	xe_gt_tlb_invalidation_fence_fini(fence);
 	dma_fence_signal(&fence->base);
 	if (!stack)
 		dma_fence_put(&fence->base);
@@ -263,8 +265,10 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
 
 		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
 		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
-		if (ret < 0)
+		if (ret < 0) {
+			xe_gt_tlb_invalidation_fence_fini(&fence);
 			return ret;
+		}
 
 		xe_gt_tlb_invalidation_fence_wait(&fence);
 	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
@@ -489,12 +493,15 @@ static const struct dma_fence_ops invalidation_fence_ops = {
  * @fence: TLB invalidation fence to initialize
  * @stack: fence is stack variable
  *
- * Initialize TLB invalidation fence for use
+ * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
+ * must be called if fence is not signaled.
  */
 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
 				       struct xe_gt_tlb_invalidation_fence *fence,
 				       bool stack)
 {
+	xe_pm_runtime_get_noresume(gt_to_xe(gt));
+
 	spin_lock_irq(&gt->tlb_invalidation.lock);
 	dma_fence_init(&fence->base, &invalidation_fence_ops,
 		       &gt->tlb_invalidation.lock,
@@ -505,4 +512,16 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
 		set_bit(FENCE_STACK_BIT, &fence->base.flags);
 	else
 		dma_fence_get(&fence->base);
+	fence->gt = gt;
+}
+
+/**
+ * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
+ * @fence: TLB invalidation fence to finalize
+ *
+ * Drop PM ref which the fence took during init.
+ */ +void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence) +{ + xe_pm_runtime_put(gt_to_xe(fence->gt)); } diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h index f430d5797af7..a84065fa324c 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h @@ -28,6 +28,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len); void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, bool stack); +void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence); static inline void xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence) diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h index 934c828efe31..de6e825e0851 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h @@ -8,6 +8,8 @@ #include +struct xe_gt; + /** * struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence * @@ -17,6 +19,8 @@ struct xe_gt_tlb_invalidation_fence { /** @base: dma fence base */ struct dma_fence base; + /** @gt: GT which fence belong to */ + struct xe_gt *gt; /** @link: link into list of pending tlb fences */ struct list_head link; /** @seqno: seqno of TLB invalidation to signal fence one */ diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 86e9e2f877e5..d8e099347df0 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -3218,8 +3218,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) */ ret = xe_gt_tlb_invalidation_vma(tile->primary_gt, &fence[id], vma); - if (ret < 0) + if (ret < 0) { + xe_gt_tlb_invalidation_fence_fini(&fence[id]); goto wait; + } tile_needs_invalidate |= BIT(id); } From d930c19fdff3109e97b610fa10943b7602efcabd Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Fri, 19 Jul 2024 10:29:05 -0700 Subject: [PATCH 58/95] drm/xe: Build PM into GuC CT layer Take PM ref when any G2H are outstanding, drop when none are outstanding. To safely ensure we have PM ref when in the GuC CT layer, a PM ref needs to be held when scheduler messages are pending too. 
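The counting scheme only toggles the PM reference on the boundary transitions
of the outstanding-G2H count, so each additional credit stays cheap. Condensed
from the CT hunks below:

	/* reserve: take the PM ref only on the 0 -> 1 transition */
	if (!ct->g2h_outstanding)
		xe_pm_runtime_get_noresume(ct_to_xe(ct));
	ct->g2h_outstanding += num_g2h;

	/* release: drop it again on the 1 -> 0 transition */
	if (!--ct->g2h_outstanding)
		xe_pm_runtime_put(ct_to_xe(ct));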
v2: - Add outer PM protections to xe_file_close (CI) v3: - Only take PM ref 0->1 and drop on 1->0 (Matthew Auld) v4: - Add assert to G2H increment function v5: - Rebase v6: - Declare xe as local variable in xe_file_close (CI) Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs") Cc: Matthew Auld Cc: Rodrigo Vivi Cc: Nirmoy Das Signed-off-by: Matthew Brost Reviewed-by: Matthew Auld Reviewed-by: Nirmoy Das Link: https://patchwork.freedesktop.org/patch/msgid/20240719172905.1527927-5-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_device.c | 5 +++++ drivers/gpu/drm/xe/xe_guc_ct.c | 10 +++++++++- drivers/gpu/drm/xe/xe_guc_submit.c | 4 ++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 50c302cf3249..b677608eb592 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -141,11 +141,14 @@ void xe_file_put(struct xe_file *xef) static void xe_file_close(struct drm_device *dev, struct drm_file *file) { + struct xe_device *xe = to_xe_device(dev); struct xe_file *xef = file->driver_priv; struct xe_vm *vm; struct xe_exec_queue *q; unsigned long idx; + xe_pm_runtime_get(xe); + /* * No need for exec_queue.lock here as there is no contention for it * when FD is closing as IOCTLs presumably can't be modifying the @@ -162,6 +165,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file) mutex_unlock(&xef->vm.lock); xe_file_put(xef); + + xe_pm_runtime_put(xe); } static const struct drm_ioctl_desc xe_ioctls[] = { diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 7d2e937da1d8..64afc90ad2c5 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -327,6 +327,8 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct, xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 || state == XE_GUC_CT_STATE_STOPPED); + if (ct->g2h_outstanding) + xe_pm_runtime_put(ct_to_xe(ct)); ct->g2h_outstanding = 0; ct->state = state; @@ -495,10 +497,15 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len) static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h) { xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space); + xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) || + (g2h_len && num_g2h)); if (g2h_len) { lockdep_assert_held(&ct->fast_lock); + if (!ct->g2h_outstanding) + xe_pm_runtime_get_noresume(ct_to_xe(ct)); + ct->ctbs.g2h.info.space -= g2h_len; ct->g2h_outstanding += num_g2h; } @@ -511,7 +518,8 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space); ct->ctbs.g2h.info.space += g2h_len; - --ct->g2h_outstanding; + if (!--ct->g2h_outstanding) + xe_pm_runtime_put(ct_to_xe(ct)); } static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 26f8c6a4f665..da2ead86b9ae 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1413,6 +1413,8 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg) default: XE_WARN_ON("Unknown message type"); } + + xe_pm_runtime_put(guc_to_xe(exec_queue_to_guc(msg->private_data))); } static const struct drm_sched_backend_ops drm_sched_ops = { @@ -1503,6 +1505,8 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q) static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg, u32 opcode) { + 
xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q))); + INIT_LIST_HEAD(&msg->link); msg->opcode = opcode; msg->private_data = q; From be1dcdffa8fadc22ad272f605a53e2a2fd30e7f8 Mon Sep 17 00:00:00 2001 From: Ohad Sharabi Date: Sun, 21 Jul 2024 10:13:35 +0300 Subject: [PATCH 59/95] drm/xe/oa: Don't use hardcoded values The current implementation uses hardcoded values instead of common defines. v2: - Make the commit a regular commit instead of a fixup commit - slightly modify commit message Signed-off-by: Ohad Sharabi Reviewed-by: Ashutosh Dixit Signed-off-by: Ashutosh Dixit Link: https://patchwork.freedesktop.org/patch/msgid/20240721071335.101234-1-osharabi@habana.ai --- drivers/gpu/drm/xe/xe_oa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 6d69f751bf78..3ef92eb8fbb1 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -641,7 +641,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, u32 offset = xe_bo_ggtt_addr(lrc->bo); do { - bb->cs[bb->len++] = MI_STORE_DATA_IMM | BIT(22) /* GGTT */ | 2; + bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1); bb->cs[bb->len++] = offset + flex->offset * sizeof(u32); bb->cs[bb->len++] = 0; bb->cs[bb->len++] = flex->value; From bd40536ae33911bc74df5ba3b29b573dcb72f904 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sat, 20 Jul 2024 16:25:22 +0200 Subject: [PATCH 60/95] drm/xe: Introduce const cast helper Typically we want to preserve pointer constness when converting from one xe pointer to another, but in some rare cases, like kunit parameter conversions, we might want to discard this constness. Add a helper that we will use to clearly indicate our intention. Signed-off-by: Michal Wajdeczko Reviewed-by: Jonathan Cavitt #v1 Cc: Lucas De Marchi Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240720142528.530-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_device.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 533ccfb2567a..db6cc8d0d6b8 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -20,6 +20,11 @@ static inline struct xe_device *pdev_to_xe_device(struct pci_dev *pdev) return pci_get_drvdata(pdev); } +static inline struct xe_device *xe_device_const_cast(const struct xe_device *xe) +{ + return (struct xe_device *)xe; +} + static inline struct xe_device *ttm_to_xe_device(struct ttm_device *ttm) { return container_of(ttm, struct xe_device, ttm); From 8bfab7cd8335b10bad92e44f58c5a038762bcbc3 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sat, 20 Jul 2024 16:25:23 +0200 Subject: [PATCH 61/95] drm/xe/tests: Add helpers for use in live tests Instead of iterating over available Xe devices within a testcase, without being able to distinguish potential failures from different devices on system with many Xe devices, introduce helpers that will allow to treat each Xe device as a parameter for the testcase like: static void bar(struct kunit *test) { struct xe_device *xe = test->priv; ... 
} struct kunit_case foo_live_tests[] = { KUNIT_CASE_PARAM(bar, xe_pci_live_device_gen_param), {} }; struct kunit_suite foo_suite = { .name = "foo_live", .test_cases = foo_live_tests, .init = xe_kunit_helper_xe_device_live_test_init, }; Signed-off-by: Michal Wajdeczko Reviewed-by: Jonathan Cavitt Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240720142528.530-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/xe_kunit_helpers.c | 39 +++++++++++++++++++++ drivers/gpu/drm/xe/tests/xe_kunit_helpers.h | 2 ++ drivers/gpu/drm/xe/tests/xe_pci.c | 30 ++++++++++++++++ drivers/gpu/drm/xe/tests/xe_pci_test.h | 2 ++ 4 files changed, 73 insertions(+) diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c index fefe79b3b75a..bc5156966ce9 100644 --- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c +++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c @@ -12,7 +12,9 @@ #include "tests/xe_kunit_helpers.h" #include "tests/xe_pci_test.h" +#include "xe_device.h" #include "xe_device_types.h" +#include "xe_pm.h" /** * xe_kunit_helper_alloc_xe_device - Allocate a &xe_device for a KUnit test. @@ -88,3 +90,40 @@ int xe_kunit_helper_xe_device_test_init(struct kunit *test) return 0; } EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_test_init); + +KUNIT_DEFINE_ACTION_WRAPPER(put_xe_pm_runtime, xe_pm_runtime_put, struct xe_device *); + +/** + * xe_kunit_helper_xe_device_live_test_init - Prepare a &xe_device for + * use in a live KUnit test. + * @test: the &kunit where live &xe_device will be used + * + * This function expects pointer to the &xe_device in the &test.param_value, + * like it is prepared by the &xe_pci_live_device_gen_param and stores that + * pointer as &kunit.priv to allow the test code to access it. + * + * This function makes sure that device is not wedged and then resumes it + * to avoid waking up the device inside the test. It uses deferred cleanup + * action to release a runtime_pm reference. + * + * This function can be used as custom implementation of &kunit_suite.init. + * + * This function uses KUNIT_ASSERT to detect any failures. + * + * Return: Always 0. 
+ */ +int xe_kunit_helper_xe_device_live_test_init(struct kunit *test) +{ + struct xe_device *xe = xe_device_const_cast(test->param_value); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); + kunit_info(test, "running on %s device\n", xe->info.platform_name); + + KUNIT_ASSERT_FALSE(test, xe_device_wedged(xe)); + xe_pm_runtime_get(xe); + KUNIT_ASSERT_EQ(test, 0, kunit_add_action_or_reset(test, put_xe_pm_runtime, xe)); + + test->priv = xe; + return 0; +} +EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_live_test_init); diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h index 067a1babf049..83665f7b1254 100644 --- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h +++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h @@ -14,4 +14,6 @@ struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test, struct device *dev); int xe_kunit_helper_xe_device_test_init(struct kunit *test); +int xe_kunit_helper_xe_device_live_test_init(struct kunit *test); + #endif diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c index f62809ca8b51..577ee7d14381 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci.c +++ b/drivers/gpu/drm/xe/tests/xe_pci.c @@ -167,3 +167,33 @@ done: return 0; } EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init); + +/** + * xe_pci_live_device_gen_param - Helper to iterate Xe devices as KUnit parameters + * @prev: the previously returned value, or NULL for the first iteration + * @desc: the buffer for a parameter name + * + * Iterates over the available Xe devices on the system. Uses the device name + * as the parameter name. + * + * To be used only as a parameter generator function in &KUNIT_CASE_PARAM. + * + * Return: pointer to the next &struct xe_device ready to be used as a parameter + * or NULL if there are no more Xe devices on the system. + */ +const void *xe_pci_live_device_gen_param(const void *prev, char *desc) +{ + const struct xe_device *xe = prev; + struct device *dev = xe ? xe->drm.dev : NULL; + struct device *next; + + next = driver_find_next_device(&xe_pci_driver.driver, dev); + if (dev) + put_device(dev); + if (!next) + return NULL; + + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", dev_name(next)); + return pdev_to_xe_device(to_pci_dev(next)); +} +EXPORT_SYMBOL_IF_KUNIT(xe_pci_live_device_gen_param); diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h index f40dcec83992..3e2558bc3c90 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.h +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h @@ -35,4 +35,6 @@ struct xe_pci_fake_data { int xe_pci_fake_device_init(struct xe_device *xe); +const void *xe_pci_live_device_gen_param(const void *prev, char *desc); + #endif From e17eedf56ea7afacd919a3d6cc75a617cddd003a Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sat, 20 Jul 2024 16:25:24 +0200 Subject: [PATCH 62/95] drm/xe/tests: Convert xe_bo live tests Convert xe_bo live tests to parameterized style. 
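The conversion, here and in the following patches for the other live suites,
follows the same recipe (xe_foo is a placeholder suite name, not an actual
file in the driver):

	static void xe_foo_kunit(struct kunit *test)
	{
		struct xe_device *xe = test->priv;	/* set by the live init helper */

		foo_test_run_device(xe);
	}

	static struct kunit_case xe_foo_tests[] = {
		KUNIT_CASE_PARAM(xe_foo_kunit, xe_pci_live_device_gen_param),
		{}
	};

	static struct kunit_suite xe_foo_test_suite = {
		.name = "xe_foo",
		.test_cases = xe_foo_tests,
		.init = xe_kunit_helper_xe_device_live_test_init,
	};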
Signed-off-by: Michal Wajdeczko Reviewed-by: Jonathan Cavitt Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240720142528.530-4-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/xe_bo.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index e2e0ea24757a..1768483da1b7 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -6,6 +6,7 @@ #include #include +#include "tests/xe_kunit_helpers.h" #include "tests/xe_pci_test.h" #include "tests/xe_test.h" @@ -158,13 +159,13 @@ static int ccs_test_run_device(struct xe_device *xe) int id; if (!xe_device_has_flat_ccs(xe)) { - kunit_info(test, "Skipping non-flat-ccs device.\n"); + kunit_skip(test, "non-flat-ccs device\n"); return 0; } /* For xe2+ dgfx, we don't handle ccs metadata */ if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) { - kunit_info(test, "Skipping on xe2+ dgfx device.\n"); + kunit_skip(test, "xe2+ dgfx device\n"); return 0; } @@ -184,7 +185,9 @@ static int ccs_test_run_device(struct xe_device *xe) static void xe_ccs_migrate_kunit(struct kunit *test) { - xe_call_for_each_device(ccs_test_run_device); + struct xe_device *xe = test->priv; + + ccs_test_run_device(xe); } static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test) @@ -334,8 +337,7 @@ static int evict_test_run_device(struct xe_device *xe) int id; if (!IS_DGFX(xe)) { - kunit_info(test, "Skipping non-discrete device %s.\n", - dev_name(xe->drm.dev)); + kunit_skip(test, "non-discrete device\n"); return 0; } @@ -351,12 +353,14 @@ static int evict_test_run_device(struct xe_device *xe) static void xe_bo_evict_kunit(struct kunit *test) { - xe_call_for_each_device(evict_test_run_device); + struct xe_device *xe = test->priv; + + evict_test_run_device(xe); } static struct kunit_case xe_bo_tests[] = { - KUNIT_CASE(xe_ccs_migrate_kunit), - KUNIT_CASE(xe_bo_evict_kunit), + KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param), + KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param), {} }; @@ -364,5 +368,6 @@ VISIBLE_IF_KUNIT struct kunit_suite xe_bo_test_suite = { .name = "xe_bo", .test_cases = xe_bo_tests, + .init = xe_kunit_helper_xe_device_live_test_init, }; EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite); From 8f3d86f13eb2b37d86fc48df8995dffd9b7d9010 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sat, 20 Jul 2024 16:25:25 +0200 Subject: [PATCH 63/95] drm/xe/tests: Convert xe_dma_buf live tests Convert xe_dma_buf live tests to parameterized style. 
Signed-off-by: Michal Wajdeczko Reviewed-by: Jonathan Cavitt Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240720142528.530-5-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/xe_dma_buf.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c index 4f9dc41e13de..c24c8509227e 100644 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c @@ -8,6 +8,7 @@ #include #include +#include "tests/xe_kunit_helpers.h" #include "tests/xe_pci_test.h" #include "xe_pci.h" @@ -275,11 +276,13 @@ static int dma_buf_run_device(struct xe_device *xe) static void xe_dma_buf_kunit(struct kunit *test) { - xe_call_for_each_device(dma_buf_run_device); + struct xe_device *xe = test->priv; + + dma_buf_run_device(xe); } static struct kunit_case xe_dma_buf_tests[] = { - KUNIT_CASE(xe_dma_buf_kunit), + KUNIT_CASE_PARAM(xe_dma_buf_kunit, xe_pci_live_device_gen_param), {} }; @@ -287,5 +290,6 @@ VISIBLE_IF_KUNIT struct kunit_suite xe_dma_buf_test_suite = { .name = "xe_dma_buf", .test_cases = xe_dma_buf_tests, + .init = xe_kunit_helper_xe_device_live_test_init, }; EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite); From 37db1e776285518a7e1269b868d094a4532b2d54 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sat, 20 Jul 2024 16:25:26 +0200 Subject: [PATCH 64/95] drm/xe/tests: Convert xe_migrate live tests Convert xe_migrate live tests to parameterized style. Signed-off-by: Michal Wajdeczko Reviewed-by: Jonathan Cavitt Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240720142528.530-6-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/xe_migrate.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index 48bf7e831014..4344a1724029 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -6,6 +6,7 @@ #include #include +#include "tests/xe_kunit_helpers.h" #include "tests/xe_pci_test.h" #include "xe_pci.h" @@ -355,7 +356,9 @@ static int migrate_test_run_device(struct xe_device *xe) static void xe_migrate_sanity_kunit(struct kunit *test) { - xe_call_for_each_device(migrate_test_run_device); + struct xe_device *xe = test->priv; + + migrate_test_run_device(xe); } static struct dma_fence *blt_copy(struct xe_tile *tile, @@ -731,13 +734,12 @@ static int validate_ccs_test_run_device(struct xe_device *xe) int id; if (!xe_device_has_flat_ccs(xe)) { - kunit_info(test, "Skipping non-flat-ccs device.\n"); + kunit_skip(test, "non-flat-ccs device\n"); return 0; } if (!(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))) { - kunit_info(test, "Skipping non-xe2 discrete device %s.\n", - dev_name(xe->drm.dev)); + kunit_skip(test, "non-xe2 discrete device\n"); return 0; } @@ -753,12 +755,14 @@ static int validate_ccs_test_run_device(struct xe_device *xe) static void xe_validate_ccs_kunit(struct kunit *test) { - xe_call_for_each_device(validate_ccs_test_run_device); + struct xe_device *xe = test->priv; + + validate_ccs_test_run_device(xe); } static struct kunit_case xe_migrate_tests[] = { - KUNIT_CASE(xe_migrate_sanity_kunit), - KUNIT_CASE(xe_validate_ccs_kunit), + KUNIT_CASE_PARAM(xe_migrate_sanity_kunit, xe_pci_live_device_gen_param), + KUNIT_CASE_PARAM(xe_validate_ccs_kunit, xe_pci_live_device_gen_param), {} }; @@ -766,5 +770,6 @@ VISIBLE_IF_KUNIT struct kunit_suite xe_migrate_test_suite = { .name 
= "xe_migrate", .test_cases = xe_migrate_tests, + .init = xe_kunit_helper_xe_device_live_test_init, }; EXPORT_SYMBOL_IF_KUNIT(xe_migrate_test_suite); From 57ecead343e724b6ea38e9c2d8f794b293cf2951 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sat, 20 Jul 2024 16:25:27 +0200 Subject: [PATCH 65/95] drm/xe/tests: Convert xe_mocs live tests Convert xe_mocs live tests to parameterized style. Signed-off-by: Michal Wajdeczko Reviewed-by: Jonathan Cavitt Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240720142528.530-7-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/xe_mocs.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index febc1d967850..19554019b255 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -6,6 +6,7 @@ #include #include +#include "tests/xe_kunit_helpers.h" #include "tests/xe_pci_test.h" #include "tests/xe_test.h" @@ -135,7 +136,9 @@ static int mocs_kernel_test_run_device(struct xe_device *xe) static void xe_live_mocs_kernel_kunit(struct kunit *test) { - xe_call_for_each_device(mocs_kernel_test_run_device); + struct xe_device *xe = test->priv; + + mocs_kernel_test_run_device(xe); } static int mocs_reset_test_run_device(struct xe_device *xe) @@ -175,12 +178,14 @@ static int mocs_reset_test_run_device(struct xe_device *xe) static void xe_live_mocs_reset_kunit(struct kunit *test) { - xe_call_for_each_device(mocs_reset_test_run_device); + struct xe_device *xe = test->priv; + + mocs_reset_test_run_device(xe); } static struct kunit_case xe_mocs_tests[] = { - KUNIT_CASE(xe_live_mocs_kernel_kunit), - KUNIT_CASE(xe_live_mocs_reset_kunit), + KUNIT_CASE_PARAM(xe_live_mocs_kernel_kunit, xe_pci_live_device_gen_param), + KUNIT_CASE_PARAM(xe_live_mocs_reset_kunit, xe_pci_live_device_gen_param), {} }; @@ -188,5 +193,6 @@ VISIBLE_IF_KUNIT struct kunit_suite xe_mocs_test_suite = { .name = "xe_mocs", .test_cases = xe_mocs_tests, + .init = xe_kunit_helper_xe_device_live_test_init, }; EXPORT_SYMBOL_IF_KUNIT(xe_mocs_test_suite); From cf1e6edbd1c8ba654f97071ccb748d87ef0115aa Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sat, 20 Jul 2024 16:25:28 +0200 Subject: [PATCH 66/95] drm/xe/tests: Skip xe_mocs live tests on VF device There is no point to run those tests on VFs devices as they can't access any of the MOCS registers. Skip testing on the VF device. 
[ ] =================== xe_mocs (1 subtest) ==================== [ ] ================ xe_live_mocs_kernel_kunit ================ [ ] [PASSED] 0000:4d:00.0 [ ] [SKIPPED] 0000:4d:00.1 [ ] ============ [PASSED] xe_live_mocs_kernel_kunit ============ [ ] ===================== [PASSED] xe_mocs ===================== Signed-off-by: Michal Wajdeczko Reviewed-by: Jonathan Cavitt Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240720142528.530-8-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/tests/xe_mocs.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index 19554019b255..79be73b4a02b 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -138,6 +138,9 @@ static void xe_live_mocs_kernel_kunit(struct kunit *test) { struct xe_device *xe = test->priv; + if (IS_SRIOV_VF(xe)) + kunit_skip(test, "this test is N/A for VF"); + mocs_kernel_test_run_device(xe); } @@ -180,6 +183,9 @@ static void xe_live_mocs_reset_kunit(struct kunit *test) { struct xe_device *xe = test->priv; + if (IS_SRIOV_VF(xe)) + kunit_skip(test, "this test is N/A for VF"); + mocs_reset_test_run_device(xe); } From 9790bbe3ba48b659c0152f49552f6abc18f85bc8 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 15 Jul 2024 20:05:37 +0200 Subject: [PATCH 67/95] drm/xe: Normalize NEEDS_64K BO flag In commit 62742d126631 ("drm/xe: Normalize bo flags macros"), we normalized all BO flags but XE_BO_NEEDS_64K. Do it now. Signed-off-by: Michal Wajdeczko Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240715180538.1418-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_bo.c | 2 +- drivers/gpu/drm/xe/xe_bo.h | 2 +- drivers/gpu/drm/xe/xe_lmtt.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 31192d983d9e..da35b4a21739 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -1264,7 +1264,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) && !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) && ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || - (flags & XE_BO_NEEDS_64K))) { + (flags & XE_BO_FLAG_NEEDS_64K))) { aligned_size = ALIGN(size, SZ_64K); if (type != ttm_bo_type_device) size = ALIGN(size, SZ_64K); diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index 6de894c728f5..2afc50132c2b 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -36,7 +36,7 @@ #define XE_BO_FLAG_PAGETABLE BIT(12) #define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13) #define XE_BO_FLAG_NEEDS_UC BIT(14) -#define XE_BO_NEEDS_64K BIT(15) +#define XE_BO_FLAG_NEEDS_64K BIT(15) #define XE_BO_FLAG_GGTT_INVALIDATE BIT(16) /* this one is trigger internally only */ #define XE_BO_FLAG_INTERNAL_TEST BIT(30) diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index c5fdb36b6d33..8999ac511555 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level lmtt->ops->lmtt_pte_num(level)), ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) | - XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED); + XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED); if (IS_ERR(bo)) { err = PTR_ERR(bo); goto out_free_pt; From 25ec7e809cc1fe9f01a71199d29092f6f1b4611c Mon Sep 17 00:00:00 
2001 From: Michal Wajdeczko Date: Mon, 15 Jul 2024 20:05:38 +0200 Subject: [PATCH 68/95] drm/xe: Add NEEDS_2M BO flag In addition to the NEEDS_64K BO flag, add a similar one to force 2 MiB alignment of buffer objects. Explicitly use this flag during VF LMEM provisioning, as LMTT uses 2 MiB pages and one day we may drop the requirement that pinned objects be allocated contiguously. Signed-off-by: Michal Wajdeczko Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240715180538.1418-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_bo.c | 13 +++++++------ drivers/gpu/drm/xe/xe_bo.h | 3 ++- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 1 + 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index da35b4a21739..3295bc92d7aa 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -1264,13 +1264,14 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) && !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) && ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || - (flags & XE_BO_FLAG_NEEDS_64K))) { - aligned_size = ALIGN(size, SZ_64K); - if (type != ttm_bo_type_device) - size = ALIGN(size, SZ_64K); - flags |= XE_BO_FLAG_INTERNAL_64K; - alignment = SZ_64K >> PAGE_SHIFT; + (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) { + size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K; + aligned_size = ALIGN(size, align); + if (type != ttm_bo_type_device) + size = ALIGN(size, align); + flags |= XE_BO_FLAG_INTERNAL_64K; + alignment = align >> PAGE_SHIFT; } else { aligned_size = ALIGN(size, SZ_4K); flags &= ~XE_BO_FLAG_INTERNAL_64K; diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index 2afc50132c2b..1c9dc8adaaa3 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -37,7 +37,8 @@ #define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13) #define XE_BO_FLAG_NEEDS_UC BIT(14) #define XE_BO_FLAG_NEEDS_64K BIT(15) -#define XE_BO_FLAG_GGTT_INVALIDATE BIT(16) +#define XE_BO_FLAG_NEEDS_2M BIT(16) +#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17) /* this one is trigger internally only */ #define XE_BO_FLAG_INTERNAL_TEST BIT(30) #define XE_BO_FLAG_INTERNAL_64K BIT(31) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 4699b7836001..52c7277d243d 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1401,6 +1401,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) ALIGN(size, PAGE_SIZE), ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) | + XE_BO_FLAG_NEEDS_2M | XE_BO_FLAG_PINNED); if (IS_ERR(bo)) { err = PTR_ERR(bo); return PTR_ERR(bo); From 7e0fe233aa0849de8a0a5e60cb0d71dfb359664f Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 19 Jul 2024 12:15:34 -0700 Subject: [PATCH 69/95] drm/xe: Fix warning on unreachable statement eu_type_to_str() relies on -Wswitch (promoted to an error by -Werror) to make sure it handles all enum values. However, it's perfectly legal to pass an int to that function, so it may end up returning nothing. There's too much implicit knowledge about the initialization of eu_type for a compiler to notice eu_type is never assigned to anything other than those values.
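To make the hazard concrete, here is a reduced, self-contained sketch (hypothetical enum and values, not the driver code) of why the fall-off path stays reachable even with a fully covered switch:

	enum eu_type { EU_SIMD8, EU_SIMD16 };

	static const char *eu_type_to_str(enum eu_type eu_type)
	{
		switch (eu_type) {
		case EU_SIMD16:
			return "simd16";
		case EU_SIMD8:
			return "simd8";
		}

		/*
		 * -Wswitch is satisfied above, yet eu_type_to_str(2) still
		 * reaches this point; returning NULL keeps the path defined
		 * instead of relying on unreachable().
		 */
		return NULL;
	}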
Trying to reproduce this issue, none of gcc-9, gcc-10 and gcc-13 triggered it for me, but it was reported on a different system with gcc-10: drivers/gpu/drm/xe/xe.o: warning: objtool: xe_gt_topology_dump() falls through to next function xe_gt_topology_init() These warnings were also reported when building with clang: drivers/gpu/drm/xe/xe.o: warning: objtool: xe_gt_topology_dump+0x77: sibling call from callable instruction with modified stack frame drivers/gpu/drm/xe/xe.o: warning: objtool: xe_gt_topology_dump() falls through to next function xe_dss_mask_group_ffs() drivers/gpu/drm/xe/xe.o: warning: objtool: xe_gt_topology_dump+0x77: can't find jump dest instruction at .text.xe_gt_topology_dump+0xc0 Since that value is not really possible in the real world, just take the simple approach and return NULL. Fixes: 7108b4a589cd ("drm/xe/uapi: Expose SIMD16 EU mask in topology query") Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20240719191534.3845469-1-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_gt_topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c index 5a1559edf3e9..0662f71c6ede 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.c +++ b/drivers/gpu/drm/xe/xe_gt_topology.c @@ -233,7 +233,7 @@ static const char *eu_type_to_str(enum xe_gt_eu_type eu_type) return "simd8"; } - unreachable(); + return NULL; } void From 649b93dbb902ae3237fddbe998eb1f4de1a14b71 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 22 Jul 2024 18:02:30 -0700 Subject: [PATCH 70/95] drm/xe: Fix xe_pt_abort_unbind When restoring the children PT entries on a bind failure, the incorrect loop index was used, resulting in PT entries being leaked. This is shown by running xe_vm.bind-array-conflict-error-inject on a VRAM device going into a suspend state after the test completes. v2: - s/childern/children (CI, Matt Auld) Fixes: a708f6501c69 ("drm/xe: Update PT layer with better error handling") Cc: Matthew Auld Signed-off-by: Matthew Brost Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20240723010230.1652707-1-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_pt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index c24e869b7eae..97a6a0b0b8ba 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -1564,7 +1564,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma, continue; for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) - pt_dir->children[i] = + pt_dir->children[j] = entries[i].pt_entries[j - entry->ofs].pt ? &entries[i].pt_entries[j - entry->ofs].pt->base : NULL; } From c8a31ff6199f12ca65d73f1235117c1d9e6365a9 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 22 Jul 2024 18:17:02 -0700 Subject: [PATCH 71/95] drm/xe: Return -ENOBUFS if a kmalloc fails which is tied to an array of binds The size of an array of binds is directly tied to several kmalloc allocations in the KMD, thus making these allocations more likely to fail. Return -ENOBUFS in the case of these failures. The expected UMD behavior upon receiving -ENOBUFS is to split an array of binds into a series of single binds.
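A rough sketch of that expected userspace fallback (the wrapper names and the bind_op type are hypothetical; only the split-on-ENOBUFS strategy comes from this patch):

	struct bind_op;	/* hypothetical per-bind descriptor */

	/* hypothetical wrapper: one VM_BIND ioctl covering n ops, returns -errno */
	int umd_vm_bind(int fd, struct bind_op *ops, unsigned int n);

	static int umd_vm_bind_array(int fd, struct bind_op *ops, unsigned int n)
	{
		int err = umd_vm_bind(fd, ops, n);
		unsigned int i;

		if (err != -ENOBUFS)
			return err;

		/* KMD could not allocate for the array: fall back to single binds */
		for (i = 0; i < n; i++) {
			err = umd_vm_bind(fd, &ops[i], 1);
			if (err)
				return err;
		}

		return 0;
	}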
v2: - Resend for CI v3: - Resend for CI Cc: Paulo Zanoni Signed-off-by: Matthew Brost Reviewed-by: Himal Prasad Ghimiray Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20240723011702.1684013-1-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_vm.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index d8e099347df0..f225107bdd65 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -718,7 +718,7 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm) list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN; } -static int xe_vma_ops_alloc(struct xe_vma_ops *vops) +static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds) { int i; @@ -731,7 +731,7 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops) sizeof(*vops->pt_update_ops[i].ops), GFP_KERNEL); if (!vops->pt_update_ops[i].ops) - return -ENOMEM; + return array_of_binds ? -ENOBUFS : -ENOMEM; } return 0; @@ -824,7 +824,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) goto free_ops; } - err = xe_vma_ops_alloc(&vops); + err = xe_vma_ops_alloc(&vops, false); if (err) goto free_ops; @@ -871,7 +871,7 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma if (err) return ERR_PTR(err); - err = xe_vma_ops_alloc(&vops); + err = xe_vma_ops_alloc(&vops, false); if (err) { fence = ERR_PTR(err); goto free_ops; @@ -2761,7 +2761,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, sizeof(struct drm_xe_vm_bind_op), GFP_KERNEL | __GFP_ACCOUNT); if (!*bind_ops) - return -ENOMEM; + return args->num_binds > 1 ? -ENOBUFS : -ENOMEM; err = __copy_from_user(*bind_ops, bind_user, sizeof(struct drm_xe_vm_bind_op) * @@ -3100,7 +3100,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto unwind_ops; } - err = xe_vma_ops_alloc(&vops); + err = xe_vma_ops_alloc(&vops, args->num_binds > 1); if (err) goto unwind_ops; From 8af13c3fc1259c1b8cfc4459d3701fcf41ad74df Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Tue, 23 Jul 2024 08:10:45 -0700 Subject: [PATCH 72/95] drm/xe: Store process name and pid in xe file An xe file can outlive the associated process as the GPU cleanup is just triggered upon file close (process kill) and completes sometime later. If the file close triggers error conditions (GPU hangs) the process cannot be safely referenced to retrieve the name and pid for debug information. Store the process name and pid directly in the xe file to be safe. 
v2: - Access file->pid via rcu_access_pointer (Matthew Auld) Fixes: b10d0c5e9df7 ("drm/xe: Add process name to devcoredump") Fixes: f6ca930d974e ("drm/xe: Add process name and PID to job timedout message") Signed-off-by: Matthew Brost Acked-by: Rodrigo Vivi Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20240723151045.1725417-1-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_devcoredump.c | 10 ++-------- drivers/gpu/drm/xe/xe_device.c | 9 +++++++++ drivers/gpu/drm/xe/xe_device_types.h | 12 ++++++++++++ drivers/gpu/drm/xe/xe_guc_submit.c | 10 ++-------- 4 files changed, 25 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 62c2b10fbf1d..d8d8ca2c19d3 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -171,7 +171,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, u32 adj_logical_mask = q->logical_mask; u32 width_mask = (0x1 << q->width) - 1; const char *process_name = "no process"; - struct task_struct *task = NULL; int i; bool cookie; @@ -179,14 +178,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, ss->snapshot_time = ktime_get_real(); ss->boot_time = ktime_get_boottime(); - if (q->vm && q->vm->xef) { - task = get_pid_task(q->vm->xef->drm->pid, PIDTYPE_PID); - if (task) - process_name = task->comm; - } + if (q->vm && q->vm->xef) + process_name = q->vm->xef->process_name; strscpy(ss->process_name, process_name); - if (task) - put_task_struct(task); ss->gt = q->gt; INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work); diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index b677608eb592..1aba6f9eaa19 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -64,6 +64,7 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) struct xe_drm_client *client; struct xe_file *xef; int ret = -ENOMEM; + struct task_struct *task = NULL; xef = kzalloc(sizeof(*xef), GFP_KERNEL); if (!xef) @@ -92,6 +93,13 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) file->driver_priv = xef; kref_init(&xef->refcount); + task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID); + if (task) { + xef->process_name = kstrdup(task->comm, GFP_KERNEL); + xef->pid = task->pid; + put_task_struct(task); + } + return 0; } @@ -110,6 +118,7 @@ static void xe_file_destroy(struct kref *ref) spin_unlock(&xe->clients.lock); xe_drm_client_put(xef->client); + kfree(xef->process_name); kfree(xef); } diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 36252d5b1663..5b7292a9a66d 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -582,6 +582,18 @@ struct xe_file { /** @client: drm client */ struct xe_drm_client *client; + /** + * @process_name: process name for file handle, used to safely output + * during error situations where xe file can outlive process + */ + char *process_name; + + /** + * @pid: pid for file handle, used to safely output during error + * situations where xe file can outlive process + */ + pid_t pid; + /** @refcount: ref count of this xe file */ struct kref refcount; }; diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index da2ead86b9ae..a4570631926f 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1072,7 +1072,6 @@ guc_exec_queue_timedout_job(struct drm_sched_job
*drm_job) struct xe_gpu_scheduler *sched = &q->guc->sched; struct xe_guc *guc = exec_queue_to_guc(q); const char *process_name = "no process"; - struct task_struct *task = NULL; int err = -ETIME; pid_t pid = -1; int i = 0; @@ -1172,17 +1171,12 @@ trigger_reset: } if (q->vm && q->vm->xef) { - task = get_pid_task(q->vm->xef->drm->pid, PIDTYPE_PID); - if (task) { - process_name = task->comm; - pid = task->pid; - } + process_name = q->vm->xef->process_name; + pid = q->vm->xef->pid; } xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]", xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job), q->guc->id, q->flags, process_name, pid); - if (task) - put_task_struct(task); trace_xe_sched_job_timedout(job); From 6482253e6e1ad1c3a76645a3899d3cfdb5b918cb Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Tue, 23 Jul 2024 12:07:14 -0700 Subject: [PATCH 73/95] drm/xe: Remove fence check from send_tlb_invalidation The 'fence' argument in send_tlb_invalidation cannot be NULL, so remove the non-NULL check from send_tlb_invalidation. Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202407231049.esig0Fkb-lkp@intel.com/ Fixes: 61ac035361ae ("drm/xe: Drop xe_gt_tlb_invalidation_wait") Signed-off-by: Matthew Brost Reviewed-by: Nirmoy Das Link: https://patchwork.freedesktop.org/patch/msgid/20240723190714.1744653-1-matthew.brost@intel.com Signed-off-by: Nirmoy Das --- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 481d83d07367..87cb76a8718c 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -182,7 +182,7 @@ static int send_tlb_invalidation(struct xe_guc *guc, action[1] = seqno; ret = xe_guc_ct_send_locked(&guc->ct, action, len, G2H_LEN_DW_TLB_INVALIDATE, 1); - if (!ret && fence) { + if (!ret) { spin_lock_irq(&gt->tlb_invalidation.pending_lock); /* * We haven't actually published the TLB fence as per @@ -203,7 +203,7 @@ static int send_tlb_invalidation(struct xe_guc *guc, tlb_timeout_jiffies(gt)); } spin_unlock_irq(&gt->tlb_invalidation.pending_lock); - } else if (ret < 0 && fence) { + } else if (ret < 0) { __invalidation_fence_signal(xe, fence); } if (!ret) { From a2b4da9501c120d60413065169f140bc3d2e5991 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 22 Jul 2024 21:54:57 -0700 Subject: [PATCH 74/95] drm/xe: Refactor mmio setup for multi-tile Extract functions to set up the multi-tile mmio space and extension space, while better documenting the final memory layout. No change in behavior. Reviewed-by: Gustavo Sousa Link: https://patchwork.freedesktop.org/patch/msgid/20240723045558.288693-1-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_mmio.c | 99 ++++++++++++++++++++++-------- 1 file changed, 77 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index ea3c37d3e13f..435c01d003a8 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -33,29 +33,56 @@ static void tiles_fini(void *arg) tile->mmio.regs = NULL; } -int xe_mmio_probe_tiles(struct xe_device *xe) +/* + * On multi-tile devices, partition the BAR space for MMIO on each tile, + * possibly accounting for register override on the number of tiles available. + * Resulting memory layout is like below: + * + * .----------------------.
<- tile_count * tile_mmio_size + * | .... | + * |----------------------| <- 2 * tile_mmio_size + * | tile1->mmio.regs | + * |----------------------| <- 1 * tile_mmio_size + * | tile0->mmio.regs | + * '----------------------' <- 0MB + */ +static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) { - size_t tile_mmio_size = SZ_16M, tile_mmio_ext_size = xe->info.tile_mmio_ext_size; - u8 id, tile_count = xe->info.tile_count; - struct xe_gt *gt = xe_root_mmio_gt(xe); struct xe_tile *tile; void __iomem *regs; - u32 mtcfg; + u8 id; - if (tile_count == 1) - goto add_mmio_ext; + /* + * Nothing to be done as tile 0 has already been setup earlier with the + * entire BAR mapped - see xe_mmio_init() + */ + if (xe->info.tile_count == 1) + return; + /* Possibly override number of tile based on configuration register */ if (!xe->info.skip_mtcfg) { + struct xe_gt *gt = xe_root_mmio_gt(xe); + u8 tile_count; + u32 mtcfg; + + /* + * Although the per-tile mmio regs are not yet initialized, this + * is fine as it's going to the root gt, that's guaranteed to be + * initialized earlier in xe_mmio_init() + */ mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR); tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1; + if (tile_count < xe->info.tile_count) { drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n", xe->info.tile_count, tile_count); xe->info.tile_count = tile_count; /* - * FIXME: Needs some work for standalone media, but should be impossible - * with multi-tile for now. + * FIXME: Needs some work for standalone media, but + * should be impossible with multi-tile for now: + * multi-tile platform with standalone media doesn't + * exist */ xe->info.gt_count = xe->info.tile_count; } @@ -67,23 +94,51 @@ int xe_mmio_probe_tiles(struct xe_device *xe) tile->mmio.regs = regs; regs += tile_mmio_size; } +} -add_mmio_ext: - /* - * By design, there's a contiguous multi-tile MMIO space (16MB hard coded per tile). - * When supported, there could be an additional contiguous multi-tile MMIO extension - * space ON TOP of it, and hence the necessity for distinguished MMIO spaces. - */ - if (xe->info.has_mmio_ext) { - regs = xe->mmio.regs + tile_mmio_size * tile_count; +/* + * On top of all the multi-tile MMIO space there can be a platform-dependent + * extension for each tile, resulting in a layout like below: + * + * .----------------------. <- ext_base + tile_count * tile_mmio_ext_size + * | .... | + * |----------------------| <- ext_base + 2 * tile_mmio_ext_size + * | tile1->mmio_ext.regs | + * |----------------------| <- ext_base + 1 * tile_mmio_ext_size + * | tile0->mmio_ext.regs | + * |======================| <- ext_base = tile_count * tile_mmio_size + * | | + * | mmio.regs | + * | | + * '----------------------' <- 0MB + * + * Set up the tile[]->mmio_ext pointers/sizes. 
+ */ +static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size, + size_t tile_mmio_ext_size) +{ + struct xe_tile *tile; + void __iomem *regs; + u8 id; - for_each_tile(tile, xe, id) { - tile->mmio_ext.size = tile_mmio_ext_size; - tile->mmio_ext.regs = regs; + if (!xe->info.has_mmio_ext) + return; - regs += tile_mmio_ext_size; - } + regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count; + for_each_tile(tile, xe, id) { + tile->mmio_ext.size = tile_mmio_ext_size; + tile->mmio_ext.regs = regs; + regs += tile_mmio_ext_size; } +} + +int xe_mmio_probe_tiles(struct xe_device *xe) +{ + size_t tile_mmio_size = SZ_16M; + size_t tile_mmio_ext_size = xe->info.tile_mmio_ext_size; + + mmio_multi_tile_setup(xe, tile_mmio_size); + mmio_extension_setup(xe, tile_mmio_size, tile_mmio_ext_size); return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe); } From 66ac3451fb467ce44fb1d4517f91db7390b9fa6b Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 22 Jul 2024 09:06:39 -0700 Subject: [PATCH 75/95] drm/xe: Add assert for XE_WA() usage It's not always safe to call XE_WA() in the driver initialization. Add a xe_gt_assert() so this doesn't go unnoticed. While at it, fix typo in kernel-doc about OOB workarounds. Reviewed-by: Tejas Upadhyay Link: https://patchwork.freedesktop.org/patch/msgid/20240722160815.4085605-1-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_gt_types.h | 8 +++++++- drivers/gpu/drm/xe/xe_wa.c | 1 + drivers/gpu/drm/xe/xe_wa.h | 7 ++++++- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index ef68c4a92972..631928258d71 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -387,8 +387,14 @@ struct xe_gt { unsigned long *engine; /** @wa_active.lrc: bitmap with active LRC workarounds */ unsigned long *lrc; - /** @wa_active.oob: bitmap with active OOB workaroudns */ + /** @wa_active.oob: bitmap with active OOB workarounds */ unsigned long *oob; + /** + * @wa_active.oob_initialized: mark oob as initialized to help + * detecting misuse of XE_WA() - it can only be called on + * initialization after OOB WAs have being processed + */ + bool oob_initialized; } wa_active; /** @user_engines: engines present in GT and available to userspace */ diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index fd009b2c68fa..564e32e44e3b 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -755,6 +755,7 @@ void xe_wa_process_oob(struct xe_gt *gt) xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.oob, ARRAY_SIZE(oob_was)); + gt->wa_active.oob_initialized = true; xe_rtp_process(&ctx, oob_was); } diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h index db9ddeaf69bf..52337405b5bc 100644 --- a/drivers/gpu/drm/xe/xe_wa.h +++ b/drivers/gpu/drm/xe/xe_wa.h @@ -6,6 +6,8 @@ #ifndef _XE_WA_ #define _XE_WA_ +#include "xe_assert.h" + struct drm_printer; struct xe_gt; struct xe_hw_engine; @@ -25,6 +27,9 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p); * @gt__: gt instance * @id__: XE_OOB_, as generated by build system in generated/xe_wa_oob.h */ -#define XE_WA(gt__, id__) test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob) +#define XE_WA(gt__, id__) ({ \ + xe_gt_assert(gt__, (gt__)->wa_active.oob_initialized); \ + test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \ +}) #endif From b46119578167150810184494b2a097c40e2144b8 Mon Sep 17 00:00:00 2001 From: Himal Prasad 
Ghimiray Date: Mon, 15 Jul 2024 16:25:54 +0530 Subject: [PATCH 76/95] drm/xe: Delete unused register from xe_regs.h Register SOFTWARE_FLAGS_SPR33 is unused; therefore, delete it. Cc: Michal Wajdeczko Signed-off-by: Himal Prasad Ghimiray Reviewed-by: Tejas Upadhyay Link: https://patchwork.freedesktop.org/patch/msgid/20240715105555.2220444-2-himal.prasad.ghimiray@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/regs/xe_regs.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h index 55bf47c99016..dfa869f0dddd 100644 --- a/drivers/gpu/drm/xe/regs/xe_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_regs.h @@ -15,8 +15,6 @@ #define GU_MISC_IRQ_OFFSET 0x444f0 #define GU_MISC_GSE REG_BIT(27) -#define SOFTWARE_FLAGS_SPR33 XE_REG(0x4f084) - #define GU_CNTL_PROTECTED XE_REG(0x10100C) #define DRIVERINT_FLR_DIS REG_BIT(31) From 1a394b4f504f33eac8c38b6f42ba025105c7e869 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Wed, 24 Jul 2024 09:43:41 -0700 Subject: [PATCH 77/95] drm/xe: Fix possible UAF in guc_exec_queue_process_msg Store xe_device ahead of processing message as message can be free'd in some cases. v2: - Including missing local changes v3: - Resend for CI Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202407231445.rpisd1vA-lkp@intel.com/ Fixes: d930c19fdff3 ("drm/xe: Build PM into GuC CT layer") Signed-off-by: Matthew Brost Reviewed-by: Himal Prasad Ghimiray Link: https://patchwork.freedesktop.org/patch/msgid/20240724164341.1848954-1-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_guc_submit.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index a4570631926f..460808507947 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1389,6 +1389,8 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg) static void guc_exec_queue_process_msg(struct xe_sched_msg *msg) { + struct xe_device *xe = guc_to_xe(exec_queue_to_guc(msg->private_data)); + trace_xe_sched_msg_recv(msg); switch (msg->opcode) { @@ -1408,7 +1410,7 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg) XE_WARN_ON("Unknown message type"); } - xe_pm_runtime_put(guc_to_xe(exec_queue_to_guc(msg->private_data))); + xe_pm_runtime_put(xe); } static const struct drm_sched_backend_ops drm_sched_ops = { From e814902866ed22bc07d2af1303f3183357a438e8 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2024 17:32:21 -0700 Subject: [PATCH 78/95] drm/xe/huc: Define HuC binary for LNL Add the unversioned define for the LNL HuC FW. All new binaries are GSC-enabled (and even if they weren't the driver can detect the type of HuC binary), so the new lnl HuC filename doesn't use the _gsc postfix to avoid confusion with the GSC binary. 
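(For reference, and inferred from the MAKE_FW_PATH naming convention visible in this file rather than stated in this patch: an unversioned no_ver(xe, huc, lnl) entry is expected to resolve to a firmware file named along the lines of xe/lnl_huc.bin.)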
Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240725003224.516233-5-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/xe/xe_uc_fw.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 5f23ecd98376..5fc34f5380df 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -116,6 +116,7 @@ struct fw_blobs_by_type { fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) #define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \ + fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \ fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \ fw_def(DG1, no_ver(i915, huc, dg1)) \ fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \ From f4aa02c43969c36213a7fd4e56831693fc83ec4b Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2024 17:32:22 -0700 Subject: [PATCH 79/95] drm/xe/gsc: Define GSC binary for LNL As with previous binaries, we match the compatibility version instead of the build number. Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240725003224.516233-6-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/xe/xe_uc_fw.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 5fc34f5380df..883eb619de5a 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -126,6 +126,7 @@ struct fw_blobs_by_type { /* for the GSC FW we match the compatibility version and not the release one */ #define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \ + fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 1, 0, 0)) \ fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0)) #define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \ From 351a88713b6daae5435d683ddab1913a65d09eb2 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2024 17:32:23 -0700 Subject: [PATCH 80/95] drm/xe/huc: Define HuC binary for BMG Add the unversioned define for the BMG HuC FW. Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240725003224.516233-7-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/xe/xe_uc_fw.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 883eb619de5a..5b70d23724c4 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -116,6 +116,7 @@ struct fw_blobs_by_type { fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) #define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \ + fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \ fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \ fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \ fw_def(DG1, no_ver(i915, huc, dg1)) \ From 641a118c9d91e53bc920ec0ea2ea350d84eb7a9d Mon Sep 17 00:00:00 2001 From: Gustavo Sousa Date: Mon, 22 Jul 2024 16:05:25 -0300 Subject: [PATCH 81/95] drm/xe: Remove stale declaration of xe_mmio_probe_vram() The declaration of xe_mmio_probe_vram() became useless since commit 638d1c79cbf1 ("drm/xe: Promote VRAM initialization function to own file"). Remove it. 
Signed-off-by: Gustavo Sousa Reviewed-by: Michal Wajdeczko Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240722190524.71548-2-gustavo.sousa@intel.com --- drivers/gpu/drm/xe/xe_mmio.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h index 6ae0cc32c651..26551410ecc8 100644 --- a/drivers/gpu/drm/xe/xe_mmio.h +++ b/drivers/gpu/drm/xe/xe_mmio.h @@ -22,7 +22,6 @@ u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set); int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval); bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg); -int xe_mmio_probe_vram(struct xe_device *xe); u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg); int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic); From be8f9f4c866f41b62a3df9a5cfd54e0e88fc3e3e Mon Sep 17 00:00:00 2001 From: Gustavo Sousa Date: Tue, 23 Jul 2024 09:01:21 -0300 Subject: [PATCH 82/95] drm/xe/mmio: Use single logic for waiting functions The implementations for xe_mmio_wait32() and xe_mmio_wait32_not() are almost identical. Let us avoid duplication of logic by having them call a common __xe_mmio_wait32() function. Signed-off-by: Gustavo Sousa Reviewed-by: Himal Prasad Ghimiray Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240723120120.5443-2-gustavo.sousa@intel.com --- drivers/gpu/drm/xe/xe_mmio.c | 143 +++++++++++++++-------------------- 1 file changed, 59 insertions(+), 84 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 435c01d003a8..bdcc7282385c 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -333,6 +333,59 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg) return (u64)udw << 32 | ldw; } +static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, + u32 *out_val, bool atomic, bool expect_match) +{ + ktime_t cur = ktime_get_raw(); + const ktime_t end = ktime_add_us(cur, timeout_us); + int ret = -ETIMEDOUT; + s64 wait = 10; + u32 read; + bool check; + + for (;;) { + read = xe_mmio_read32(gt, reg); + + check = (read & mask) == val; + if (!expect_match) + check = !check; + + if (check) { + ret = 0; + break; + } + + cur = ktime_get_raw(); + if (!ktime_before(cur, end)) + break; + + if (ktime_after(ktime_add_us(cur, wait), end)) + wait = ktime_us_delta(end, cur); + + if (atomic) + udelay(wait); + else + usleep_range(wait, wait << 1); + wait <<= 1; + } + + if (ret != 0) { + read = xe_mmio_read32(gt, reg); + + check = (read & mask) == val; + if (!expect_match) + check = !check; + + if (check) + ret = 0; + } + + if (out_val) + *out_val = read; + + return ret; +} + /** * xe_mmio_wait32() - Wait for a register to match the desired masked value * @gt: MMIO target GT @@ -355,43 +408,7 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg) int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic) { - ktime_t cur = ktime_get_raw(); - const ktime_t end = ktime_add_us(cur, timeout_us); - int ret = -ETIMEDOUT; - s64 wait = 10; - u32 read; - - for (;;) { - read = xe_mmio_read32(gt, reg); - if ((read & mask) == val) { - ret = 0; - break; - } - - cur = ktime_get_raw(); - if (!ktime_before(cur, end)) - break; - - if (ktime_after(ktime_add_us(cur, wait), end)) - wait =
ktime_us_delta(end, cur); - - if (atomic) - udelay(wait); - else - usleep_range(wait, wait << 1); - wait <<= 1; - } - - if (ret != 0) { - read = xe_mmio_read32(gt, reg); - if ((read & mask) == val) - ret = 0; - } - - if (out_val) - *out_val = read; - - return ret; + return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true); } /** @@ -399,58 +416,16 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t * @gt: MMIO target GT * @reg: register to read value from * @mask: mask to be applied to the value read from the register - * @val: value to match after applying the mask - * @timeout_us: time out after this period of time. Wait logic tries to be - * smart, applying an exponential backoff until @timeout_us is reached. + * @val: value not to be matched after applying the mask + * @timeout_us: time out after this period of time * @out_val: if not NULL, points where to store the last unmasked value * @atomic: needs to be true if calling from an atomic context * - * This function polls for a masked value to change from a given value and - * returns zero on success or -ETIMEDOUT if timed out. - * - * Note that @timeout_us represents the minimum amount of time to wait before - * giving up. The actual time taken by this function can be a little more than - * @timeout_us for different reasons, specially in non-atomic contexts. Thus, - * it is possible that this function succeeds even after @timeout_us has passed. + * This function works exactly like xe_mmio_wait32() with the exception that + * @val is expected not to be matched. */ int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic) { - ktime_t cur = ktime_get_raw(); - const ktime_t end = ktime_add_us(cur, timeout_us); - int ret = -ETIMEDOUT; - s64 wait = 10; - u32 read; - - for (;;) { - read = xe_mmio_read32(gt, reg); - if ((read & mask) != val) { - ret = 0; - break; - } - - cur = ktime_get_raw(); - if (!ktime_before(cur, end)) - break; - - if (ktime_after(ktime_add_us(cur, wait), end)) - wait = ktime_us_delta(end, cur); - - if (atomic) - udelay(wait); - else - usleep_range(wait, wait << 1); - wait <<= 1; - } - - if (ret != 0) { - read = xe_mmio_read32(gt, reg); - if ((read & mask) != val) - ret = 0; - } - - if (out_val) - *out_val = read; - - return ret; + return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false); } From 5e9209c3739454a08c4d770c649187b0f51dffc6 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 25 Jul 2024 16:18:01 -0700 Subject: [PATCH 83/95] drm/xe: Assert G2H outstanding when releasing G2H Ensure we are managing G2H credits correctly. Extra important now that this is tied to PM. 
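As background, a reduced model of the credit accounting that the new assert protects (illustrative types and names, not the driver's own; the invariant itself is what __g2h_release_space() checks):

	#include <assert.h>

	struct g2h_credits {
		unsigned int space;		/* free space in the G2H buffer */
		unsigned int outstanding;	/* sends still owed a G2H response */
	};

	static void g2h_reserve(struct g2h_credits *c, unsigned int len)
	{
		c->space -= len;
		c->outstanding++;
	}

	static void g2h_release(struct g2h_credits *c, unsigned int len)
	{
		/* a release must pair with an outstanding send */
		assert(c->outstanding);
		c->space += len;
		c->outstanding--;
	}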
Signed-off-by: Matthew Brost Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20240725231801.1958038-1-matthew.brost@intel.com --- drivers/gpu/drm/xe/xe_guc_ct.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 64afc90ad2c5..beeeb120d1fc 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -516,6 +516,7 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) lockdep_assert_held(&ct->fast_lock); xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <= ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space); + xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding); ct->ctbs.g2h.info.space += g2h_len; if (!--ct->g2h_outstanding) From fd6797ec50c561f085bc94e3ee26f484a52af79e Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 25 Jul 2024 23:43:35 -0700 Subject: [PATCH 84/95] drm/xe/rtp: Fix off-by-one when processing rules Gustavo noticed an odd "+ 2" in rtp_mark_active() while processing rtp rules and pointed out that it should be "+ 1". In fact, while processing entries without actions (OOB workarounds), if the WA is activated and has OR rules, it will also inadvertently activate the very next workaround. Tested on an LNL B0 platform: moving 18024947630 on top of 16020292621 makes the latter become active: $ cat /sys/kernel/debug/dri/0/gt0/workarounds ... OOB Workarounds 18024947630 16020292621 14018094691 16022287689 13011645652 22019338487_display In the future, a kunit test will be added to cover the rtp checks for entries without actions. Fixes: fe19328b900c ("drm/xe/rtp: Add support for entries with no action") Cc: Gustavo Sousa Reviewed-by: Gustavo Sousa Link: https://patchwork.freedesktop.org/patch/msgid/20240726064337.797576-6-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_rtp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 02e28274282f..5efe83cc82ab 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -231,7 +231,7 @@ static void rtp_mark_active(struct xe_device *xe, if (first == last) bitmap_set(ctx->active_entries, first, 1); else - bitmap_set(ctx->active_entries, first, last - first + 2); + bitmap_set(ctx->active_entries, first, last - first + 1); } /** From 7657d7c96657179d002817d5ec2a1d4c84f21cdb Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Fri, 26 Jul 2024 10:17:58 -0700 Subject: [PATCH 85/95] drm/xe/migrate: Future-proof compressed PAT check Although all current Xe2 platforms support FlatCCS, we probably shouldn't assume that will be universally true forever. In the past we've had platforms like PVC that didn't support compression, and the same could show up again at some point in the future. Future-proof the migration code by adding an explicit check for FlatCCS support to the condition that decides whether to use a compressed PAT index for migration. While we're at it, we can drop the IS_DGFX check since it's redundant with the src_is_vram check (only dGPUs have VRAM).
Cc: Akshata Jahagirdar Cc: Lucas De Marchi Signed-off-by: Matt Roper Reviewed-by: Akshata Jahagirdar Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240726171757.2728819-2-matthew.d.roper@intel.com --- drivers/gpu/drm/xe/xe_migrate.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index c007f68503d4..6f24aaf58252 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -781,7 +781,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, bool copy_ccs = xe_device_has_flat_ccs(xe) && xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo); bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram); - bool use_comp_pat = GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe) && src_is_vram && !dst_is_vram; + bool use_comp_pat = xe_device_has_flat_ccs(xe) && + GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram; /* Copying CCS between two different BOs is not supported yet. */ if (XE_WARN_ON(copy_ccs && src_bo != dst_bo)) From e525473fd5b7202ae751316799ed759bfaaf02bc Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 26 Jul 2024 18:59:00 -0700 Subject: [PATCH 86/95] drm/xe/kunit: Test WAs for BMG Add one variant for BMG to make sure the workarounds do not conflict. This matches the machine with BMG in CI: BATTLEMAGE e20b:0000 dgfx:1 gfx:Xe2_LPG / Xe2_HPG (20.01) media:Xe2_LPM / Xe2_HPM (13.01) display:yes dma_m_s:46 tc:1 gscfi:0 cscfi:1 Stepping = (G:A0, M:A1, D:**, B:**) Reviewed-by: Gustavo Sousa Link: https://patchwork.freedesktop.org/patch/msgid/20240727015907.899192-2-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/tests/xe_wa_test.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c index 9d0c715142b9..c96d1fe34151 100644 --- a/drivers/gpu/drm/xe/tests/xe_wa_test.c +++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c @@ -74,6 +74,7 @@ static const struct platform_test_case cases[] = { GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0), GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0), GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0), + GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1), }; static void platform_desc(const struct platform_test_case *t, char *desc) From 20c3a0241b5c15c0d64929cba04653d5556b7d5d Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 26 Jul 2024 18:59:01 -0700 Subject: [PATCH 87/95] drm/xe/kunit: Rename count to count_sr_entries The RTP tests check both the result of processing the RTP entries and the outcome saved as SR entries. Rename "count" to be explicit about what's being counted. 
Reviewed-by: Gustavo Sousa Link: https://patchwork.freedesktop.org/patch/msgid/20240727015907.899192-3-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/tests/xe_rtp_test.c | 32 +++++++++++++------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c index f217445c246a..5ebaed1af3f2 100644 --- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c +++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c @@ -36,7 +36,7 @@ struct rtp_test_case { struct xe_reg expected_reg; u32 expected_set_bits; u32 expected_clr_bits; - unsigned long expected_count; + unsigned long expected_count_sr_entries; unsigned int expected_sr_errors; const struct xe_rtp_entry_sr *entries; }; @@ -57,7 +57,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0) | REG_BIT(1), .expected_clr_bits = REG_BIT(0) | REG_BIT(1), - .expected_count = 1, + .expected_count_sr_entries = 1, /* Different bits on the same register: create a single entry */ .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("basic-1"), @@ -76,7 +76,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), - .expected_count = 1, + .expected_count_sr_entries = 1, /* Don't coalesce second entry since rules don't match */ .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("basic-1"), @@ -95,7 +95,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2), .expected_clr_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2), - .expected_count = 1, + .expected_count_sr_entries = 1, .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("first"), XE_RTP_RULES(FUNC(match_yes), OR, FUNC(match_no)), @@ -121,7 +121,7 @@ static const struct rtp_test_case cases[] = { { .name = "match-or-xfail", .expected_reg = REGULAR_REG1, - .expected_count = 0, + .expected_count_sr_entries = 0, .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("leading-or"), XE_RTP_RULES(OR, FUNC(match_yes)), @@ -148,7 +148,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), - .expected_count = 1, + .expected_count_sr_entries = 1, /* Don't coalesce second entry due to one of the rules */ .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("basic-1"), @@ -167,7 +167,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), - .expected_count = 2, + .expected_count_sr_entries = 2, /* Same bits on different registers are not coalesced */ .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("basic-1"), @@ -186,7 +186,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(1) | REG_BIT(0), - .expected_count = 1, + .expected_count_sr_entries = 1, /* Check clr vs set actions on different bits */ .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("basic-1"), @@ -207,7 +207,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = TEMP_FIELD, .expected_clr_bits = TEMP_MASK, - .expected_count = 1, + .expected_count_sr_entries = 1, /* Check FIELD_SET works */ .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("basic-1"), @@ 
-225,7 +225,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), - .expected_count = 1, + .expected_count_sr_entries = 1, .expected_sr_errors = 1, .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("basic-1"), @@ -245,7 +245,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), - .expected_count = 1, + .expected_count_sr_entries = 1, .expected_sr_errors = 1, .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("basic-1"), @@ -265,7 +265,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), - .expected_count = 1, + .expected_count_sr_entries = 1, .expected_sr_errors = 2, .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("basic-1"), @@ -295,7 +295,7 @@ static void xe_rtp_process_tests(struct kunit *test) struct xe_reg_sr *reg_sr = &gt->reg_sr; const struct xe_reg_sr_entry *sre, *sr_entry = NULL; struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt); - unsigned long idx, count = 0; + unsigned long idx, count_sr_entries = 0; xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe); xe_rtp_process_to_sr(&ctx, param->entries, reg_sr); @@ -304,11 +304,11 @@ static void xe_rtp_process_tests(struct kunit *test) if (idx == param->expected_reg.addr) sr_entry = sre; - count++; + count_sr_entries++; } - KUNIT_EXPECT_EQ(test, count, param->expected_count); - if (count) { + KUNIT_EXPECT_EQ(test, count_sr_entries, param->expected_count_sr_entries); + if (count_sr_entries) { KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits); KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits); KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw); From d93a4fec186c106115d0b3165047dd06f2c1aaab Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 26 Jul 2024 18:59:02 -0700 Subject: [PATCH 88/95] drm/xe/kunit: Test active rtp entries Enable active tracking in the rtp context and check, for all the tests, that the expected entries become active.
Reviewed-by: Gustavo Sousa Link: https://patchwork.freedesktop.org/patch/msgid/20240727015907.899192-4-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/tests/xe_rtp_test.c | 20 +++++++++++++++++++- drivers/gpu/drm/xe/xe_rtp.c | 1 + 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c index 5ebaed1af3f2..459f6ae9449d 100644 --- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c +++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c @@ -38,6 +38,7 @@ struct rtp_test_case { u32 expected_clr_bits; unsigned long expected_count_sr_entries; unsigned int expected_sr_errors; + unsigned long expected_active; const struct xe_rtp_entry_sr *entries; }; @@ -57,6 +58,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0) | REG_BIT(1), .expected_clr_bits = REG_BIT(0) | REG_BIT(1), + .expected_active = BIT(0) | BIT(1), .expected_count_sr_entries = 1, /* Different bits on the same register: create a single entry */ .entries = (const struct xe_rtp_entry_sr[]) { @@ -76,6 +78,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), + .expected_active = BIT(0), .expected_count_sr_entries = 1, /* Don't coalesce second entry since rules don't match */ .entries = (const struct xe_rtp_entry_sr[]) { @@ -95,6 +98,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2), .expected_clr_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2), + .expected_active = BIT(0) | BIT(1) | BIT(2), .expected_count_sr_entries = 1, .entries = (const struct xe_rtp_entry_sr[]) { { XE_RTP_NAME("first"), @@ -148,6 +152,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), + .expected_active = BIT(0), .expected_count_sr_entries = 1, /* Don't coalesce second entry due to one of the rules */ .entries = (const struct xe_rtp_entry_sr[]) { @@ -167,6 +172,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), + .expected_active = BIT(0) | BIT(1), .expected_count_sr_entries = 2, /* Same bits on different registers are not coalesced */ .entries = (const struct xe_rtp_entry_sr[]) { @@ -186,6 +192,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(1) | REG_BIT(0), + .expected_active = BIT(0) | BIT(1), .expected_count_sr_entries = 1, /* Check clr vs set actions on different bits */ .entries = (const struct xe_rtp_entry_sr[]) { @@ -207,6 +214,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = TEMP_FIELD, .expected_clr_bits = TEMP_MASK, + .expected_active = BIT(0), .expected_count_sr_entries = 1, /* Check FIELD_SET works */ .entries = (const struct xe_rtp_entry_sr[]) { @@ -225,6 +233,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), + .expected_active = BIT(0) | BIT(1), .expected_count_sr_entries = 1, .expected_sr_errors = 1, .entries = (const struct xe_rtp_entry_sr[]) { @@ -245,6 +254,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = 
REG_BIT(0), + .expected_active = BIT(0) | BIT(1), .expected_count_sr_entries = 1, .expected_sr_errors = 1, .entries = (const struct xe_rtp_entry_sr[]) { @@ -265,6 +275,7 @@ static const struct rtp_test_case cases[] = { .expected_reg = REGULAR_REG1, .expected_set_bits = REG_BIT(0), .expected_clr_bits = REG_BIT(0), + .expected_active = BIT(0) | BIT(1) | BIT(2), .expected_count_sr_entries = 1, .expected_sr_errors = 2, .entries = (const struct xe_rtp_entry_sr[]) { @@ -295,9 +306,14 @@ static void xe_rtp_process_tests(struct kunit *test) struct xe_reg_sr *reg_sr = >->reg_sr; const struct xe_reg_sr_entry *sre, *sr_entry = NULL; struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt); - unsigned long idx, count_sr_entries = 0; + unsigned long idx, count_sr_entries = 0, count_rtp_entries = 0, active = 0; xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe); + + while (param->entries[count_rtp_entries].rules) + count_rtp_entries++; + + xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries); xe_rtp_process_to_sr(&ctx, param->entries, reg_sr); xa_for_each(®_sr->xa, idx, sre) { @@ -307,6 +323,8 @@ static void xe_rtp_process_tests(struct kunit *test) count_sr_entries++; } + KUNIT_EXPECT_EQ(test, active, param->expected_active); + KUNIT_EXPECT_EQ(test, count_sr_entries, param->expected_count_sr_entries); if (count_sr_entries) { KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits); diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 5efe83cc82ab..f054ac9cf06d 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -217,6 +217,7 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx, ctx->active_entries = active_entries; ctx->n_entries = n_entries; } +EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking); static void rtp_mark_active(struct xe_device *xe, struct xe_rtp_process_ctx *ctx, From 9eab82c38d0184289ffea56477daca8425758442 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 26 Jul 2024 18:59:03 -0700 Subject: [PATCH 89/95] drm/xe/kunit: Rename rtp test cases Those tests check the behavior of xe_rtp_process_to_sr(), so name them accordingly to allow adding tests for xe_rtp_process() later. Reviewed-by: Gustavo Sousa Link: https://patchwork.freedesktop.org/patch/msgid/20240727015907.899192-5-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/tests/xe_rtp_test.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c index 459f6ae9449d..893e3e01e2ab 100644 --- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c +++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c @@ -31,7 +31,7 @@ #undef XE_REG_MCR #define XE_REG_MCR(...) 
XE_REG(__VA_ARGS__, .mcr = 1) -struct rtp_test_case { +struct rtp_to_sr_test_case { const char *name; struct xe_reg expected_reg; u32 expected_set_bits; @@ -52,7 +52,7 @@ static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe) return false; } -static const struct rtp_test_case cases[] = { +static const struct rtp_to_sr_test_case rtp_to_sr_cases[] = { { .name = "coalesce-same-reg", .expected_reg = REGULAR_REG1, @@ -298,9 +298,9 @@ static const struct rtp_test_case cases[] = { }, }; -static void xe_rtp_process_tests(struct kunit *test) +static void xe_rtp_process_to_sr_tests(struct kunit *test) { - const struct rtp_test_case *param = test->param_value; + const struct rtp_to_sr_test_case *param = test->param_value; struct xe_device *xe = test->priv; struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt; struct xe_reg_sr *reg_sr = &gt->reg_sr; @@ -308,7 +308,7 @@ static void xe_rtp_process_tests(struct kunit *test) struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt); unsigned long idx, count_sr_entries = 0, count_rtp_entries = 0, active = 0; - xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe); + xe_reg_sr_init(reg_sr, "xe_rtp_to_sr_tests", xe); while (param->entries[count_rtp_entries].rules) count_rtp_entries++; @@ -337,12 +337,12 @@ static void xe_rtp_process_tests(struct kunit *test) KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors); } -static void rtp_desc(const struct rtp_test_case *t, char *desc) +static void rtp_to_sr_desc(const struct rtp_to_sr_test_case *t, char *desc) { strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); } -KUNIT_ARRAY_PARAM(rtp, cases, rtp_desc); +KUNIT_ARRAY_PARAM(rtp_to_sr, rtp_to_sr_cases, rtp_to_sr_desc); static int xe_rtp_test_init(struct kunit *test) { @@ -375,7 +375,7 @@ static void xe_rtp_test_exit(struct kunit *test) } static struct kunit_case xe_rtp_tests[] = { - KUNIT_CASE_PARAM(xe_rtp_process_tests, rtp_gen_params), + KUNIT_CASE_PARAM(xe_rtp_process_to_sr_tests, rtp_to_sr_gen_params), {} }; From 6da8acfdb6e1aa72787efb733e3af9e9e0c57b9a Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 26 Jul 2024 18:59:04 -0700 Subject: [PATCH 90/95] drm/xe/kunit: Test rtp with no actions The OOB WAs use xe_rtp_process(), without passing an sr to save the result of the actions since there are none. They are also executed in a gt-only context, making it harder to share the implementation. Thus, introduce a new set of tests to check these RTP entries. The only check that can be done is whether the entry was marked as active. Before commit fd6797ec50c5 ("drm/xe/rtp: Fix off-by-one when processing rules") several of these tests were failing: the processing of OR'ed entries would inadvertently enable the subsequent entry.
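In the shape such action-less tables take, a schematic example (the workaround numbers and match functions here are illustrative, not taken from xe_wa_oob.rules; the structure mirrors the test cases in the diff below):

	static const struct xe_rtp_entry sketch_oob_was[] = {
		{ XE_RTP_NAME("16020292621"),
		  XE_RTP_RULES(FUNC(match_a)),
		},
		/* no name: OR'ed into "16020292621" above */
		{ XE_RTP_RULES(FUNC(match_b)) },
		{ XE_RTP_NAME("18024947630"),
		  XE_RTP_RULES(FUNC(match_c)),
		},
		{}
	};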
Reviewed-by: Gustavo Sousa Link: https://patchwork.freedesktop.org/patch/msgid/20240727015907.899192-6-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/tests/xe_rtp_test.c | 161 +++++++++++++++++++++++++ drivers/gpu/drm/xe/xe_rtp.c | 1 + 2 files changed, 162 insertions(+) diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c index 893e3e01e2ab..9ab3d5950d59 100644 --- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c +++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c @@ -42,6 +42,12 @@ struct rtp_to_sr_test_case { const struct xe_rtp_entry_sr *entries; }; +struct rtp_test_case { + const char *name; + unsigned long expected_active; + const struct xe_rtp_entry *entries; +}; + static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe) { return true; @@ -337,6 +343,153 @@ static void xe_rtp_process_to_sr_tests(struct kunit *test) KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors); } +/* + * Entries below follow the logic used with xe_wa_oob.rules: + * 1) Entries with empty name are OR'ed: all entries are marked active since + * the last entry with a name + * 2) There are no actions associated with the rules + */ +static const struct rtp_test_case rtp_cases[] = { + { + .name = "active1", + .expected_active = BIT(0), + .entries = (const struct xe_rtp_entry[]) { + { XE_RTP_NAME("r1"), + XE_RTP_RULES(FUNC(match_yes)), + }, + {} + }, + }, + { + .name = "active2", + .expected_active = BIT(0) | BIT(1), + .entries = (const struct xe_rtp_entry[]) { + { XE_RTP_NAME("r1"), + XE_RTP_RULES(FUNC(match_yes)), + }, + { XE_RTP_NAME("r2"), + XE_RTP_RULES(FUNC(match_yes)), + }, + {} + }, + }, + { + .name = "active-inactive", + .expected_active = BIT(0), + .entries = (const struct xe_rtp_entry[]) { + { XE_RTP_NAME("r1"), + XE_RTP_RULES(FUNC(match_yes)), + }, + { XE_RTP_NAME("r2"), + XE_RTP_RULES(FUNC(match_no)), + }, + {} + }, + }, + { + .name = "inactive-active", + .expected_active = BIT(1), + .entries = (const struct xe_rtp_entry[]) { + { XE_RTP_NAME("r1"), + XE_RTP_RULES(FUNC(match_no)), + }, + { XE_RTP_NAME("r2"), + XE_RTP_RULES(FUNC(match_yes)), + }, + {} + }, + }, + { + .name = "inactive-1st_or_active-inactive", + .expected_active = BIT(1) | BIT(2) | BIT(3), + .entries = (const struct xe_rtp_entry[]) { + { XE_RTP_NAME("r1"), + XE_RTP_RULES(FUNC(match_no)), + }, + { XE_RTP_NAME("r2_or_conditions"), + XE_RTP_RULES(FUNC(match_yes)), + }, + { XE_RTP_RULES(FUNC(match_no)) }, + { XE_RTP_RULES(FUNC(match_no)) }, + { XE_RTP_NAME("r3"), + XE_RTP_RULES(FUNC(match_no)), + }, + {} + }, + }, + { + .name = "inactive-2nd_or_active-inactive", + .expected_active = BIT(1) | BIT(2) | BIT(3), + .entries = (const struct xe_rtp_entry[]) { + { XE_RTP_NAME("r1"), + XE_RTP_RULES(FUNC(match_no)), + }, + { XE_RTP_NAME("r2_or_conditions"), + XE_RTP_RULES(FUNC(match_no)), + }, + { XE_RTP_RULES(FUNC(match_yes)) }, + { XE_RTP_RULES(FUNC(match_no)) }, + { XE_RTP_NAME("r3"), + XE_RTP_RULES(FUNC(match_no)), + }, + {} + }, + }, + { + .name = "inactive-last_or_active-inactive", + .expected_active = BIT(1) | BIT(2) | BIT(3), + .entries = (const struct xe_rtp_entry[]) { + { XE_RTP_NAME("r1"), + XE_RTP_RULES(FUNC(match_no)), + }, + { XE_RTP_NAME("r2_or_conditions"), + XE_RTP_RULES(FUNC(match_no)), + }, + { XE_RTP_RULES(FUNC(match_no)) }, + { XE_RTP_RULES(FUNC(match_yes)) }, + { XE_RTP_NAME("r3"), + XE_RTP_RULES(FUNC(match_no)), + }, + {} + }, + }, + { + .name = "inactive-no_or_active-inactive", + .expected_active = 0, + .entries = (const struct xe_rtp_entry[]) { + {
XE_RTP_NAME("r1"), + XE_RTP_RULES(FUNC(match_no)), + }, + { XE_RTP_NAME("r2_or_conditions"), + XE_RTP_RULES(FUNC(match_no)), + }, + { XE_RTP_RULES(FUNC(match_no)) }, + { XE_RTP_RULES(FUNC(match_no)) }, + { XE_RTP_NAME("r3"), + XE_RTP_RULES(FUNC(match_no)), + }, + {} + }, + }, +}; + +static void xe_rtp_process_tests(struct kunit *test) +{ + const struct rtp_test_case *param = test->param_value; + struct xe_device *xe = test->priv; + struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt; + struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt); + unsigned long count_rtp_entries = 0, active = 0; + + while (param->entries[count_rtp_entries].rules) + count_rtp_entries++; + + xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries); + xe_rtp_process(&ctx, param->entries); + + KUNIT_EXPECT_EQ(test, active, param->expected_active); +} + static void rtp_to_sr_desc(const struct rtp_to_sr_test_case *t, char *desc) { strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); @@ -344,6 +497,13 @@ static void rtp_to_sr_desc(const struct rtp_to_sr_test_case *t, char *desc) KUNIT_ARRAY_PARAM(rtp_to_sr, rtp_to_sr_cases, rtp_to_sr_desc); +static void rtp_desc(const struct rtp_test_case *t, char *desc) +{ + strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); +} + +KUNIT_ARRAY_PARAM(rtp, rtp_cases, rtp_desc); + static int xe_rtp_test_init(struct kunit *test) { struct xe_device *xe; @@ -376,6 +536,7 @@ static void xe_rtp_test_exit(struct kunit *test) static struct kunit_case xe_rtp_tests[] = { KUNIT_CASE_PARAM(xe_rtp_process_to_sr_tests, rtp_to_sr_gen_params), + KUNIT_CASE_PARAM(xe_rtp_process_tests, rtp_gen_params), {} }; diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index f054ac9cf06d..1c641cc0f5a1 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -327,6 +327,7 @@ void xe_rtp_process(struct xe_rtp_process_ctx *ctx, entry - entries); } } +EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process); bool xe_rtp_match_even_instance(const struct xe_gt *gt, const struct xe_hw_engine *hwe) From d51a75ac975a6147a4dea2d68cef4566f5c2ecaf Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 26 Jul 2024 18:59:05 -0700 Subject: [PATCH 91/95] drm/xe/rtp: Simplify marking active workarounds Stop doing the calculation both in rtp_mark_active() and in its caller. The caller easily knows the number of entries to mark, so just pass it forward. That also simplifies rtp_mark_active() since now it doesn't have a special case when handling 1 entry. 
Reviewed-by: Gustavo Sousa
Link: https://patchwork.freedesktop.org/patch/msgid/20240727015907.899192-7-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi
---
 drivers/gpu/drm/xe/xe_rtp.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 1c641cc0f5a1..86727f34ca25 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -221,18 +221,15 @@ EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking);

 static void rtp_mark_active(struct xe_device *xe,
 			    struct xe_rtp_process_ctx *ctx,
-			    unsigned int first, unsigned int last)
+			    unsigned int first, unsigned int n_entries)
 {
 	if (!ctx->active_entries)
 		return;

-	if (drm_WARN_ON(&xe->drm, last > ctx->n_entries))
+	if (drm_WARN_ON(&xe->drm, first + n_entries > ctx->n_entries))
 		return;

-	if (first == last)
-		bitmap_set(ctx->active_entries, first, 1);
-	else
-		bitmap_set(ctx->active_entries, first, last - first + 1);
+	bitmap_set(ctx->active_entries, first, n_entries);
 }

 /**
@@ -277,8 +274,7 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
 		}

 		if (match)
-			rtp_mark_active(xe, ctx, entry - entries,
-					entry - entries);
+			rtp_mark_active(xe, ctx, entry - entries, 1);
 	}
 }
 EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
@@ -324,7 +320,7 @@ void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
 		entry--;

 		rtp_mark_active(xe, ctx, first_entry - entries,
-				entry - entries);
+				entry - first_entry + 1);
 	}
 }
 EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process);

From 1c408c516414fd9760c54f6d42ed1fa44a1dff15 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi
Date: Fri, 26 Jul 2024 18:59:06 -0700
Subject: [PATCH 92/95] drm/xe/rtp: Expand max rules/actions per entry again

Like commit 512660cd1f1a ("drm/xe/rtp: Expand max rules/actions per
entry") did, expand the maximum number of actions/rules. That commit
was too conservative, incrementing the maximum by just 2. Other than
the ugliness of these macros and the additional preprocessor steps when
they are used, there is no downside to increasing the maximum: the
tables in which they are used rely on a sentinel to mark the last
element.

With rtp processing now supporting OR rules, it's possible to migrate
the extension made for OOB WAs, in which entries with no name are OR'ed
with the previous named entry. For that, the maximum number of rules
needs to be increased. Just double it. Hopefully 12 will remain
sufficient for longer than 6 did.

Reviewed-by: Gustavo Sousa
Link: https://patchwork.freedesktop.org/patch/msgid/20240727015907.899192-8-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi
---
 drivers/gpu/drm/xe/xe_rtp.h         | 4 ++--
 drivers/gpu/drm/xe/xe_rtp_helpers.h | 6 ++++++
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index ad446731192c..827d932b6908 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -374,7 +374,7 @@ struct xe_reg_sr;
 * XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry_sr entry
 * @...: Rules
 *
- * At least one rule is needed and up to 6 are supported. Multiple rules are
+ * At least one rule is needed and up to 12 are supported. Multiple rules are
 * AND'ed together, i.e. all the rules must evaluate to true for the entry to
 * be processed. See XE_RTP_MATCH_* for the possible match rules. Example:
 *
@@ -399,7 +399,7 @@ struct xe_reg_sr;
 * XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry_sr
 * @...: Actions to be taken
 *
- * At least one action is needed and up to 6 are supported. 
See XE_RTP_ACTION_*
+ * At least one action is needed and up to 12 are supported. See XE_RTP_ACTION_*
 * for the possible actions. Example:
 *
 * .. code-block:: c

diff --git a/drivers/gpu/drm/xe/xe_rtp_helpers.h b/drivers/gpu/drm/xe/xe_rtp_helpers.h
index c59e40fd7fff..a33b0ae98bbc 100644
--- a/drivers/gpu/drm/xe/xe_rtp_helpers.h
+++ b/drivers/gpu/drm/xe/xe_rtp_helpers.h
@@ -60,6 +60,12 @@
 #define XE_RTP_PASTE_4(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_3(prefix_, sep_, _XE_TUPLE_TAIL args_)
 #define XE_RTP_PASTE_5(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_4(prefix_, sep_, _XE_TUPLE_TAIL args_)
 #define XE_RTP_PASTE_6(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_5(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_7(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_6(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_8(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_7(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_9(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_8(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_10(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_9(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_11(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_10(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_12(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_11(prefix_, sep_, _XE_TUPLE_TAIL args_)

 /*
 * XE_RTP_DROP_CAST - Drop cast to convert a compound statement to a initializer

From dbcbfc72d3b660b6dd9ea7d2c089c228609714ec Mon Sep 17 00:00:00 2001
From: Lucas De Marchi
Date: Fri, 26 Jul 2024 18:59:07 -0700
Subject: [PATCH 93/95] drm/xe: Migrate OOB WAs to OR rules

Now that rtp supports OR rules, there is no need to extend it to
process OOB WAs: previously, an entry with no name was treated as "a
set of rules OR'ed with the last named entry". Instead of generating
those extra entries, add OR rules directly. The syntax of
xe_wa_oob.rules remains the same, with xe_gen_wa_oob generating a
slightly different table, as sketched below.
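For illustration, here is how the generated table changes (hypothetical
WA name and FUNC rules, mirroring the patterns used in the test file):

    /* Before: each OR'ed alternative became a separate, unnamed entry. */
    { XE_RTP_NAME("WA_1"), XE_RTP_RULES(FUNC(match_a)) },
    { XE_RTP_NAME(NULL), XE_RTP_RULES(FUNC(match_b)) },

    /* After: a single named entry carrying the OR'ed rules. */
    { XE_RTP_NAME("WA_1"),
      XE_RTP_RULES(FUNC(match_a), OR,
    	       FUNC(match_b)) },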
The object size delta is negligible, but having a single implementation
makes it easier to maintain:

add/remove: 0/0 grow/shrink: 1/2 up/down: 160/-269 (-109)
Function                 old     new   delta
__compound_literal      6104    6264    +160
xe_wa_dump              1839    1810     -29
oob_was                  816     576    -240
Total: Before=17257, After=17148, chg -0.63%

Reviewed-by: Gustavo Sousa
Link: https://patchwork.freedesktop.org/patch/msgid/20240727015907.899192-9-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi
---
 drivers/gpu/drm/xe/tests/xe_rtp_test.c | 34 +++++++++++-------------
 drivers/gpu/drm/xe/xe_gen_wa_oob.c     | 16 +++++++++---
 drivers/gpu/drm/xe/xe_rtp.c            | 36 +++++++-------------------
 3 files changed, 37 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 9ab3d5950d59..36a3b5420fef 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -401,16 +401,15 @@ static const struct rtp_test_case rtp_cases[] = {
 	},
 	{
 		.name = "inactive-1st_or_active-inactive",
-		.expected_active = BIT(1) | BIT(2) | BIT(3),
+		.expected_active = BIT(1),
 		.entries = (const struct xe_rtp_entry[]) {
 			{ XE_RTP_NAME("r1"),
 			  XE_RTP_RULES(FUNC(match_no)),
 			},
 			{ XE_RTP_NAME("r2_or_conditions"),
-			  XE_RTP_RULES(FUNC(match_yes)),
-			},
-			{ XE_RTP_RULES(FUNC(match_no)) },
-			{ XE_RTP_RULES(FUNC(match_no)) },
+			  XE_RTP_RULES(FUNC(match_yes), OR,
+				       FUNC(match_no), OR,
+				       FUNC(match_no)) },
 			{ XE_RTP_NAME("r3"),
 			  XE_RTP_RULES(FUNC(match_no)),
 			},
@@ -419,16 +418,15 @@ static const struct rtp_test_case rtp_cases[] = {
 	},
 	{
 		.name = "inactive-2nd_or_active-inactive",
-		.expected_active = BIT(1) | BIT(2) | BIT(3),
+		.expected_active = BIT(1),
 		.entries = (const struct xe_rtp_entry[]) {
 			{ XE_RTP_NAME("r1"),
 			  XE_RTP_RULES(FUNC(match_no)),
 			},
 			{ XE_RTP_NAME("r2_or_conditions"),
-			  XE_RTP_RULES(FUNC(match_no)),
-			},
-			{ XE_RTP_RULES(FUNC(match_yes)) },
-			{ XE_RTP_RULES(FUNC(match_no)) },
+			  XE_RTP_RULES(FUNC(match_no), OR,
+				       FUNC(match_yes), OR,
+				       FUNC(match_no)) },
 			{ XE_RTP_NAME("r3"),
 			  XE_RTP_RULES(FUNC(match_no)),
 			},
@@ -437,16 +435,15 @@ static const struct rtp_test_case rtp_cases[] = {
 	},
 	{
 		.name = "inactive-last_or_active-inactive",
-		.expected_active = BIT(1) | BIT(2) | BIT(3),
+		.expected_active = BIT(1),
 		.entries = (const struct xe_rtp_entry[]) {
 			{ XE_RTP_NAME("r1"),
 			  XE_RTP_RULES(FUNC(match_no)),
 			},
 			{ XE_RTP_NAME("r2_or_conditions"),
-			  XE_RTP_RULES(FUNC(match_no)),
-			},
-			{ XE_RTP_RULES(FUNC(match_no)) },
-			{ XE_RTP_RULES(FUNC(match_yes)) },
+			  XE_RTP_RULES(FUNC(match_no), OR,
+				       FUNC(match_no), OR,
+				       FUNC(match_yes)) },
 			{ XE_RTP_NAME("r3"),
 			  XE_RTP_RULES(FUNC(match_no)),
 			},
@@ -461,10 +458,9 @@ static const struct rtp_test_case rtp_cases[] = {
 			  XE_RTP_RULES(FUNC(match_no)),
 			},
 			{ XE_RTP_NAME("r2_or_conditions"),
-			  XE_RTP_RULES(FUNC(match_no)),
-			},
-			{ XE_RTP_RULES(FUNC(match_no)) },
-			{ XE_RTP_RULES(FUNC(match_no)) },
+			  XE_RTP_RULES(FUNC(match_no), OR,
+				       FUNC(match_no), OR,
+				       FUNC(match_no)) },
 			{ XE_RTP_NAME("r3"),
 			  XE_RTP_RULES(FUNC(match_no)),
 			},

diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 106ee2b027f0..904cf47925aa 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -97,19 +97,27 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)

 		if (name) {
 			fprintf(cheader, "\tXE_WA_OOB_%s = %u,\n", name, idx);
-			fprintf(csource, "{ XE_RTP_NAME(\"%s\"), XE_RTP_RULES(%s) },\n",
+
+			/* Close previous entry before starting a new one */
+			if (idx)
+				fprintf(csource, ") },\n");
+
+			fprintf(csource, "{ 
XE_RTP_NAME(\"%s\"),\n XE_RTP_RULES(%s", name, rules); + idx++; } else { - fprintf(csource, "{ XE_RTP_NAME(NULL), XE_RTP_RULES(%s) },\n", - rules); + fprintf(csource, ", OR,\n\t%s", rules); } - idx++; lineno++; if (!is_continuation) prev_name = name; } + /* Close last entry */ + if (idx) + fprintf(csource, ") },\n"); + fprintf(cheader, "\t_XE_WA_OOB_COUNT = %u\n", idx); return 0; diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 86727f34ca25..e78ba324dd18 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -221,15 +221,15 @@ EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking); static void rtp_mark_active(struct xe_device *xe, struct xe_rtp_process_ctx *ctx, - unsigned int first, unsigned int n_entries) + unsigned int idx) { if (!ctx->active_entries) return; - if (drm_WARN_ON(&xe->drm, first + n_entries > ctx->n_entries)) + if (drm_WARN_ON(&xe->drm, idx >= ctx->n_entries)) return; - bitmap_set(ctx->active_entries, first, n_entries); + bitmap_set(ctx->active_entries, idx, 1); } /** @@ -274,7 +274,7 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx, } if (match) - rtp_mark_active(xe, ctx, entry - entries, 1); + rtp_mark_active(xe, ctx, entry - entries); } } EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr); @@ -285,42 +285,26 @@ EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr); * @entries: Table with RTP definitions * * Walk the table pointed by @entries (with an empty sentinel), executing the - * rules. A few differences from xe_rtp_process_to_sr(): - * - * 1. There is no action associated with each entry since this uses - * struct xe_rtp_entry. Its main use is for marking active workarounds via - * xe_rtp_process_ctx_enable_active_tracking(). - * 2. There is support for OR operations by having entries with no name. + * rules. One difference from xe_rtp_process_to_sr(): there is no action + * associated with each entry since this uses struct xe_rtp_entry. Its main use + * is for marking active workarounds via + * xe_rtp_process_ctx_enable_active_tracking(). 
 */
 void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
 		    const struct xe_rtp_entry *entries)
 {
-	const struct xe_rtp_entry *entry, *first_entry;
+	const struct xe_rtp_entry *entry;
 	struct xe_hw_engine *hwe;
 	struct xe_gt *gt;
 	struct xe_device *xe;

 	rtp_get_context(ctx, &hwe, &gt, &xe);

-	first_entry = entries;
-	if (drm_WARN_ON(&xe->drm, !first_entry->name))
-		return;
-
 	for (entry = entries; entry && entry->rules; entry++) {
-		if (entry->name)
-			first_entry = entry;
-
 		if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
 			continue;

-		/* Fast-forward entry, eliminating the OR'ed entries */
-		for (entry++; entry && entry->rules; entry++)
-			if (entry->name)
-				break;
-		entry--;
-
-		rtp_mark_active(xe, ctx, first_entry - entries,
-				entry - first_entry + 1);
+		rtp_mark_active(xe, ctx, entry - entries);
 	}
 }
 EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process);

From e4ac526c440af8aa94d2bdfe6066339dd93b4db2 Mon Sep 17 00:00:00 2001
From: Sai Teja Pottumuttu
Date: Wed, 24 Jul 2024 17:45:21 +0530
Subject: [PATCH 94/95] drm/xe/xe2hpg: Introduce performance tuning changes
 for Xe2_HPG

Add performance tuning changes for Xe2_HPG.

Bspec: 72161

Signed-off-by: Sai Teja Pottumuttu
Reviewed-by: Gustavo Sousa
Signed-off-by: Matt Roper
Link: https://patchwork.freedesktop.org/patch/msgid/20240724121521.2347524-1-sai.teja.pottumuttu@intel.com
---
 drivers/gpu/drm/xe/regs/xe_gt_regs.h | 1 +
 drivers/gpu/drm/xe/xe_tuning.c       | 8 ++++++++
 2 files changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 8a94a94d2267..3b87f95f9ecf 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -108,6 +108,7 @@

 #define FF_MODE					XE_REG_MCR(0x6210)
 #define   DIS_TE_AUTOSTRIP			REG_BIT(31)
+#define   VS_HIT_MAX_VALUE_MASK			REG_GENMASK(25, 20)
 #define   DIS_MESH_PARTIAL_AUTOSTRIP		REG_BIT(16)
 #define   DIS_MESH_AUTOSTRIP			REG_BIT(15)

diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index d4e6fa918942..77d4eec0118d 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -93,6 +93,14 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
 			   REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
 	},

+	/* Xe2_HPG */
+
+	{ XE_RTP_NAME("Tuning: vs hit max value"),
+	  XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+	  XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK,
+				   REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f)))
+	},
+
 	{}
 };

From f2881dfdaaa9ec873dbd383ef5512fc31e576cbb Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven
Date: Mon, 29 Jul 2024 11:26:34 +0200
Subject: [PATCH 95/95] drm/xe/oa/uapi: Make bit masks unsigned
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When building with gcc-5:

In function ‘decode_oa_format.isra.26’,
    inlined from ‘xe_oa_set_prop_oa_format’ at drivers/gpu/drm/xe/xe_oa.c:1664:6:
././include/linux/compiler_types.h:510:38: error: call to ‘__compiletime_assert_1336’ declared with attribute error: FIELD_GET: mask is not constant
[...]
./include/linux/bitfield.h:155:3: note: in expansion of macro ‘__BF_FIELD_CHECK’ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ ^ drivers/gpu/drm/xe/xe_oa.c:1573:18: note: in expansion of macro ‘FIELD_GET’ u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt); ^ Fixes: b6fd51c62119 ("drm/xe/oa/uapi: Define and parse OA stream properties") Signed-off-by: Geert Uytterhoeven Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240729092634.2227611-1-geert+renesas@glider.be Signed-off-by: Lucas De Marchi --- include/uapi/drm/xe_drm.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 29425d7fdc77..b6fbe4988f2e 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -1598,10 +1598,10 @@ enum drm_xe_oa_property_id { * b. Counter select c. Counter size and d. BC report. Also refer to the * oa_formats array in drivers/gpu/drm/xe/xe_oa.c. */ -#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xff << 0) -#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xff << 8) -#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xff << 16) -#define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xff << 24) +#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xffu << 0) +#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xffu << 8) +#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xffu << 16) +#define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xffu << 24) /** * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit