Pull drm fixes from Dave Airlie:
 "Regular weekly pull request, from sunny San Diego. Usual suspects in
  xe/i915/amdgpu with small fixes all over, then some minor fixes across
  a few other drivers. It's probably a bit on the heavy side, but most
  of the fixes seem well contained.

  core:
   - drm_dev_unplug UAF fix

  pagemap:
   - lock handling fix

  xe:
   - A number of teardown fixes
   - Skip over non-leaf PTE for PRL generation
   - Fix an uninitialized variable
   - Fix a missing runtime PM reference

  i915/display:
   - Fix #15771: Screen corruption and stuttering on P14s w/ 3K display
   - Fix for PSR entry setup frames count on rejected commit
   - Fix OOPS if firmware is not loaded and suspend is attempted
   - Fix unlikely NULL deref due to DC6 on probe

  amdgpu:
   - Fix gamma 2.2 colorop TFs
   - BO list fix
   - LTO fix
   - DC FP fix
   - DisplayID handling fix
   - DCN 2.01 fix
   - MMHUB boundary fixes
   - ISP fix
   - TLB fence fix
   - Hainan pm fix

  radeon:
   - Hainan pm fix

  vmwgfx:
   - memory leak fix
   - doc warning fix

  imagination:
   - deadlock fix
   - interrupt handling fixes

  dw-hdmi-qp:
   - multi channel audio fix"

* tag 'drm-fixes-2026-03-21' of https://gitlab.freedesktop.org/drm/kernel: (40 commits)
  drm/xe: Fix missing runtime PM reference in ccs_mode_store
  drm/xe: Open-code GGTT MMIO access protection
  drm/xe/lrc: Fix uninitialized new_ts when capturing context timestamp
  drm/xe/oa: Allow reading after disabling OA stream
  drm/xe: Skip over non leaf pte for PRL generation
  drm/xe/guc: Ensure CT state transitions via STOP before DISABLED
  drm/xe: Trigger queue cleanup if not in wedged mode 2
  drm/xe: Forcefully tear down exec queues in GuC submit fini
  drm/xe: Always kill exec queues in xe_guc_submit_pause_abort
  drm/xe/guc: Fail immediately on GuC load error
  drm/i915/gt: Check set_default_submission() before deferencing
  drm/radeon: apply state adjust rules to some additional HAINAN vairants
  drm/amdgpu: apply state adjust rules to some additional HAINAN vairants
  drm/amdgpu: rework how we handle TLB fences
  drm/bridge: dw-hdmi-qp: fix multi-channel audio output
  drm: Fix use-after-free on framebuffers and property blobs when calling drm_dev_unplug
  drm/amdgpu: Fix ISP segfault issue in kernel v7.0
  drm/amdgpu/gmc9.0: add bounds checking for cid
  drm/amdgpu/mmhub4.2.0: add bounds checking for cid
  drm/amdgpu/mmhub4.1.0: add bounds checking for cid
  ...
This commit is contained in:
Linus Torvalds
2026-03-20 18:21:27 -07:00
41 changed files with 325 additions and 167 deletions

View File

@@ -36,6 +36,7 @@
#define AMDGPU_BO_LIST_MAX_PRIORITY 32u
#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
+ #define AMDGPU_BO_LIST_MAX_ENTRIES (128 * 1024)
static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
@@ -188,6 +189,9 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
const uint32_t bo_number = in->bo_number;
struct drm_amdgpu_bo_list_entry *info;
+ if (bo_number > AMDGPU_BO_LIST_MAX_ENTRIES)
+ return -EINVAL;
/* copy the handle array from userspace to a kernel buffer */
if (likely(info_size == bo_info_size)) {
info = vmemdup_array_user(uptr, bo_number, info_size);
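The new AMDGPU_BO_LIST_MAX_ENTRIES cap bounds a userspace-supplied element count before it is used to size a kernel allocation. A minimal standalone C sketch of that guard (the 128K cap is taken from the hunk; all other names are illustrative, not the driver code):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define BO_LIST_MAX_ENTRIES (128 * 1024)

    struct bo_entry { uint32_t handle; uint32_t priority; };

    /* Duplicate a caller-supplied array, rejecting absurd counts up front so
     * the untrusted count can never drive an oversized allocation.
     */
    static struct bo_entry *dup_entry_array(const struct bo_entry *src,
                                            uint32_t count, int *err)
    {
        struct bo_entry *info;

        if (count > BO_LIST_MAX_ENTRIES) {
            *err = -EINVAL;
            return NULL;
        }

        info = calloc(count, sizeof(*info));   /* calloc checks the multiply */
        if (!info) {
            *err = -ENOMEM;
            return NULL;
        }

        memcpy(info, src, (size_t)count * sizeof(*info));
        *err = 0;
        return info;
    }

    int main(void)
    {
        struct bo_entry src[2] = { { 1, 0 }, { 2, 31 } };
        int err;
        struct bo_entry *copy = dup_entry_array(src, 2, &err);

        free(copy);
        return err ? 1 : 0;
    }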

View File

@@ -1069,7 +1069,10 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
}
/* Prepare a TLB flush fence to be attached to PTs */
- if (!params->unlocked) {
+ /* The check for need_tlb_fence should be dropped once we
+ * sort out the issues with KIQ/MES TLB invalidation timeouts.
+ */
+ if (!params->unlocked && vm->need_tlb_fence) {
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
/* Makes sure no PD/PT is freed before the flush */
@@ -2602,6 +2605,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ttm_lru_bulk_move_init(&vm->lru_bulk_move);
vm->is_compute_context = false;
+ vm->need_tlb_fence = amdgpu_userq_enabled(&adev->ddev);
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
@@ -2739,6 +2743,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_update);
vm->last_update = dma_fence_get_stub();
vm->is_compute_context = true;
+ vm->need_tlb_fence = true;
unreserve_bo:
amdgpu_bo_unreserve(vm->root.bo);

View File

@@ -441,6 +441,8 @@ struct amdgpu_vm {
struct ttm_lru_bulk_move lru_bulk_move;
/* Flag to indicate if VM is used for compute */
bool is_compute_context;
+ /* Flag to indicate if VM needs a TLB fence (KFD or KGD) */
+ bool need_tlb_fence;
/* Memory partition number, -1 means any partition */
int8_t mem_id;

View File

@@ -662,28 +662,35 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
} else {
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
- mmhub_cid = mmhub_client_ids_vega10[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega10) ?
+ mmhub_client_ids_vega10[cid][rw] : NULL;
break;
case IP_VERSION(9, 3, 0):
- mmhub_cid = mmhub_client_ids_vega12[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega12) ?
+ mmhub_client_ids_vega12[cid][rw] : NULL;
break;
case IP_VERSION(9, 4, 0):
- mmhub_cid = mmhub_client_ids_vega20[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega20) ?
+ mmhub_client_ids_vega20[cid][rw] : NULL;
break;
case IP_VERSION(9, 4, 1):
- mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_arcturus) ?
+ mmhub_client_ids_arcturus[cid][rw] : NULL;
break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 0):
- mmhub_cid = mmhub_client_ids_raven[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_raven) ?
+ mmhub_client_ids_raven[cid][rw] : NULL;
break;
case IP_VERSION(1, 5, 0):
case IP_VERSION(2, 4, 0):
- mmhub_cid = mmhub_client_ids_renoir[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_renoir) ?
+ mmhub_client_ids_renoir[cid][rw] : NULL;
break;
case IP_VERSION(1, 8, 0):
case IP_VERSION(9, 4, 2):
- mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_aldebaran) ?
+ mmhub_client_ids_aldebaran[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;

View File

@@ -129,7 +129,7 @@ static int isp_genpd_add_device(struct device *dev, void *data)
if (!pdev)
return -EINVAL;
- if (!dev->type->name) {
+ if (!dev->type || !dev->type->name) {
drm_dbg(&adev->ddev, "Invalid device type to add\n");
goto exit;
}
@@ -165,7 +165,7 @@ static int isp_genpd_remove_device(struct device *dev, void *data)
if (!pdev)
return -EINVAL;
- if (!dev->type->name) {
+ if (!dev->type || !dev->type->name) {
drm_dbg(&adev->ddev, "Invalid device type to remove\n");
goto exit;
}

View File

@@ -154,14 +154,17 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(2, 0, 0):
case IP_VERSION(2, 0, 2):
- mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_navi1x) ?
+ mmhub_client_ids_navi1x[cid][rw] : NULL;
break;
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
- mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_sienna_cichlid) ?
+ mmhub_client_ids_sienna_cichlid[cid][rw] : NULL;
break;
case IP_VERSION(2, 1, 2):
- mmhub_cid = mmhub_client_ids_beige_goby[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_beige_goby) ?
+ mmhub_client_ids_beige_goby[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;

View File

@@ -94,7 +94,8 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
case IP_VERSION(2, 3, 0):
case IP_VERSION(2, 4, 0):
case IP_VERSION(2, 4, 1):
- mmhub_cid = mmhub_client_ids_vangogh[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vangogh) ?
+ mmhub_client_ids_vangogh[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;

View File

@@ -110,7 +110,8 @@ mmhub_v3_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 1):
- mmhub_cid = mmhub_client_ids_v3_0_0[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_0_0) ?
+ mmhub_client_ids_v3_0_0[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;

View File

@@ -117,7 +117,8 @@ mmhub_v3_0_1_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 0, 1):
- mmhub_cid = mmhub_client_ids_v3_0_1[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_0_1) ?
+ mmhub_client_ids_v3_0_1[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;

View File

@@ -108,7 +108,8 @@ mmhub_v3_0_2_print_l2_protection_fault_status(struct amdgpu_device *adev,
"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
- mmhub_cid = mmhub_client_ids_v3_0_2[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_0_2) ?
+ mmhub_client_ids_v3_0_2[cid][rw] : NULL;
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
mmhub_cid ? mmhub_cid : "unknown", cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",

View File

@@ -102,7 +102,8 @@ mmhub_v4_1_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
status);
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
- mmhub_cid = mmhub_client_ids_v4_1_0[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v4_1_0) ?
+ mmhub_client_ids_v4_1_0[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;

View File

@@ -688,7 +688,8 @@ mmhub_v4_2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
status);
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(4, 2, 0):
- mmhub_cid = mmhub_client_ids_v4_2_0[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v4_2_0) ?
+ mmhub_client_ids_v4_2_0[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;
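All of the MMHUB hunks above apply the same clamp: the client id reported by the fault status register indexes a fixed name table, so it is checked against ARRAY_SIZE() before the lookup and falls back to NULL ("unknown") otherwise. A standalone C sketch of the pattern with an illustrative table (not the driver's data):

    #include <stdio.h>

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    static const char *const client_names[][2] = {
        { "MP0 read", "MP0 write" },
        { "VCN read", "VCN write" },
        { "DCE read", "DCE write" },
    };

    /* cid comes from hardware and is untrusted, so index the table only
     * after a bounds check; otherwise return NULL and let the caller
     * print "unknown".
     */
    static const char *lookup_client(unsigned int cid, unsigned int rw)
    {
        return cid < ARRAY_SIZE(client_names) ? client_names[cid][rw] : NULL;
    }

    int main(void)
    {
        const char *name = lookup_client(42, 0);   /* out of range */

        printf("Faulty client: %s\n", name ? name : "unknown");
        return 0;
    }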

View File

@@ -2554,7 +2554,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
fw_meta_info_params.fw_inst_const = adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
PSP_HEADER_BYTES_256;
- fw_meta_info_params.fw_bss_data = region_params.bss_data_size ? adev->dm.dmub_fw->data +
+ fw_meta_info_params.fw_bss_data = fw_meta_info_params.bss_data_size ? adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
le32_to_cpu(hdr->inst_const_bytes) : NULL;
fw_meta_info_params.custom_psp_footer_size = 0;
@@ -13119,7 +13119,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector,
u16 min_vfreq;
u16 max_vfreq;
- if (edid == NULL || edid->extensions == 0)
+ if (!edid || !edid->extensions)
return;
/* Find DisplayID extension */
@@ -13129,7 +13129,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector,
break;
}
- if (edid_ext == NULL)
+ if (i == edid->extensions)
return;
while (j < EDID_LENGTH) {
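The DisplayID part of this hunk replaces a "not found" test on a pointer that is reassigned every loop iteration (so it can never be NULL) with an index-versus-bound test. A standalone sketch of that idiom (block layout and tag values are illustrative only):

    #include <stdio.h>

    #define NUM_EXT 4
    #define DISPLAYID_TAG 0x70

    static const unsigned char ext_tags[NUM_EXT] = { 0x02, 0x02, 0xf0, 0x02 };

    int main(void)
    {
        const unsigned char *ext = NULL;
        int i;

        for (i = 0; i < NUM_EXT; i++) {
            ext = &ext_tags[i];     /* assigned every pass, so never NULL */
            if (*ext == DISPLAYID_TAG)
                break;
        }

        if (i == NUM_EXT) {         /* reliable "not found" check */
            puts("no DisplayID extension");
            return 0;
        }

        printf("DisplayID extension at block %d\n", i);
        return 0;
    }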

View File

@@ -37,19 +37,19 @@ const u64 amdgpu_dm_supported_degam_tfs =
BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
- BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22);
const u64 amdgpu_dm_supported_shaper_tfs =
BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) |
- BIT(DRM_COLOROP_1D_CURVE_GAMMA22);
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
const u64 amdgpu_dm_supported_blnd_tfs =
BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
- BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22);
#define MAX_COLOR_PIPELINE_OPS 10

View File

@@ -255,6 +255,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
BREAK_TO_DEBUGGER();
return NULL;
}
+ if (ctx->dce_version == DCN_VERSION_2_01) {
+ dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base;
+ }
if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
@@ -267,10 +271,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
- if (ctx->dce_version == DCN_VERSION_2_01) {
- dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- return &clk_mgr->base;
- }
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}

View File

@@ -1785,7 +1785,10 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_valid
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_START();
dcn32_override_min_req_memclk(dc, context);
+ DC_FP_END();
dcn32_override_min_req_dcfclk(dc, context);
BW_VAL_TRACE_END_WATERMARKS();

View File

@@ -3454,9 +3454,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6664) ||
(adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)) {
+ (adev->pdev->device == 0x6667) ||
+ (adev->pdev->device == 0x666F)) {
max_sclk = 75000;
}
if ((adev->pdev->revision == 0xC3) ||

View File

@@ -848,7 +848,7 @@ static int dw_hdmi_qp_config_audio_infoframe(struct dw_hdmi_qp *hdmi,
regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1);
regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1);
- regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1);
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[7], 1);
/* Enable ACR, AUDI, AMD */
dw_hdmi_qp_mod(hdmi,

View File

@@ -233,6 +233,7 @@ static void drm_events_release(struct drm_file *file_priv)
void drm_file_free(struct drm_file *file)
{
struct drm_device *dev;
+ int idx;
if (!file)
return;
@@ -249,9 +250,11 @@ void drm_file_free(struct drm_file *file)
drm_events_release(file);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ drm_dev_enter(dev, &idx)) {
drm_fb_release(file);
drm_property_destroy_user_blobs(dev, file);
+ drm_dev_exit(idx);
}
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
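The fix brackets the framebuffer and property-blob release with drm_dev_enter()/drm_dev_exit() so it becomes a no-op once drm_dev_unplug() has run. A kernel-style sketch of that pattern (compiles only in-tree; the function name is illustrative, not part of the fix):

    #include <drm/drm_drv.h>

    /* Skip device-backed cleanup once the device has been unplugged,
     * mirroring the drm_file_free() change above.
     */
    static void example_release_kms_state(struct drm_device *dev)
    {
        int idx;

        if (!drm_dev_enter(dev, &idx))
            return;   /* drm_dev_unplug() already ran; nothing safe to touch */

        /* ... release framebuffers, property blobs, etc. ... */

        drm_dev_exit(idx);
    }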

View File

@@ -577,10 +577,13 @@ void drm_mode_config_cleanup(struct drm_device *dev)
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ if (list_empty(&fb->filp_head) || drm_framebuffer_read_refcount(fb) > 1) {
struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
drm_framebuffer_print_info(&p, 1, fb);
+ }
+ list_del_init(&fb->filp_head);
drm_framebuffer_free(&fb->base.refcount);
}

View File

@@ -65,18 +65,14 @@ static void drm_pagemap_cache_fini(void *arg)
drm_dbg(cache->shrinker->drm, "Destroying dpagemap cache.\n");
spin_lock(&cache->lock);
dpagemap = cache->dpagemap;
- if (!dpagemap) {
- spin_unlock(&cache->lock);
- goto out;
- }
- if (drm_pagemap_shrinker_cancel(dpagemap)) {
cache->dpagemap = NULL;
+ if (dpagemap && !drm_pagemap_shrinker_cancel(dpagemap))
+ dpagemap = NULL;
spin_unlock(&cache->lock);
- drm_pagemap_destroy(dpagemap, false);
- }
- out:
+ if (dpagemap)
+ drm_pagemap_destroy(dpagemap, false);
mutex_destroy(&cache->lookup_mutex);
kfree(cache);
}
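The reworked teardown detaches the pointer and decides whether to destroy it while holding the spinlock, then calls the destructor only after the lock is dropped. A standalone pthread sketch of that shape (names are illustrative, not the drm_pagemap API):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cache {
        pthread_mutex_t lock;
        char *payload;              /* stands in for cache->dpagemap */
    };

    static void cache_fini(struct cache *cache)
    {
        char *payload;

        pthread_mutex_lock(&cache->lock);
        payload = cache->payload;
        cache->payload = NULL;      /* detach under the lock */
        pthread_mutex_unlock(&cache->lock);

        if (payload)                /* destroy outside the lock */
            free(payload);

        pthread_mutex_destroy(&cache->lock);
    }

    int main(void)
    {
        struct cache c = { .lock = PTHREAD_MUTEX_INITIALIZER };

        c.payload = malloc(16);
        cache_fini(&c);
        puts("teardown done");
        return 0;
    }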

View File

@@ -806,7 +806,7 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
power_domains->dc_state, val & mask);
enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
- dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
+ dc6_was_enabled = power_domains->dc_state & DC_STATE_EN_UPTO_DC6;
if (!dc6_was_enabled && enable_dc6)
intel_dmc_update_dc6_allowed_count(display, true);

View File

@@ -1186,6 +1186,7 @@ struct intel_crtc_state {
u32 dc3co_exitline;
u16 su_y_granularity;
u8 active_non_psr_pipes;
+ u8 entry_setup_frames;
const char *no_psr_reason;
/*

View File

@@ -1599,8 +1599,7 @@ static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *
return false;
mutex_lock(&power_domains->lock);
- dc6_enabled = intel_de_read(display, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC6;
+ dc6_enabled = power_domains->dc_state & DC_STATE_EN_UPTO_DC6;
if (dc6_enabled)
intel_dmc_update_dc6_allowed_count(display, false);

View File

@@ -1717,7 +1717,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, conn_state, adjusted_mode);
if (entry_setup_frames >= 0) {
- intel_dp->psr.entry_setup_frames = entry_setup_frames;
+ crtc_state->entry_setup_frames = entry_setup_frames;
} else {
crtc_state->no_psr_reason = "PSR setup timing not met";
drm_dbg_kms(display->drm,
@@ -1815,7 +1815,7 @@ static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
- return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
+ return (DISPLAY_VER(display) == 20 && crtc_state->entry_setup_frames > 0 &&
!crtc_state->has_sel_update);
}
@@ -2189,6 +2189,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines;
intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines;
+ intel_dp->psr.entry_setup_frames = crtc_state->entry_setup_frames;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -3109,6 +3110,8 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
* - Display WA #1136: skl, bxt
*/
if (intel_crtc_needs_modeset(new_crtc_state) ||
+ new_crtc_state->update_m_n ||
+ new_crtc_state->update_lrr ||
!new_crtc_state->has_psr ||
!new_crtc_state->active_planes ||
new_crtc_state->has_sel_update != psr->sel_update_enabled ||

View File

@@ -1967,6 +1967,7 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
if (engine->sanitize)
engine->sanitize(engine);
+ if (engine->set_default_submission)
engine->set_default_submission(engine);
}
}
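The oops fix is a plain NULL check on an optional ops-table hook before calling it. A standalone sketch of the idiom (names are illustrative, not i915's):

    #include <stdio.h>

    struct engine_ops {
        void (*sanitize)(const char *name);
        void (*set_default_submission)(const char *name);   /* optional */
    };

    static void generic_sanitize(const char *name)
    {
        printf("%s: sanitized\n", name);
    }

    static void reset_engine(const char *name, const struct engine_ops *ops)
    {
        if (ops->sanitize)
            ops->sanitize(name);

        /* Dereferencing a NULL hook would oops; skip it instead. */
        if (ops->set_default_submission)
            ops->set_default_submission(name);
    }

    int main(void)
    {
        const struct engine_ops ops = { .sanitize = generic_sanitize };

        reset_engine("rcs0", &ops);
        return 0;
    }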

View File

@@ -225,29 +225,12 @@ static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
}
if (pvr_dev->has_safety_events) {
- int err;
- /*
- * Ensure the GPU is powered on since some safety events (such
- * as ECC faults) can happen outside of job submissions, which
- * are otherwise the only time a power reference is held.
- */
- err = pvr_power_get(pvr_dev);
- if (err) {
- drm_err_ratelimited(drm_dev,
- "%s: could not take power reference (%d)\n",
- __func__, err);
- return ret;
- }
while (pvr_device_safety_irq_pending(pvr_dev)) {
pvr_device_safety_irq_clear(pvr_dev);
pvr_device_handle_safety_events(pvr_dev);
ret = IRQ_HANDLED;
}
- pvr_power_put(pvr_dev);
}
return ret;

View File

@@ -90,11 +90,11 @@ pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
}
static int
- pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
+ pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset, bool rpm_suspend)
{
- if (!hard_reset) {
int err;
+ if (!hard_reset) {
cancel_delayed_work_sync(&pvr_dev->watchdog.work);
err = pvr_power_request_idle(pvr_dev);
@@ -106,29 +106,47 @@ pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
return err;
}
- return pvr_fw_stop(pvr_dev);
+ if (rpm_suspend) {
+ /* This also waits for late processing of GPU or firmware IRQs in other cores */
+ disable_irq(pvr_dev->irq);
+ }
+ err = pvr_fw_stop(pvr_dev);
+ if (err && rpm_suspend)
+ enable_irq(pvr_dev->irq);
+ return err;
}
static int
- pvr_power_fw_enable(struct pvr_device *pvr_dev)
+ pvr_power_fw_enable(struct pvr_device *pvr_dev, bool rpm_resume)
{
int err;
+ if (rpm_resume)
+ enable_irq(pvr_dev->irq);
err = pvr_fw_start(pvr_dev);
if (err)
- return err;
+ goto out;
err = pvr_wait_for_fw_boot(pvr_dev);
if (err) {
drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
pvr_fw_stop(pvr_dev);
- return err;
+ goto out;
}
queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
msecs_to_jiffies(WATCHDOG_TIME_MS));
return 0;
+ out:
+ if (rpm_resume)
+ disable_irq(pvr_dev->irq);
+ return err;
}
bool
@@ -361,7 +379,7 @@ pvr_power_device_suspend(struct device *dev)
return -EIO;
if (pvr_dev->fw_dev.booted) {
- err = pvr_power_fw_disable(pvr_dev, false);
+ err = pvr_power_fw_disable(pvr_dev, false, true);
if (err)
goto err_drm_dev_exit;
}
@@ -391,7 +409,7 @@ pvr_power_device_resume(struct device *dev)
goto err_drm_dev_exit;
if (pvr_dev->fw_dev.booted) {
- err = pvr_power_fw_enable(pvr_dev);
+ err = pvr_power_fw_enable(pvr_dev, true);
if (err)
goto err_power_off;
}
@@ -510,7 +528,16 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
}
/* Disable IRQs for the duration of the reset. */
+ if (hard_reset) {
disable_irq(pvr_dev->irq);
+ } else {
+ /*
+ * Soft reset is triggered as a response to a FW command to the Host and is
+ * processed from the threaded IRQ handler. This code cannot (nor needs to)
+ * wait for any IRQ processing to complete.
+ */
+ disable_irq_nosync(pvr_dev->irq);
+ }
do {
if (hard_reset) {
@@ -518,7 +545,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
queues_disabled = true;
}
- err = pvr_power_fw_disable(pvr_dev, hard_reset);
+ err = pvr_power_fw_disable(pvr_dev, hard_reset, false);
if (!err) {
if (hard_reset) {
pvr_dev->fw_dev.booted = false;
@@ -541,7 +568,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
pvr_fw_irq_clear(pvr_dev);
- err = pvr_power_fw_enable(pvr_dev);
+ err = pvr_power_fw_enable(pvr_dev, false);
}
if (err && hard_reset)

View File

@@ -2915,9 +2915,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6660) ||
(rdev->pdev->device == 0x6664) ||
(rdev->pdev->device == 0x6665) ||
- (rdev->pdev->device == 0x6667)) {
+ (rdev->pdev->device == 0x6667) ||
+ (rdev->pdev->device == 0x666F)) {
max_sclk = 75000;
}
if ((rdev->pdev->revision == 0xC3) ||

View File

@@ -96,12 +96,17 @@ struct vmwgfx_hash_item {
struct vmw_res_func;
+ struct vmw_bo;
+ struct vmw_bo;
+ struct vmw_resource_dirty;
/**
- * struct vmw-resource - base class for hardware resources
+ * struct vmw_resource - base class for hardware resources
 *
 * @kref: For refcounting.
 * @dev_priv: Pointer to the device private for this resource. Immutable.
 * @id: Device id. Protected by @dev_priv::resource_lock.
+ * @used_prio: Priority for this resource.
 * @guest_memory_size: Guest memory buffer size. Immutable.
 * @res_dirty: Resource contains data not yet in the guest memory buffer.
 * Protected by resource reserved.
@@ -117,18 +122,16 @@ struct vmw_res_func;
 * pin-count greater than zero. It is not on the resource LRU lists and its
 * guest memory buffer is pinned. Hence it can't be evicted.
 * @func: Method vtable for this resource. Immutable.
- * @mob_node; Node for the MOB guest memory rbtree. Protected by
+ * @mob_node: Node for the MOB guest memory rbtree. Protected by
 * @guest_memory_bo reserved.
 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
 * @binding_head: List head for the context binding list. Protected by
 * the @dev_priv::binding_mutex
+ * @dirty: resource's dirty tracker
 * @res_free: The resource destructor.
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
- struct vmw_bo;
- struct vmw_bo;
- struct vmw_resource_dirty;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
@@ -196,8 +199,8 @@ struct vmw_surface_offset;
 * @quality_level: Quality level.
 * @autogen_filter: Filter for automatically generated mipmaps.
 * @array_size: Number of array elements for a 1D/2D texture. For cubemap
- texture number of faces * array_size. This should be 0 for pre
- SM4 device.
+ * texture number of faces * array_size. This should be 0 for pre
+ * SM4 device.
 * @buffer_byte_stride: Buffer byte stride.
 * @num_sizes: Size of @sizes. For GB surface this should always be 1.
 * @base_size: Surface dimension.
@@ -265,18 +268,24 @@ struct vmw_fifo_state {
struct vmw_res_cache_entry {
uint32_t handle;
struct vmw_resource *res;
+ /* private: */
void *private;
+ /* public: */
unsigned short valid_handle;
unsigned short valid;
};
/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
+ * @vmw_dma_alloc_coherent: Use TTM coherent pages
+ * @vmw_dma_map_populate: Unmap from DMA just after unpopulate
+ * @vmw_dma_map_bind: Unmap from DMA just before unbind
 */
enum vmw_dma_map_mode {
- vmw_dma_alloc_coherent, /* Use TTM coherent pages */
- vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
- vmw_dma_map_bind, /* Unmap from DMA just before unbind */
+ vmw_dma_alloc_coherent,
+ vmw_dma_map_populate,
+ vmw_dma_map_bind,
+ /* private: */
vmw_dma_map_max
};
@@ -284,8 +293,11 @@ enum vmw_dma_map_mode {
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
+ * @mode: which page mapping mode to use
+ * @pages: Array of page pointers to the pages.
+ * @addrs: DMA addresses to the pages if coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information
- * @num_regions: Number of regions with device-address contiguous pages
+ * @num_pages: Number of @pages
 */
struct vmw_sg_table {
enum vmw_dma_map_mode mode;
@@ -353,6 +365,7 @@ struct vmw_ctx_validation_info;
 * than from user-space
 * @fp: If @kernel is false, points to the file of the client. Otherwise
 * NULL
+ * @filp: DRM state for this file
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
@@ -729,7 +742,7 @@ extern void vmw_svga_disable(struct vmw_private *dev_priv);
bool vmwgfx_supported(struct vmw_private *vmw);
- /**
+ /*
 * GMR utilities - vmwgfx_gmr.c
 */
@@ -739,7 +752,7 @@ extern int vmw_gmr_bind(struct vmw_private *dev_priv,
int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
- /**
+ /*
 * User handles
 */
struct vmw_user_object {
@@ -759,7 +772,7 @@ void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
void vmw_user_object_unmap(struct vmw_user_object *uo);
bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
- /**
+ /*
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;
@@ -819,7 +832,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
return !RB_EMPTY_NODE(&res->mob_node);
}
- /**
+ /*
 * GEM related functionality - vmwgfx_gem.c
 */
struct vmw_bo_params;
@@ -833,7 +846,7 @@ extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
- /**
+ /*
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */
@@ -846,7 +859,7 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
- /**
+ /*
 * Fifo utilities - vmwgfx_fifo.c
 */
@@ -880,9 +893,11 @@ extern int vmw_cmd_flush(struct vmw_private *dev_priv,
/**
- * vmw_fifo_caps - Returns the capabilities of the FIFO command
+ * vmw_fifo_caps - Get the capabilities of the FIFO command
 * queue or 0 if fifo memory isn't present.
 * @dev_priv: The device private context
+ *
+ * Returns: capabilities of the FIFO command or %0 if fifo memory not present
 */
static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
{
@@ -893,9 +908,11 @@ static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
/**
- * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
- * is enabled in the FIFO.
+ * vmw_is_cursor_bypass3_enabled - check Cursor Bypass 3 enabled setting
+ * in the FIFO.
 * @dev_priv: The device private context
+ *
+ * Returns: %true iff Cursor Bypass 3 is enabled in the FIFO
 */
static inline bool
vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
@@ -903,7 +920,7 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
}
- /**
+ /*
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */
@@ -927,7 +944,7 @@ extern void vmw_piter_start(struct vmw_piter *viter,
 *
 * @viter: Pointer to the iterator to advance.
 *
- * Returns false if past the list of pages, true otherwise.
+ * Returns: false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
@@ -939,7 +956,7 @@ static inline bool vmw_piter_next(struct vmw_piter *viter)
 *
 * @viter: Pointer to the iterator
 *
- * Returns the DMA address of the page pointed to by @viter.
+ * Returns: the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
@@ -951,14 +968,14 @@ static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
 *
 * @viter: Pointer to the iterator
 *
- * Returns the DMA address of the page pointed to by @viter.
+ * Returns: the DMA address of the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
return viter->pages[viter->i];
}
- /**
+ /*
 * Command submission - vmwgfx_execbuf.c
 */
@@ -993,7 +1010,7 @@ extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
int32_t out_fence_fd);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
- /**
+ /*
 * IRQs and wating - vmwgfx_irq.c
 */
@@ -1016,7 +1033,7 @@ bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count);
- /**
+ /*
 * Kernel modesetting - vmwgfx_kms.c
 */
@@ -1048,7 +1065,7 @@ extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
- /**
+ /*
 * Overlay control - vmwgfx_overlay.c
 */
@@ -1063,20 +1080,20 @@ int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
- /**
+ /*
 * GMR Id manager
 */
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
- /**
+ /*
 * System memory manager
 */
int vmw_sys_man_init(struct vmw_private *dev_priv);
void vmw_sys_man_fini(struct vmw_private *dev_priv);
- /**
+ /*
 * Prime - vmwgfx_prime.c
 */
@@ -1292,7 +1309,7 @@ extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
- * @memcpy: Which memcpy function to use.
+ * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
struct drm_rect rect;
@@ -1380,13 +1397,14 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
+ * @fmt: format string for the args
 *
 * This macro is for debugging vmwgfx mode-setting code.
 */
#define VMW_DEBUG_KMS(fmt, ...) \
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
- /**
+ /*
 * Inline helper functions
 */
@@ -1417,11 +1435,13 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
/**
 * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
- *
+ * @vmw: The device private structure
 * @fifo_reg: The fifo register to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
+ *
+ * Returns: the value read
 */
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
@@ -1431,8 +1451,9 @@ static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
/**
 * vmw_fifo_mem_write - Perform a MMIO write to volatile memory
- *
- * @addr: The fifo register to write to
+ * @vmw: The device private structure
+ * @fifo_reg: The fifo register to write to
+ * @value: The value to write
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.

View File

@@ -771,6 +771,7 @@ err_out:
ret = vmw_bo_dirty_add(bo);
if (!ret && surface && surface->res.func->dirty_alloc) {
surface->res.coherent = true;
+ if (surface->res.dirty == NULL)
ret = surface->res.func->dirty_alloc(&surface->res);
}
ttm_bo_unreserve(&bo->tbo);

View File

@@ -313,6 +313,8 @@ static void dev_fini_ggtt(void *arg)
{
struct xe_ggtt *ggtt = arg;
+ scoped_guard(mutex, &ggtt->lock)
+ ggtt->flags &= ~XE_GGTT_FLAGS_ONLINE;
drain_workqueue(ggtt->wq);
}
@@ -377,6 +379,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
if (err)
return err;
+ ggtt->flags |= XE_GGTT_FLAGS_ONLINE;
err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
if (err)
return err;
@@ -410,13 +413,10 @@ static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
static void ggtt_node_remove(struct xe_ggtt_node *node)
{
struct xe_ggtt *ggtt = node->ggtt;
- struct xe_device *xe = tile_to_xe(ggtt->tile);
bool bound;
- int idx;
- bound = drm_dev_enter(&xe->drm, &idx);
mutex_lock(&ggtt->lock);
+ bound = ggtt->flags & XE_GGTT_FLAGS_ONLINE;
if (bound)
xe_ggtt_clear(ggtt, node->base.start, node->base.size);
drm_mm_remove_node(&node->base);
@@ -429,8 +429,6 @@ static void ggtt_node_remove(struct xe_ggtt_node *node)
if (node->invalidate_on_remove)
xe_ggtt_invalidate(ggtt);
- drm_dev_exit(idx);
free_node:
xe_ggtt_node_fini(node);
}

View File

@@ -29,10 +29,13 @@ struct xe_ggtt {
u64 size;
#define XE_GGTT_FLAGS_64K BIT(0)
+ #define XE_GGTT_FLAGS_ONLINE BIT(1)
/**
 * @flags: Flags for this GGTT
 * Acceptable flags:
 * - %XE_GGTT_FLAGS_64K - if PTE size is 64K. Otherwise, regular is 4K.
+ * - %XE_GGTT_FLAGS_ONLINE - is GGTT online, protected by ggtt->lock
+ * after init
 */
unsigned int flags;
/** @scratch: Internal object allocation used as a scratch page */

View File

@@ -12,6 +12,7 @@
#include "xe_gt_printk.h" #include "xe_gt_printk.h"
#include "xe_gt_sysfs.h" #include "xe_gt_sysfs.h"
#include "xe_mmio.h" #include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h" #include "xe_sriov.h"
static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
@@ -150,6 +151,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines); xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines; gt->ccs_mode = num_engines;
xe_gt_record_user_engines(gt); xe_gt_record_user_engines(gt);
guard(xe_pm_runtime)(xe);
xe_gt_reset(gt); xe_gt_reset(gt);
} }
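guard(xe_pm_runtime)(xe) takes a runtime PM reference for the rest of the scope and drops it on every exit path; it builds on the kernel's <linux/cleanup.h> scope guards. A standalone userspace sketch of the same mechanism using the compiler cleanup attribute (all names are illustrative stand-ins, not the xe API):

    #include <stdio.h>

    struct device { int wakeref; };

    static void pm_get(struct device *dev) { dev->wakeref++; puts("get"); }
    static void pm_put(struct device *dev) { dev->wakeref--; puts("put"); }

    struct pm_guard { struct device *dev; };

    static void pm_guard_release(struct pm_guard *g) { pm_put(g->dev); }

    /* Declare a guard object whose release hook runs when it goes out of
     * scope, then take the reference it is responsible for dropping.
     */
    #define PM_GUARD(name, dev_ptr)                                        \
        struct pm_guard name __attribute__((cleanup(pm_guard_release)))   \
            = { .dev = (dev_ptr) };                                        \
        pm_get(dev_ptr)

    static void reset_hw(struct device *dev)
    {
        PM_GUARD(pm, dev);      /* reference held until the end of this scope */

        if (dev->wakeref < 1)
            return;             /* pm_put still runs on this early return */

        puts("resetting while the device is awake");
    }                           /* pm_put also runs here */

    int main(void)
    {
        struct device dev = { 0 };

        reset_hw(&dev);
        return 0;
    }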

View File

@@ -1124,14 +1124,14 @@ static int guc_wait_ucode(struct xe_guc *guc)
struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
u32 before_freq, act_freq, cur_freq;
u32 status = 0, tries = 0;
+ int load_result, ret;
ktime_t before;
u64 delta_ms;
- int ret;
before_freq = xe_guc_pc_get_act_freq(guc_pc);
before = ktime_get();
- ret = poll_timeout_us(ret = guc_load_done(gt, &status, &tries), ret,
+ ret = poll_timeout_us(load_result = guc_load_done(gt, &status, &tries), load_result,
10 * USEC_PER_MSEC,
GUC_LOAD_TIMEOUT_SEC * USEC_PER_SEC, false);
@@ -1139,7 +1139,7 @@ static int guc_wait_ucode(struct xe_guc *guc)
act_freq = xe_guc_pc_get_act_freq(guc_pc);
cur_freq = xe_guc_pc_get_cur_freq_fw(guc_pc);
- if (ret) {
+ if (ret || load_result <= 0) {
xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz)\n",
status, delta_ms, xe_guc_pc_get_act_freq(guc_pc),
xe_guc_pc_get_cur_freq_fw(guc_pc));
@@ -1347,15 +1347,37 @@ int xe_guc_enable_communication(struct xe_guc *guc)
return 0;
}
- int xe_guc_suspend(struct xe_guc *guc)
+ /**
+ * xe_guc_softreset() - Soft reset GuC
+ * @guc: The GuC object
+ *
+ * Send soft reset command to GuC through mmio send.
+ *
+ * Return: 0 if success, otherwise error code
+ */
+ int xe_guc_softreset(struct xe_guc *guc)
{
- struct xe_gt *gt = guc_to_gt(guc);
u32 action[] = {
XE_GUC_ACTION_CLIENT_SOFT_RESET,
};
int ret;
+ if (!xe_uc_fw_is_running(&guc->fw))
+ return 0;
ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
+ if (ret)
+ return ret;
+ return 0;
+ }
+ int xe_guc_suspend(struct xe_guc *guc)
+ {
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+ ret = xe_guc_softreset(guc);
if (ret) {
xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret));
return ret;

View File

@@ -44,6 +44,7 @@ int xe_guc_opt_in_features_enable(struct xe_guc *guc);
void xe_guc_runtime_suspend(struct xe_guc *guc);
void xe_guc_runtime_resume(struct xe_guc *guc);
int xe_guc_suspend(struct xe_guc *guc);
+ int xe_guc_softreset(struct xe_guc *guc);
void xe_guc_notify(struct xe_guc *guc);
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr);
int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len);

View File

@@ -345,6 +345,7 @@ static void guc_action_disable_ct(void *arg)
{
struct xe_guc_ct *ct = arg;
+ xe_guc_ct_stop(ct);
guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
}

View File

@@ -48,6 +48,8 @@
#define XE_GUC_EXEC_QUEUE_CGP_CONTEXT_ERROR_LEN 6
+ static int guc_submit_reset_prepare(struct xe_guc *guc);
static struct xe_guc *
exec_queue_to_guc(struct xe_exec_queue *q)
{
@@ -239,7 +241,7 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
EXEC_QUEUE_STATE_BANNED));
}
- static void guc_submit_fini(struct drm_device *drm, void *arg)
+ static void guc_submit_sw_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
struct xe_device *xe = guc_to_xe(guc);
@@ -257,6 +259,19 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
xa_destroy(&guc->submission_state.exec_queue_lookup);
}
+ static void guc_submit_fini(void *arg)
+ {
+ struct xe_guc *guc = arg;
+ /* Forcefully kill any remaining exec queues */
+ xe_guc_ct_stop(&guc->ct);
+ guc_submit_reset_prepare(guc);
+ xe_guc_softreset(guc);
+ xe_guc_submit_stop(guc);
+ xe_uc_fw_sanitize(&guc->fw);
+ xe_guc_submit_pause_abort(guc);
+ }
static void guc_submit_wedged_fini(void *arg)
{
struct xe_guc *guc = arg;
@@ -326,7 +341,11 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
guc->submission_state.initialized = true;
- return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
+ err = drmm_add_action_or_reset(&xe->drm, guc_submit_sw_fini, guc);
+ if (err)
+ return err;
+ return devm_add_action_or_reset(xe->drm.dev, guc_submit_fini, guc);
}
/*
@@ -1252,6 +1271,7 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
 */
void xe_guc_submit_wedge(struct xe_guc *guc)
{
+ struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
unsigned long index;
@@ -1266,12 +1286,12 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
if (!guc->submission_state.initialized)
return;
+ if (xe->wedged.mode == 2) {
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
- xe_gt_err(gt, "Failed to register clean-up in wedged.mode=%s; "
- "Although device is wedged.\n",
- xe_wedged_mode_to_string(XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET));
+ xe_gt_err(gt, "Failed to register clean-up on wedged.mode=2; "
+ "Although device is wedged.\n");
return;
}
@@ -1280,6 +1300,14 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
if (xe_exec_queue_get_unless_zero(q))
set_exec_queue_wedged(q);
mutex_unlock(&guc->submission_state.lock);
+ } else {
+ /* Forcefully kill any remaining exec queues, signal fences */
+ guc_submit_reset_prepare(guc);
+ xe_guc_submit_stop(guc);
+ xe_guc_softreset(guc);
+ xe_uc_fw_sanitize(&guc->fw);
+ xe_guc_submit_pause_abort(guc);
+ }
}
static bool guc_submit_hint_wedged(struct xe_guc *guc)
@@ -2230,6 +2258,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
+ bool do_destroy = false;
/* Stop scheduling + flush any DRM scheduler operations */
xe_sched_submission_stop(sched);
@@ -2237,7 +2266,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
/* Clean up lost G2H + reset engine state */
if (exec_queue_registered(q)) {
if (exec_queue_destroyed(q))
- __guc_exec_queue_destroy(guc, q);
+ do_destroy = true;
}
if (q->guc->suspend_pending) {
set_exec_queue_suspended(q);
@@ -2273,18 +2302,15 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
xe_guc_exec_queue_trigger_cleanup(q);
}
}
+ if (do_destroy)
+ __guc_exec_queue_destroy(guc, q);
}
- int xe_guc_submit_reset_prepare(struct xe_guc *guc)
+ static int guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
- if (xe_gt_WARN_ON(guc_to_gt(guc), vf_recovery(guc)))
- return 0;
- if (!guc->submission_state.initialized)
- return 0;
/*
 * Using an atomic here rather than submission_state.lock as this
 * function can be called while holding the CT lock (engine reset
@@ -2299,6 +2325,17 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
return ret;
}
+ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
+ {
+ if (xe_gt_WARN_ON(guc_to_gt(guc), vf_recovery(guc)))
+ return 0;
+ if (!guc->submission_state.initialized)
+ return 0;
+ return guc_submit_reset_prepare(guc);
+ }
void xe_guc_submit_reset_wait(struct xe_guc *guc)
{
wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
@@ -2695,8 +2732,7 @@ void xe_guc_submit_pause_abort(struct xe_guc *guc)
continue;
xe_sched_submission_start(sched);
- if (exec_queue_killed_or_banned_or_wedged(q))
- xe_guc_exec_queue_trigger_cleanup(q);
+ guc_exec_queue_kill(q);
}
mutex_unlock(&guc->submission_state.lock);
}

View File

@@ -2413,14 +2413,14 @@ static int get_ctx_timestamp(struct xe_lrc *lrc, u32 engine_id, u64 *reg_ctx_ts)
 * @lrc: Pointer to the lrc.
 *
 * Return latest ctx timestamp. With support for active contexts, the
- * calculation may bb slightly racy, so follow a read-again logic to ensure that
+ * calculation may be slightly racy, so follow a read-again logic to ensure that
 * the context is still active before returning the right timestamp.
 *
 * Returns: New ctx timestamp value
 */
u64 xe_lrc_timestamp(struct xe_lrc *lrc)
{
- u64 lrc_ts, reg_ts, new_ts;
+ u64 lrc_ts, reg_ts, new_ts = lrc->ctx_timestamp;
u32 engine_id;
lrc_ts = xe_lrc_ctx_timestamp(lrc);

View File

@@ -543,8 +543,7 @@ static ssize_t xe_oa_read(struct file *file, char __user *buf,
size_t offset = 0;
int ret;
- /* Can't read from disabled streams */
- if (!stream->enabled || !stream->sample)
+ if (!stream->sample)
return -EINVAL;
if (!(file->f_flags & O_NONBLOCK)) {
@@ -1460,6 +1459,10 @@ static void xe_oa_stream_disable(struct xe_oa_stream *stream)
if (stream->sample)
hrtimer_cancel(&stream->poll_check_timer);
+ /* Update stream->oa_buffer.tail to allow any final reports to be read */
+ if (xe_oa_buffer_check_unlocked(stream))
+ wake_up(&stream->poll_wq);
}
static int xe_oa_enable_preempt_timeslice(struct xe_oa_stream *stream) static int xe_oa_enable_preempt_timeslice(struct xe_oa_stream *stream)

View File

@@ -1655,14 +1655,35 @@ static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
XE_WARN_ON(!level);
/* Check for leaf node */
if (xe_walk->prl && xe_page_reclaim_list_valid(xe_walk->prl) &&
- (!xe_child->base.children || !xe_child->base.children[first])) {
+ xe_child->level <= MAX_HUGEPTE_LEVEL) {
struct iosys_map *leaf_map = &xe_child->bo->vmap;
pgoff_t count = xe_pt_num_entries(addr, next, xe_child->level, walk);
for (pgoff_t i = 0; i < count; i++) {
- u64 pte = xe_map_rd(xe, leaf_map, (first + i) * sizeof(u64), u64);
+ u64 pte;
int ret;
+ /*
+ * If not a leaf pt, skip unless non-leaf pt is interleaved between
+ * leaf ptes which causes the page walk to skip over the child leaves
+ */
+ if (xe_child->base.children && xe_child->base.children[first + i]) {
+ u64 pt_size = 1ULL << walk->shifts[xe_child->level];
+ bool edge_pt = (i == 0 && !IS_ALIGNED(addr, pt_size)) ||
+ (i == count - 1 && !IS_ALIGNED(next, pt_size));
+ if (!edge_pt) {
+ xe_page_reclaim_list_abort(xe_walk->tile->primary_gt,
+ xe_walk->prl,
+ "PT is skipped by walk at level=%u offset=%lu",
+ xe_child->level, first + i);
+ break;
+ }
+ continue;
+ }
+ pte = xe_map_rd(xe, leaf_map, (first + i) * sizeof(u64), u64);
/*
 * In rare scenarios, pte may not be written yet due to racy conditions.
 * In such cases, invalidate the PRL and fallback to full PPC invalidation.
@@ -1674,9 +1695,8 @@ static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
}
/* Ensure it is a defined page */
- xe_tile_assert(xe_walk->tile,
- xe_child->level == 0 ||
- (pte & (XE_PTE_PS64 | XE_PDE_PS_2M | XE_PDPE_PS_1G)));
+ xe_tile_assert(xe_walk->tile, xe_child->level == 0 ||
+ (pte & (XE_PDE_PS_2M | XE_PDPE_PS_1G)));
/* An entry should be added for 64KB but contigious 4K have XE_PTE_PS64 */
if (pte & XE_PTE_PS64)
@@ -1701,11 +1721,11 @@ static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
killed = xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);
/*
- * Verify PRL is active and if entry is not a leaf pte (base.children conditions),
- * there is a potential need to invalidate the PRL if any PTE (num_live) are dropped.
+ * Verify if any PTE are potentially dropped at non-leaf levels, either from being
+ * killed or the page walk covers the region.
 */
- if (xe_walk->prl && level > 1 && xe_child->num_live &&
- xe_child->base.children && xe_child->base.children[first]) {
+ if (xe_walk->prl && xe_page_reclaim_list_valid(xe_walk->prl) &&
+ xe_child->level > MAX_HUGEPTE_LEVEL && xe_child->num_live) {
bool covered = xe_pt_covers(addr, next, xe_child->level, &xe_walk->base);
/*