
drm fixes for 6.17-rc3

rust:
 - drm device memory layout and safety fixes
 
 tests:
 - Endianness fixes
 
 gpuvm:
 - docs warning fix
 
 panic:
 - fix division on 32-bit arm
 
 i915:
 - TypeC DP display Fixes
 - Silence rpm wakeref asserts on GEN11_GU_MISC_IIR access
 - Relocate compression repacking WA for JSL/EHL
 
 xe:
 - xe_vm_create fixes
 - fix vm bind ioctl double free
 
 amdgpu:
 - Replay fixes
 - SMU14 fix
 - Null check DC fixes
 - DCE6 DC fixes
 - Misc DC fixes
 
 bridge:
 - analogix_dp: devm_drm_bridge_alloc() error handling fix
 
 habanalabs:
 - Memory deallocation fix
 
 hibmc:
 - modesetting black screen fixes
 - fix UAF on irq
 - fix leak on i2c failure path
 
 nouveau:
 - memory leak fixes
 - typos
 
 rockchip:
 - Kconfig fix
 - register caching fix
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmio5O4ACgkQDHTzWXnE
 hr7CTQ//b+Qz9Qt/hxqoKVO7dX9RDQmXrR+BxCiQTkVGdzlrMKCLS4s5FPElq5II
 wSQ/Ya3r6rdy9MP8Jf+LTxY1UDHaAqhjE5cO/ETfzgE/2I3/rOb92/ITnCrA8ATc
 w2w1oM1pZ6SI9+Aigmbb6ivTKDjI2m/+VAR2YFfXVdZ6D1YggLXj2O4+IXVZHiGD
 LesusmoiyTiP+t5NroPSlXwKMJJVnTfQ8a3qUOzgplOIvPuQpB4EBesdPLAjuqos
 VYM0tXDEOrMy7taSpStgcn0PmT6rTHSC8RU+tXEtyB4EUqkfTrbyRQjH8DrQPUq1
 gFDj265AzfCnRGTMSF52taDViJzHV3JkgibrFD71eejgqOw9NfICBfnYczt4W5jv
 dCnPXdVtMk4VVogqma9DiC9+ZWaPqRlD85NVQnwfy3rn8hEmpOlvs4vGa+zUxC3J
 iMYZnZPFHeGtmqweH0TSB1RNvYQgYAj+j7cKJWFEfhUrQYqRYppqzKtybUamR6JJ
 kEKXLGEoIvo84LvX71evxL8ogPo68kYdd4EWKpM2PdqQEsmfvEWWbO1SpGzRqmnm
 UOkDB2pQVEzfbO4Rsz7olcZBNqEcEf8Nq24z/laA4B7B9E7wAFCBjqpx/2Ih7dBG
 bknJI0ghm1Uxd4ZMa0c2ejxIcgxQ4DHC9MA+HFtDZU0SXrsyskY=
 =GfgI
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2025-08-23-1' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Weekly drm fixes. Looks like things did indeed get busier after rc2,
  nothing seems too major, but stuff scattered all over the place,
  amdgpu, xe, i915, hibmc, rust support code, and other small fixes.

  rust:
   - drm device memory layout and safety fixes

  tests:
   - Endianness fixes

  gpuvm:
   - docs warning fix

  panic:
   - fix division on 32-bit arm

  i915:
   - TypeC DP display Fixes
   - Silence rpm wakeref asserts on GEN11_GU_MISC_IIR access
   - Relocate compression repacking WA for JSL/EHL

  xe:
   - xe_vm_create fixes
   - fix vm bind ioctl double free

  amdgpu:
   - Replay fixes
   - SMU14 fix
   - Null check DC fixes
   - DCE6 DC fixes
   - Misc DC fixes

  bridge:
   - analogix_dp: devm_drm_bridge_alloc() error handling fix

  habanalabs:
   - Memory deallocation fix

  hibmc:
   - modesetting black screen fixes
   - fix UAF on irq
   - fix leak on i2c failure path

  nouveau:
   - memory leak fixes
   - typos

  rockchip:
   - Kconfig fix
   - register caching fix"

* tag 'drm-fixes-2025-08-23-1' of https://gitlab.freedesktop.org/drm/kernel: (49 commits)
  drm/xe: Fix vm_bind_ioctl double free bug
  drm/xe: Move ASID allocation and user PT BO tracking into xe_vm_create
  drm/xe: Assign ioctl xe file handler to vm in xe_vm_create
  drm/i915/gt: Relocate compression repacking WA for JSL/EHL
  drm/i915: silence rpm wakeref asserts on GEN11_GU_MISC_IIR access
  drm/amd/display: Fix DP audio DTO1 clock source on DCE 6.
  drm/amd/display: Fix fractional fb divider in set_pixel_clock_v3
  drm/amd/display: Don't print errors for nonexistent connectors
  drm/amd/display: Don't warn when missing DCE encoder caps
  drm/amd/display: Fill display clock and vblank time in dce110_fill_display_configs
  drm/amd/display: Find first CRTC and its line time in dce110_fill_display_configs
  drm/amd/display: Adjust DCE 8-10 clock, don't overclock by 15%
  drm/amd/display: Don't overclock DCE 6 by 15%
  drm/amd/display: Add null pointer check in mod_hdcp_hdcp1_create_session()
  drm/amd/display: Fix Xorg desktop unresponsive on Replay panel
  drm/amd/display: Avoid a NULL pointer dereference
  drm/amdgpu/swm14: Update power limit logic
  drm/amd/display: Revert Add HPO encoder support to Replay
  drm/i915/icl+/tc: Convert AUX powered WARN to a debug message
  drm/i915/lnl+/tc: Use the cached max lane count value
  ...
Linus Torvalds 2025-08-22 18:16:54 -04:00
commit 6debb69041
46 changed files with 382 additions and 239 deletions

View File

@@ -8426,6 +8426,17 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
 F: drivers/gpu/drm/scheduler/
 F: include/drm/gpu_scheduler.h
 
+DRM GPUVM
+M: Danilo Krummrich <dakr@kernel.org>
+R: Matthew Brost <matthew.brost@intel.com>
+R: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+R: Alice Ryhl <aliceryhl@google.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: drivers/gpu/drm/drm_gpuvm.c
+F: include/drm/drm_gpuvm.h
+
 DRM LOG
 M: Jocelyn Falempe <jfalempe@redhat.com>
 M: Javier Martinez Canillas <javierm@redhat.com>
@@ -10655,7 +10666,8 @@ S: Maintained
 F: block/partitions/efi.*
 
 HABANALABS PCI DRIVER
-M: Yaron Avizrat <yaron.avizrat@intel.com>
+M: Koby Elbaz <koby.elbaz@intel.com>
+M: Konstantin Sinyuk <konstantin.sinyuk@intel.com>
 L: dri-devel@lists.freedesktop.org
 S: Supported
 C: irc://irc.oftc.net/dri-devel

View File

@@ -10437,7 +10437,7 @@ end:
         (u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
     WREG32(sob_addr, 0);
 
-    kfree(lin_dma_pkts_arr);
+    kvfree(lin_dma_pkts_arr);
 
     return rc;
 }

View File

@@ -514,7 +514,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
         return false;
 
     if (drm_gem_is_imported(obj)) {
-        struct dma_buf *dma_buf = obj->dma_buf;
+        struct dma_buf *dma_buf = obj->import_attach->dmabuf;
 
         if (dma_buf->ops != &amdgpu_dmabuf_ops)
             /* No XGMI with non AMD GPUs */

View File

@@ -317,7 +317,8 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
      */
     if (!vm->is_compute_context || !vm->process_info)
         return 0;
 
-    if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf))
+    if (!drm_gem_is_imported(obj) ||
+        !dma_buf_is_dynamic(obj->import_attach->dmabuf))
         return 0;
 
     mutex_lock_nested(&vm->process_info->lock, 1);
     if (!WARN_ON(!vm->process_info->eviction_fence)) {

View File

@@ -1283,7 +1283,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
     struct drm_gem_object *obj = &bo->tbo.base;
 
     if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
-        struct dma_buf *dma_buf = obj->dma_buf;
+        struct dma_buf *dma_buf = obj->import_attach->dmabuf;
         struct drm_gem_object *gobj = dma_buf->priv;
         struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

View File

@@ -7792,6 +7792,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
     struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
     int ret;
 
+    if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
+        return -EINVAL;
+
     trace_amdgpu_dm_connector_atomic_check(new_con_state);
 
     if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {

View File

@@ -299,6 +299,25 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
     irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
 
     if (enable) {
+        struct dc *dc = adev->dm.dc;
+        struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+        struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+        struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+        bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
+                    pr->config.replay_supported;
+
+        /*
+         * IPS & self-refresh feature can cause vblank counter resets between
+         * vblank disable and enable.
+         * It may cause system stuck due to waiting for the vblank counter.
+         * Call this function to estimate missed vblanks by using timestamps and
+         * update the vblank counter in DRM.
+         */
+        if (dc->caps.ips_support &&
+            dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
+            sr_supported && vblank->config.disable_immediate)
+            drm_crtc_vblank_restore(crtc);
+
         /* vblank irq on -> Only need vupdate irq in vrr mode */
         if (amdgpu_dm_crtc_vrr_active(acrtc_state))
             rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);

View File

@@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
         return object_id;
     }
 
-    if (tbl->ucNumberOfObjects <= i) {
-        dm_error("Can't find connector id %d in connector table of size %d.\n",
-            i, tbl->ucNumberOfObjects);
+    if (tbl->ucNumberOfObjects <= i)
         return object_id;
-    }
 
     id = le16_to_cpu(tbl->asObjects[i].usObjectID);
     object_id = object_id_from_bios_object_id(id);

View File

@@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3(
     allocation.sPCLKInput.usFbDiv =
             cpu_to_le16((uint16_t)bp_params->feedback_divider);
     allocation.sPCLKInput.ucFracFbDiv =
-            (uint8_t)bp_params->fractional_feedback_divider;
+            (uint8_t)(bp_params->fractional_feedback_divider / 100000);
     allocation.sPCLKInput.ucPostDiv =
             (uint8_t)bp_params->pixel_clock_post_divider;

View File

@@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
 /* ClocksStateLow */
 { .display_clk_khz = 352000, .pixel_clk_khz = 330000},
 /* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 },
 /* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } };
 
 int dentist_get_divider_from_did(int did)
 {
@@ -391,8 +391,6 @@ static void dce_pplib_apply_display_requirements(
 {
     struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
 
-    pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
     dce110_fill_display_configs(context, pp_display_cfg);
 
     if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -405,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
 {
     struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
     struct dm_pp_power_level_change_request level_change_req;
-    int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
-    /*TODO: W/A for dal3 linux, investigate why this works */
-    if (!clk_mgr_dce->dfs_bypass_active)
-        patched_disp_clk = patched_disp_clk * 115 / 100;
+    const int max_disp_clk =
+        clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+    int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
 
     level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
     /* get max clock state from PPLIB */

View File

@@ -120,9 +120,15 @@ void dce110_fill_display_configs(
     const struct dc_state *context,
     struct dm_pp_display_configuration *pp_display_cfg)
 {
+    struct dc *dc = context->clk_mgr->ctx->dc;
     int j;
     int num_cfgs = 0;
 
+    pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+    pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
+    pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+    pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator;
+
     for (j = 0; j < context->stream_count; j++) {
         int k;
@@ -164,6 +170,23 @@ void dce110_fill_display_configs(
         cfg->v_refresh /= stream->timing.h_total;
         cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
                     / stream->timing.v_total;
+
+        /* Find first CRTC index and calculate its line time.
+         * This is necessary for DPM on SI GPUs.
+         */
+        if (cfg->pipe_idx < pp_display_cfg->crtc_index) {
+            const struct dc_crtc_timing *timing =
+                &context->streams[0]->timing;
+
+            pp_display_cfg->crtc_index = cfg->pipe_idx;
+            pp_display_cfg->line_time_in_us =
+                timing->h_total * 10000 / timing->pix_clk_100hz;
+        }
+    }
+
+    if (!num_cfgs) {
+        pp_display_cfg->crtc_index = 0;
+        pp_display_cfg->line_time_in_us = 0;
     }
 
     pp_display_cfg->display_count = num_cfgs;
@@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements(
     pp_display_cfg->min_engine_clock_deep_sleep_khz
         = context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
 
-    pp_display_cfg->avail_mclk_switch_time_us =
-            dce110_get_min_vblank_time_us(context);
-    /* TODO: dce11.2*/
-    pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
-    pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
-
     dce110_fill_display_configs(context, pp_display_cfg);
 
-    /* TODO: is this still applicable?*/
-    if (pp_display_cfg->display_count == 1) {
-        const struct dc_crtc_timing *timing =
-            &context->streams[0]->timing;
-
-        pp_display_cfg->crtc_index =
-            pp_display_cfg->disp_configs[0].pipe_idx;
-
-        pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
-    }
-
     if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
         dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
 }
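A note on the units in the new dce110_fill_display_configs() code: pix_clk_100hz is in 100 Hz steps, so line_time_in_us = h_total * 10000 / pix_clk_100hz. With illustrative 1080p timing numbers (h_total = 2200, a 148.5 MHz pixel clock, i.e. pix_clk_100hz = 1485000), the first CRTC's line time works out to 2200 * 10000 / 1485000 = 14 us after integer division.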

View File

@@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
 static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
 {
     struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-    int dprefclk_wdivider;
-    int dp_ref_clk_khz;
-    int target_div;
+    struct dc_context *ctx = clk_mgr_base->ctx;
+    int dp_ref_clk_khz = 0;
 
-    /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
-
-    /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
-     * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
-    REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
-    /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
-    target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
-    /* Calculate the current DFS clock, in kHz.*/
-    dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
-        * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+    if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev))
+        dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency;
+    else
+        dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz;
 
     return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
 }
@@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements(
 {
     struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
 
-    pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
     dce110_fill_display_configs(context, pp_display_cfg);
 
     if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
 {
     struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
     struct dm_pp_power_level_change_request level_change_req;
-    int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
-    /*TODO: W/A for dal3 linux, investigate why this works */
-    if (!clk_mgr_dce->dfs_bypass_active)
-        patched_disp_clk = patched_disp_clk * 115 / 100;
+    const int max_disp_clk =
+        clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+    int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
 
     level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
     /* get max clock state from PPLIB */

View File

@@ -217,11 +217,24 @@ static bool create_links(
             connectors_num,
             num_virtual_links);
 
-    // condition loop on link_count to allow skipping invalid indices
+    /* When getting the number of connectors, the VBIOS reports the number of valid indices,
+     * but it doesn't say which indices are valid, and not every index has an actual connector.
+     * So, if we don't find a connector on an index, that is not an error.
+     *
+     * - There is no guarantee that the first N indices will be valid
+     * - VBIOS may report a higher amount of valid indices than there are actual connectors
+     * - Some VBIOS have valid configurations for more connectors than there actually are
+     *   on the card. This may be because the manufacturer used the same VBIOS for different
+     *   variants of the same card.
+     */
     for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
+        struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
         struct link_init_data link_init_params = {0};
         struct dc_link *link;
 
+        if (connector_id.id == CONNECTOR_ID_UNKNOWN)
+            continue;
+
         DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
 
         link_init_params.ctx = dc->ctx;

View File

@@ -896,13 +896,13 @@ void dce110_link_encoder_construct(
             enc110->base.id, &bp_cap_info);
 
     /* Override features with DCE-specific values */
-    if (BP_RESULT_OK == result) {
+    if (result == BP_RESULT_OK) {
         enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
                 bp_cap_info.DP_HBR2_EN;
         enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
                 bp_cap_info.DP_HBR3_EN;
         enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
-    } else {
+    } else if (result != BP_RESULT_NORECORD) {
         DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
                 __func__,
                 result);
@@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct(
             enc110->base.id, &bp_cap_info);
 
     /* Override features with DCE-specific values */
-    if (BP_RESULT_OK == result) {
+    if (result == BP_RESULT_OK) {
         enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
                 bp_cap_info.DP_HBR2_EN;
         enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
                 bp_cap_info.DP_HBR3_EN;
         enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
-    } else {
+    } else if (result != BP_RESULT_NORECORD) {
         DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
                 __func__,
                 result);

View File

@@ -4,7 +4,6 @@
 
 #include "dc.h"
 #include "dc_dmub_srv.h"
-#include "dc_dp_types.h"
 #include "dmub/dmub_srv.h"
 #include "core_types.h"
 #include "dmub_replay.h"
@@ -44,45 +43,21 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s
 /*
  * Enable/Disable Replay.
  */
-static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst,
-    struct dc_link *link)
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
 {
     union dmub_rb_cmd cmd;
     struct dc_context *dc = dmub->ctx;
     uint32_t retry_count;
     enum replay_state state = REPLAY_STATE_0;
-    struct pipe_ctx *pipe_ctx = NULL;
-    struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
-    uint8_t i;
 
     memset(&cmd, 0, sizeof(cmd));
     cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
     cmd.replay_enable.data.panel_inst = panel_inst;
 
     cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
-    if (enable) {
+    if (enable)
         cmd.replay_enable.data.enable = REPLAY_ENABLE;
-        // hpo stream/link encoder assignments are not static, need to update everytime we try to enable replay
-        if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
-            for (i = 0; i < MAX_PIPES; i++) {
-                if (res_ctx &&
-                    res_ctx->pipe_ctx[i].stream &&
-                    res_ctx->pipe_ctx[i].stream->link &&
-                    res_ctx->pipe_ctx[i].stream->link == link &&
-                    res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
-                    pipe_ctx = &res_ctx->pipe_ctx[i];
-                    //TODO: refactor for multi edp support
-                    break;
-                }
-            }
-
-            if (!pipe_ctx)
-                return;
-
-            cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
-            cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
-        }
-    } else
+    else
         cmd.replay_enable.data.enable = REPLAY_DISABLE;
 
     cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
@@ -174,17 +149,6 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
     copy_settings_data->digbe_inst = replay_context->digbe_inst;
     copy_settings_data->digfe_inst = replay_context->digfe_inst;
 
-    if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
-        if (pipe_ctx->stream_res.hpo_dp_stream_enc)
-            copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
-        else
-            copy_settings_data->hpo_stream_enc_inst = 0;
-        if (pipe_ctx->link_res.hpo_dp_link_enc)
-            copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
-        else
-            copy_settings_data->hpo_link_enc_inst = 0;
-    }
-
     if (pipe_ctx->plane_res.dpp)
         copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
     else
@@ -247,7 +211,6 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
     pCmd->header.type = DMUB_CMD__REPLAY;
     pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
     pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
-    pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst;
     pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
     pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;

View File

@@ -19,7 +19,7 @@ struct dmub_replay_funcs {
     void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
                  uint8_t panel_inst);
     void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
-                  uint8_t panel_inst, struct dc_link *link);
+                  uint8_t panel_inst);
     bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
                      struct replay_context *replay_context, uint8_t panel_inst);
     void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,

View File

@@ -944,7 +944,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
     // TODO: Handle mux change case if force_static is set
     // If force_static is set, just change the replay_allow_active state directly
     if (replay != NULL && link->replay_settings.replay_feature_enabled)
-        replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link);
+        replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
     link->replay_settings.replay_allow_active = *allow_active;
 }

View File

@@ -4047,14 +4047,6 @@ struct dmub_cmd_replay_copy_settings_data {
      * DIG BE HW instance.
      */
     uint8_t digbe_inst;
-    /**
-     * @hpo_stream_enc_inst: HPO stream encoder instance
-     */
-    uint8_t hpo_stream_enc_inst;
-    /**
-     * @hpo_link_enc_inst: HPO link encoder instance
-     */
-    uint8_t hpo_link_enc_inst;
     /**
      * AUX HW instance.
      */
@@ -4159,18 +4151,6 @@ struct dmub_rb_cmd_replay_enable_data {
      * This does not support HDMI/DP2 for now.
      */
     uint8_t phy_rate;
-    /**
-     * @hpo_stream_enc_inst: HPO stream encoder instance
-     */
-    uint8_t hpo_stream_enc_inst;
-    /**
-     * @hpo_link_enc_inst: HPO link encoder instance
-     */
-    uint8_t hpo_link_enc_inst;
-    /**
-     * @pad: Align structure to 4 byte boundary.
-     */
-    uint8_t pad[2];
 };
 
 /**
/** /**

View File

@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
         return MOD_HDCP_STATUS_FAILURE;
     }
 
+    if (!display)
+        return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
     hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
 
     mutex_lock(&psp->hdcp_context.mutex);

View File

@@ -1697,9 +1697,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
                        uint32_t *min_power_limit)
 {
     struct smu_table_context *table_context = &smu->smu_table;
+    struct smu_14_0_2_powerplay_table *powerplay_table =
+        table_context->power_play_table;
     PPTable_t *pptable = table_context->driver_pptable;
     CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
-    uint32_t power_limit;
+    uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
     uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
 
     if (smu_v14_0_get_current_power_limit(smu, &power_limit))
@@ -1712,11 +1714,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
     if (default_power_limit)
         *default_power_limit = power_limit;
 
-    if (max_power_limit)
-        *max_power_limit = msg_limit;
+    if (powerplay_table) {
+        if (smu->od_enabled &&
+            smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+            od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt;
+            od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+        } else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+            od_percent_upper = 0;
+            od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+        }
+    }
 
-    if (min_power_limit)
-        *min_power_limit = 0;
+    dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+        od_percent_upper, od_percent_lower, power_limit);
+
+    if (max_power_limit) {
+        *max_power_limit = msg_limit * (100 + od_percent_upper);
+        *max_power_limit /= 100;
+    }
+
+    if (min_power_limit) {
+        *min_power_limit = power_limit * (100 + od_percent_lower);
+        *min_power_limit /= 100;
+    }
 
     return 0;
 }
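A worked example of the rescaled limits above, with illustrative numbers (not from the patch): the ceiling becomes *max_power_limit = msg_limit * (100 + od_percent_upper) / 100, so msg_limit = 220 and OverDriveLimitsBasicMax.Ppt = 10 report 220 * 110 / 100 = 242, while with overdrive disabled od_percent_upper stays 0 and the ceiling is msg_limit unchanged. *min_power_limit is scaled the same way from the current power_limit using od_percent_lower.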

View File

@@ -1474,8 +1474,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
 
     dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
                    &analogix_dp_bridge_funcs);
-    if (!dp)
-        return ERR_PTR(-ENOMEM);
+    if (IS_ERR(dp))
+        return ERR_CAST(dp);
 
     dp->dev = &pdev->dev;
     dp->dpms_mode = DRM_MODE_DPMS_OFF;

View File

@@ -2432,6 +2432,8 @@ static const struct drm_gpuvm_ops lock_ops = {
  *
  * The expected usage is:
  *
+ *	.. code-block:: c
+ *
  *	vm_bind {
  *		struct drm_exec exec;
  *

View File

@@ -381,6 +381,26 @@ struct DecFifo {
     len: usize,
 }
 
+// On arm32 architecture, dividing an `u64` by a constant will generate a call
+// to `__aeabi_uldivmod` which is not present in the kernel.
+// So use the multiply by inverse method for this architecture.
+fn div10(val: u64) -> u64 {
+    if cfg!(target_arch = "arm") {
+        let val_h = val >> 32;
+        let val_l = val & 0xFFFFFFFF;
+        let b_h: u64 = 0x66666666;
+        let b_l: u64 = 0x66666667;
+
+        let tmp1 = val_h * b_l + ((val_l * b_l) >> 32);
+        let tmp2 = val_l * b_h + (tmp1 & 0xffffffff);
+        let tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32);
+
+        tmp3 >> 2
+    } else {
+        val / 10
+    }
+}
+
 impl DecFifo {
     fn push(&mut self, data: u64, len: usize) {
         let mut chunk = data;
@@ -389,7 +409,7 @@ impl DecFifo {
         }
         for i in 0..len {
             self.decimals[i] = (chunk % 10) as u8;
-            chunk /= 10;
+            chunk = div10(chunk);
         }
         self.len += len;
     }
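The two halves above pack B = 0x6666666666666667 = ceil(2^66 / 10); div10() computes the upper 64 bits of the 128-bit product val * B with 32-bit limbs, and the final shift by 2 yields floor(val * B / 2^66), i.e. the decimal quotient, without emitting the 64-bit division the comment warns about. A standalone spot-check of the same constant, as a sketch in userspace Rust where u128 is available (not kernel code; test values stay within the at-most-19-digit chunks the FIFO is fed):

    fn div10_by_inverse(val: u64) -> u64 {
        // B = ceil(2^66 / 10); the kernel's div10() computes the same
        // high product with 32-bit limbs to avoid __aeabi_uldivmod.
        const B: u128 = 0x6666_6666_6666_6667;
        ((val as u128 * B) >> 66) as u64
    }

    fn main() {
        for v in [0u64, 9, 10, 99, 1_000_000_007, 9_999_999_999_999_999_999] {
            assert_eq!(div10_by_inverse(v), v / 10);
        }
        println!("div10 spot-check passed");
    }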

View File

@@ -325,6 +325,17 @@ static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp)
     return hibmc_dp_link_reduce_rate(dp);
 }
 
+static void hibmc_dp_update_caps(struct hibmc_dp_dev *dp)
+{
+    dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+    if (dp->link.cap.link_rate > DP_LINK_BW_8_1 || !dp->link.cap.link_rate)
+        dp->link.cap.link_rate = DP_LINK_BW_8_1;
+
+    dp->link.cap.lanes = dp->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+    if (dp->link.cap.lanes > HIBMC_DP_LANE_NUM_MAX)
+        dp->link.cap.lanes = HIBMC_DP_LANE_NUM_MAX;
+}
+
 int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
 {
     struct hibmc_dp_link *link = &dp->link;
@@ -334,8 +345,7 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
     if (ret)
         drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
 
-    dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
-    dp->link.cap.lanes = 0x2;
+    hibmc_dp_update_caps(dp);
 
     ret = hibmc_dp_get_serdes_rate_cfg(dp);
     if (ret < 0)

View File

@@ -32,7 +32,7 @@
 
 DEFINE_DRM_GEM_FOPS(hibmc_fops);
 
-static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
+static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "hibmc-vblank", "hibmc-hpd" };
 
 static irqreturn_t hibmc_interrupt(int irq, void *arg)
 {
@@ -115,6 +115,8 @@ static const struct drm_mode_config_funcs hibmc_mode_funcs = {
 static int hibmc_kms_init(struct hibmc_drm_private *priv)
 {
     struct drm_device *dev = &priv->dev;
+    struct drm_encoder *encoder;
+    u32 clone_mask = 0;
     int ret;
 
     ret = drmm_mode_config_init(dev);
@@ -154,6 +156,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
         return ret;
     }
 
+    drm_for_each_encoder(encoder, dev)
+        clone_mask |= drm_encoder_mask(encoder);
+
+    drm_for_each_encoder(encoder, dev)
+        encoder->possible_clones = clone_mask;
+
     return 0;
 }
@@ -277,7 +285,6 @@ static void hibmc_unload(struct drm_device *dev)
 static int hibmc_msi_init(struct drm_device *dev)
 {
     struct pci_dev *pdev = to_pci_dev(dev->dev);
-    char name[32] = {0};
     int valid_irq_num;
     int irq;
     int ret;
@@ -292,9 +299,6 @@ static int hibmc_msi_init(struct drm_device *dev)
     valid_irq_num = ret;
 
     for (int i = 0; i < valid_irq_num; i++) {
-        snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
-             dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
-
         irq = pci_irq_vector(pdev, i);
 
         if (i)
@@ -302,10 +306,10 @@ static int hibmc_msi_init(struct drm_device *dev)
             ret = devm_request_threaded_irq(&pdev->dev, irq,
                             hibmc_dp_interrupt,
                             hibmc_dp_hpd_isr,
-                            IRQF_SHARED, name, dev);
+                            IRQF_SHARED, g_irqs_names_map[i], dev);
         else
             ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
-                           IRQF_SHARED, name, dev);
+                           IRQF_SHARED, g_irqs_names_map[i], dev);
         if (ret) {
             drm_err(dev, "install irq failed: %d\n", ret);
             return ret;
@@ -323,13 +327,13 @@ static int hibmc_load(struct drm_device *dev)
 
     ret = hibmc_hw_init(priv);
     if (ret)
-        goto err;
+        return ret;
 
     ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
                     pci_resource_len(pdev, 0));
     if (ret) {
         drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
-        goto err;
+        return ret;
     }
 
     ret = hibmc_kms_init(priv);

View File

@@ -69,6 +69,7 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
 int hibmc_vdac_init(struct hibmc_drm_private *priv);
 
 int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
+void hibmc_ddc_del(struct hibmc_vdac *vdac);
 
 int hibmc_dp_init(struct hibmc_drm_private *priv);

View File

@@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
 
     return i2c_bit_add_bus(&vdac->adapter);
 }
+
+void hibmc_ddc_del(struct hibmc_vdac *vdac)
+{
+    i2c_del_adapter(&vdac->adapter);
+}

View File

@@ -53,7 +53,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
 {
     struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
 
-    i2c_del_adapter(&vdac->adapter);
+    hibmc_ddc_del(vdac);
     drm_connector_cleanup(connector);
 }
 
@@ -110,7 +110,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
     ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
     if (ret) {
         drm_err(dev, "failed to init encoder: %d\n", ret);
-        return ret;
+        goto err;
     }
 
     drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
@@ -121,7 +121,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
                  &vdac->adapter);
     if (ret) {
         drm_err(dev, "failed to init connector: %d\n", ret);
-        return ret;
+        goto err;
     }
 
     drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
@@ -131,4 +131,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
     connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
 
     return 0;
+
+err:
+    hibmc_ddc_del(vdac);
+
+    return ret;
 }

View File

@@ -1506,10 +1506,14 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
     if (!(master_ctl & GEN11_GU_MISC_IRQ))
         return 0;
 
+    intel_display_rpm_assert_block(display);
+
     iir = intel_de_read(display, GEN11_GU_MISC_IIR);
     if (likely(iir))
         intel_de_write(display, GEN11_GU_MISC_IIR, iir);
 
+    intel_display_rpm_assert_unblock(display);
+
     return iir;
 }

View File

@@ -23,6 +23,7 @@
 #include "intel_modeset_lock.h"
 #include "intel_tc.h"
 
+#define DP_PIN_ASSIGNMENT_NONE 0x0
 #define DP_PIN_ASSIGNMENT_C    0x3
 #define DP_PIN_ASSIGNMENT_D    0x4
 #define DP_PIN_ASSIGNMENT_E    0x5
@@ -66,6 +67,7 @@ struct intel_tc_port {
     enum tc_port_mode init_mode;
     enum phy_fia phy_fia;
     u8 phy_fia_idx;
+    u8 max_lane_count;
 };
 
 static enum intel_display_power_domain
@@ -307,6 +309,8 @@ static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
         REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
 
     switch (pin_assignment) {
+    case DP_PIN_ASSIGNMENT_NONE:
+        return 0;
     default:
         MISSING_CASE(pin_assignment);
         fallthrough;
@@ -365,12 +369,12 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
     }
 }
 
-int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
+static int get_max_lane_count(struct intel_tc_port *tc)
 {
-    struct intel_display *display = to_intel_display(dig_port);
-    struct intel_tc_port *tc = to_tc_port(dig_port);
+    struct intel_display *display = to_intel_display(tc->dig_port);
+    struct intel_digital_port *dig_port = tc->dig_port;
 
-    if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
+    if (tc->mode != TC_PORT_DP_ALT)
         return 4;
 
     assert_tc_cold_blocked(tc);
@@ -384,6 +388,25 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
     return intel_tc_port_get_max_lane_count(dig_port);
 }
 
+static void read_pin_configuration(struct intel_tc_port *tc)
+{
+    tc->max_lane_count = get_max_lane_count(tc);
+}
+
+int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
+{
+    struct intel_display *display = to_intel_display(dig_port);
+    struct intel_tc_port *tc = to_tc_port(dig_port);
+
+    if (!intel_encoder_is_tc(&dig_port->base))
+        return 4;
+
+    if (DISPLAY_VER(display) < 20)
+        return get_max_lane_count(tc);
+
+    return tc->max_lane_count;
+}
+
 void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
                       int required_lanes)
 {
@@ -596,9 +619,12 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
     tc_cold_wref = __tc_cold_block(tc, &domain);
 
     tc->mode = tc_phy_get_current_mode(tc);
-    if (tc->mode != TC_PORT_DISCONNECTED)
+    if (tc->mode != TC_PORT_DISCONNECTED) {
         tc->lock_wakeref = tc_cold_block(tc);
 
+        read_pin_configuration(tc);
+    }
+
     __tc_cold_unblock(tc, domain, tc_cold_wref);
 }
@@ -656,8 +682,11 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
 
     tc->lock_wakeref = tc_cold_block(tc);
 
-    if (tc->mode == TC_PORT_TBT_ALT)
+    if (tc->mode == TC_PORT_TBT_ALT) {
+        read_pin_configuration(tc);
+
         return true;
+    }
 
     if ((!tc_phy_is_ready(tc) ||
          !icl_tc_phy_take_ownership(tc, true)) &&
@@ -668,6 +697,7 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
         goto out_unblock_tc_cold;
     }
 
+    read_pin_configuration(tc);
 
     if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
         goto out_release_phy;
@@ -858,9 +888,12 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
     port_wakeref = intel_display_power_get(display, port_power_domain);
 
     tc->mode = tc_phy_get_current_mode(tc);
-    if (tc->mode != TC_PORT_DISCONNECTED)
+    if (tc->mode != TC_PORT_DISCONNECTED) {
         tc->lock_wakeref = tc_cold_block(tc);
 
+        read_pin_configuration(tc);
+    }
+
     intel_display_power_put(display, port_power_domain, port_wakeref);
 }
@@ -873,6 +906,9 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
     if (tc->mode == TC_PORT_TBT_ALT) {
         tc->lock_wakeref = tc_cold_block(tc);
 
+        read_pin_configuration(tc);
+
         return true;
     }
@@ -894,6 +930,8 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
 
     tc->lock_wakeref = tc_cold_block(tc);
 
+    read_pin_configuration(tc);
+
     if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
         goto out_unblock_tc_cold;
@@ -1124,9 +1162,18 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
     tc_cold_wref = __tc_cold_block(tc, &domain);
 
     tc->mode = tc_phy_get_current_mode(tc);
-    if (tc->mode != TC_PORT_DISCONNECTED)
+    if (tc->mode != TC_PORT_DISCONNECTED) {
         tc->lock_wakeref = tc_cold_block(tc);
 
+        read_pin_configuration(tc);
+        /*
+         * Set a valid lane count value for a DP-alt sink which got
+         * disconnected. The driver can only disable the output on this PHY.
+         */
+        if (tc->max_lane_count == 0)
+            tc->max_lane_count = 4;
+    }
+
     drm_WARN_ON(display->drm,
             (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
             !xelpdp_tc_phy_tcss_power_is_enabled(tc));
@@ -1138,14 +1185,19 @@ static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
 {
     tc->lock_wakeref = tc_cold_block(tc);
 
-    if (tc->mode == TC_PORT_TBT_ALT)
+    if (tc->mode == TC_PORT_TBT_ALT) {
+        read_pin_configuration(tc);
+
         return true;
+    }
 
     if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
         goto out_unblock_tccold;
 
     xelpdp_tc_phy_take_ownership(tc, true);
 
+    read_pin_configuration(tc);
+
     if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
         goto out_release_phy;
@@ -1226,14 +1278,19 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc)
     tc->phy_ops->get_hw_state(tc);
 }
 
-static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
-                      bool phy_is_ready, bool phy_is_owned)
+/* Is the PHY owned by display i.e. is it in legacy or DP-alt mode? */
+static bool tc_phy_owned_by_display(struct intel_tc_port *tc,
+                    bool phy_is_ready, bool phy_is_owned)
 {
     struct intel_display *display = to_intel_display(tc->dig_port);
 
-    drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
+    if (DISPLAY_VER(display) < 20) {
+        drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
 
-    return phy_is_ready && phy_is_owned;
+        return phy_is_ready && phy_is_owned;
+    } else {
+        return phy_is_owned;
+    }
 }
 
 static bool tc_phy_is_connected(struct intel_tc_port *tc,
@@ -1244,7 +1301,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
     bool phy_is_owned = tc_phy_is_owned(tc);
     bool is_connected;
 
-    if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
+    if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned))
         is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
     else
         is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
@@ -1352,7 +1409,7 @@ tc_phy_get_current_mode(struct intel_tc_port *tc)
     phy_is_ready = tc_phy_is_ready(tc);
     phy_is_owned = tc_phy_is_owned(tc);
 
-    if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
+    if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) {
         mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
     } else {
         drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
@@ -1441,11 +1498,11 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
     intel_display_power_flush_work(display);
     if (!intel_tc_cold_requires_aux_pw(dig_port)) {
         enum intel_display_power_domain aux_domain;
-        bool aux_powered;
 
         aux_domain = intel_aux_power_domain(dig_port);
-        aux_powered = intel_display_power_is_enabled(display, aux_domain);
-        drm_WARN_ON(display->drm, aux_powered);
+        if (intel_display_power_is_enabled(display, aux_domain))
+            drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n",
+                    tc->port_name);
     }
 
     tc_phy_disconnect(tc);

View File

@@ -634,6 +634,8 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
                      struct i915_wa_list *wal)
 {
+    struct drm_i915_private *i915 = engine->i915;
+
     /* Wa_1406697149 (WaDisableBankHangMode:icl) */
     wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
@@ -669,6 +671,15 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 
     /* Wa_1406306137:icl,ehl */
     wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
+
+    if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
+        /*
+         * Disable Repacking for Compression (masked R/W access)
+         * before rendering compressed surfaces for display.
+         */
+        wa_masked_en(wal, CACHE_MODE_0_GEN7,
+                 DISABLE_REPACKING_FOR_COMPRESSION);
+    }
 }
 
 /*
@@ -2306,15 +2317,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                GEN8_RC_SEMA_IDLE_MSG_DISABLE);
     }
 
-    if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
-        /*
-         * "Disable Repacking for Compression (masked R/W access)
-         * before rendering compressed surfaces for display."
-         */
-        wa_masked_en(wal, CACHE_MODE_0_GEN7,
-                 DISABLE_REPACKING_FOR_COMPRESSION);
-    }
-
     if (GRAPHICS_VER(i915) == 11) {
         /* This is not an Wa. Enable for better image quality */
         wa_masked_en(wal,

View File

@@ -60,14 +60,14 @@
  * virtual address in the GPU's VA space there is no guarantee that the actual
  * mappings are created in the GPU's MMU. If the given memory is swapped out
  * at the time the bind operation is executed the kernel will stash the mapping
- * details into it's internal alloctor and create the actual MMU mappings once
+ * details into it's internal allocator and create the actual MMU mappings once
  * the memory is swapped back in. While this is transparent for userspace, it is
  * guaranteed that all the backing memory is swapped back in and all the memory
 * mappings, as requested by userspace previously, are actually mapped once the
 * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
 *
 * A VM_BIND job can be executed either synchronously or asynchronously. If
- * exectued asynchronously, userspace may provide a list of syncobjs this job
+ * executed asynchronously, userspace may provide a list of syncobjs this job
 * will wait for and/or a list of syncobj the kernel will signal once the
 * VM_BIND job finished execution. If executed synchronously the ioctl will
 * block until the bind job is finished. For synchronous jobs the kernel will
@@ -82,7 +82,7 @@
 * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
 * an up to date view of the VA space. However, the actual mappings might still
 * be pending. Hence, EXEC jobs require to have the particular fences - of
- * the corresponding VM_BIND jobs they depent on - attached to them.
+ * the corresponding VM_BIND jobs they depend on - attached to them.
 */
static int static int

View File

@@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
     case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
     default:
         WARN_ON(1);
-        return -EINVAL;
+        ret = -EINVAL;
+        goto done;
     }
 
     memcpy(args->data, argv, argc);

View File

@@ -325,7 +325,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
 
     rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
     if (IS_ERR_OR_NULL(rpc)) {
-        kfree(buf);
+        kvfree(buf);
         return rpc;
     }
@@ -334,7 +334,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
 
     rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
     if (IS_ERR_OR_NULL(rpc)) {
-        kfree(buf);
+        kvfree(buf);
         return rpc;
     }

View File

@@ -39,7 +39,8 @@ impl File {
             _ => return Err(EINVAL),
         };
 
-        getparam.set_value(value);
+        #[allow(clippy::useless_conversion)]
+        getparam.set_value(value.into());
 
         Ok(0)
     }

View File

@@ -53,6 +53,7 @@ config ROCKCHIP_CDN_DP
     bool "Rockchip cdn DP"
     depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
     select DRM_DISPLAY_HELPER
+    select DRM_BRIDGE_CONNECTOR
     select DRM_DISPLAY_DP_HELPER
     help
       This selects support for Rockchip SoC specific extensions

View File

@@ -2579,12 +2579,13 @@ static int vop2_win_init(struct vop2 *vop2)
 }
 
 /*
- * The window registers are only updated when config done is written.
- * Until that they read back the old value. As we read-modify-write
- * these registers mark them as non-volatile. This makes sure we read
- * the new values from the regmap register cache.
+ * The window and video port registers are only updated when config
+ * done is written. Until that they read back the old value. As we
+ * read-modify-write these registers mark them as non-volatile. This
+ * makes sure we read the new values from the regmap register cache.
 */
 static const struct regmap_range vop2_nonvolatile_range[] = {
+    regmap_reg_range(RK3568_VP0_CTRL_BASE, RK3588_VP3_CTRL_BASE + 255),
     regmap_reg_range(0x1000, 0x23ff),
 };

View File

@@ -1033,13 +1033,14 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
 		NULL : &result->dst_pitch;
 
 	drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
-	buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
+	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
 
 	buf = dst.vaddr; /* restore original value of buf */
 	memset(buf, 0, dst_size);
 	drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+	buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
 	KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
 }
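
The conversion helpers write little-endian pixel data, so each 32-bit word must be swapped to CPU order before comparing against CPU-order expectations; otherwise the test fails on big-endian machines. A minimal in-place variant of that step (helper name hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Convert a buffer of little-endian 32-bit words to CPU byte order in
 * place: a no-op on little-endian hosts, a byte swap on big-endian. */
static void le32buf_to_cpu_inplace(u32 *buf, size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		buf[i] = le32_to_cpu((__force __le32)buf[i]);
}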

@@ -408,7 +408,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 	/* Special layout, prepared below.. */
 	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
-			  XE_VM_FLAG_SET_TILE_ID(tile));
+			  XE_VM_FLAG_SET_TILE_ID(tile), NULL);
 	if (IS_ERR(vm))
 		return ERR_CAST(vm);

@@ -101,7 +101,7 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
 	xe_assert(xe, hwe);
 
 	/* PXP instructions must be issued from PPGTT */
-	vm = xe_vm_create(xe, XE_VM_FLAG_GSC);
+	vm = xe_vm_create(xe, XE_VM_FLAG_GSC, NULL);
 	if (IS_ERR(vm))
 		return PTR_ERR(vm);

@@ -1640,7 +1640,7 @@ static void xe_vm_free_scratch(struct xe_vm *vm)
 	}
 }
 
-struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
+struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
 {
 	struct drm_gem_object *vm_resv_obj;
 	struct xe_vm *vm;
@@ -1661,9 +1661,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	vm->xe = xe;
 	vm->size = 1ull << xe->info.va_bits;
 	vm->flags = flags;
+	if (xef)
+		vm->xef = xe_file_get(xef);
 
 	/**
 	 * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
 	 * manipulated under the PXP mutex. However, the PXP mutex can be taken
@@ -1794,6 +1795,20 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	if (number_tiles > 1)
 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
 
+	if (xef && xe->info.has_asid) {
+		u32 asid;
+
+		down_write(&xe->usm.lock);
+		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
+				      XA_LIMIT(1, XE_MAX_ASID - 1),
+				      &xe->usm.next_asid, GFP_KERNEL);
+		up_write(&xe->usm.lock);
+		if (err < 0)
+			goto err_unlock_close;
+
+		vm->usm.asid = asid;
+	}
+
 	trace_xe_vm_create(vm);
 
 	return vm;
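
xa_alloc_cyclic() hands out IDs from a bounded range while remembering where the previous search stopped, so freed ASIDs are not immediately reused. A self-contained sketch of the same pattern; the xarray, limit, and names are illustrative:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(id_to_obj); /* xarray with ID allocation */
static u32 next_id;

/* Allocate the next free ID in [1, 1023] for obj, wrapping around when
 * the range is exhausted; returns 0 (or 1 after a wrap) on success. */
static int assign_id(void *obj, u32 *out_id)
{
	int err = xa_alloc_cyclic(&id_to_obj, out_id, obj,
				  XA_LIMIT(1, 1023), &next_id, GFP_KERNEL);

	return err < 0 ? err : 0;
}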
@@ -1814,6 +1829,8 @@ err_no_resv:
 	for_each_tile(tile, xe, id)
 		xe_range_fence_tree_fini(&vm->rftree[id]);
 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
+	if (vm->xef)
+		xe_file_put(vm->xef);
 	kfree(vm);
 	if (flags & XE_VM_FLAG_LR_MODE)
 		xe_pm_runtime_put(xe);
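
Since xe_vm_create() now takes the xe_file reference itself (the xe_file_get() above), every failure path past that point must drop it before freeing the VM, which is what this hunk adds. A hedged sketch of the get/put symmetry; all types and helpers here are hypothetical:

#include <linux/err.h>
#include <linux/slab.h>

struct obj {
	struct owner *owner; /* hypothetical refcounted type */
};

static struct obj *obj_create(struct owner *o)
{
	struct obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);
	if (o)
		obj->owner = owner_get(o); /* hypothetical get */
	if (obj_init(obj)) { /* hypothetical setup step */
		if (obj->owner)
			owner_put(obj->owner); /* undo the get on failure */
		kfree(obj);
		return ERR_PTR(-EINVAL);
	}
	return obj;
}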
@@ -2059,9 +2076,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	struct xe_device *xe = to_xe_device(dev);
 	struct xe_file *xef = to_xe_file(file);
 	struct drm_xe_vm_create *args = data;
-	struct xe_tile *tile;
 	struct xe_vm *vm;
-	u32 id, asid;
+	u32 id;
 	int err;
 	u32 flags = 0;
@@ -2097,29 +2113,10 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
 		flags |= XE_VM_FLAG_FAULT_MODE;
 
-	vm = xe_vm_create(xe, flags);
+	vm = xe_vm_create(xe, flags, xef);
 	if (IS_ERR(vm))
 		return PTR_ERR(vm);
 
-	if (xe->info.has_asid) {
-		down_write(&xe->usm.lock);
-		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
-				      XA_LIMIT(1, XE_MAX_ASID - 1),
-				      &xe->usm.next_asid, GFP_KERNEL);
-		up_write(&xe->usm.lock);
-		if (err < 0)
-			goto err_close_and_put;
-
-		vm->usm.asid = asid;
-	}
-
-	vm->xef = xe_file_get(xef);
-
-	/* Record BO memory for VM pagetable created against client */
-	for_each_tile(tile, xe, id)
-		if (vm->pt_root[id])
-			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
-
 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
 	/* Warning: Security issue - never enable by default */
 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
@@ -3421,6 +3418,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
 free_bind_ops:
 	if (args->num_binds > 1)
 		kvfree(*bind_ops);
+	*bind_ops = NULL;
 	return err;
 }
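
Clearing the caller's pointer after freeing is what turns a later double free into a no-op, since kvfree(NULL) does nothing. A minimal sketch of the guard (helper name hypothetical):

#include <linux/slab.h>

/* Hypothetical helper: free through a double pointer and clear it, so a
 * second call on the same slot becomes kvfree(NULL), a harmless no-op. */
static void release_ops(void **ops)
{
	kvfree(*ops);
	*ops = NULL;
}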
@@ -3527,7 +3525,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct xe_exec_queue *q = NULL;
 	u32 num_syncs, num_ufence = 0;
 	struct xe_sync_entry *syncs = NULL;
-	struct drm_xe_vm_bind_op *bind_ops;
+	struct drm_xe_vm_bind_op *bind_ops = NULL;
 	struct xe_vma_ops vops;
 	struct dma_fence *fence;
 	int err;

@@ -26,7 +26,7 @@ struct xe_sync_entry;
 struct xe_svm_range;
 struct drm_exec;
 
-struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
+struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);
 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map)
  */
 static inline void iosys_map_clear(struct iosys_map *map)
 {
-	if (map->is_iomem) {
-		map->vaddr_iomem = NULL;
-		map->is_iomem = false;
-	} else {
-		map->vaddr = NULL;
-	}
+	memset(map, 0, sizeof(*map));
 }
 
 /**
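
memset() is the safer clear because it wipes the whole struct, including whichever union member is currently inactive and any padding, instead of branching on is_iomem. A sketch mirroring the layout being cleared (struct name hypothetical):

#include <linux/types.h>

/* Mirror of the iosys_map layout: the old code zeroed only the union
 * member selected by is_iomem; memset() clears both members and the
 * flag in one go. */
struct example_map {
	union {
		void __iomem *vaddr_iomem; /* valid when is_iomem is true */
		void *vaddr;               /* valid otherwise */
	};
	bool is_iomem;
};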

@@ -43,17 +43,6 @@ pub struct Vmalloc;
 /// For more details see [self].
 pub struct KVmalloc;
 
-/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
-fn aligned_size(new_layout: Layout) -> usize {
-    // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
-    let layout = new_layout.pad_to_align();
-
-    // Note that `layout.size()` (after padding) is guaranteed to be a multiple of `layout.align()`
-    // which together with the slab guarantees means the `krealloc` will return a properly aligned
-    // object (see comments in `kmalloc()` for more information).
-    layout.size()
-}
-
 /// # Invariants
 ///
 /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
@@ -88,7 +77,7 @@ impl ReallocFunc {
         old_layout: Layout,
         flags: Flags,
     ) -> Result<NonNull<[u8]>, AllocError> {
-        let size = aligned_size(layout);
+        let size = layout.size();
         let ptr = match ptr {
             Some(ptr) => {
                 if old_layout.size() == 0 {
@@ -123,6 +112,17 @@ impl ReallocFunc {
     }
 }
 
+impl Kmalloc {
+    /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
+    /// `layout`.
+    pub fn aligned_layout(layout: Layout) -> Layout {
+        // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
+        // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
+        // a properly aligned object (see comments in `kmalloc()` for more information).
+        layout.pad_to_align()
+    }
+}
+
 // SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
 // - memory remains valid until it is explicitly freed,
 // - passing a pointer to a valid memory allocation is OK,
@@ -135,6 +135,8 @@ unsafe impl Allocator for Kmalloc {
         old_layout: Layout,
         flags: Flags,
     ) -> Result<NonNull<[u8]>, AllocError> {
+        let layout = Kmalloc::aligned_layout(layout);
+
         // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
         unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
     }
@@ -176,6 +178,10 @@ unsafe impl Allocator for KVmalloc {
         old_layout: Layout,
         flags: Flags,
     ) -> Result<NonNull<[u8]>, AllocError> {
+        // `KVmalloc` may use the `Kmalloc` backend, hence we have to enforce a `Kmalloc`
+        // compatible layout.
+        let layout = Kmalloc::aligned_layout(layout);
+
         // TODO: Support alignments larger than PAGE_SIZE.
         if layout.align() > bindings::PAGE_SIZE {
             pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");

@@ -22,6 +22,17 @@ pub type Kmalloc = Cmalloc;
 pub type Vmalloc = Kmalloc;
 pub type KVmalloc = Kmalloc;
 
+impl Cmalloc {
+    /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
+    /// `layout`.
+    pub fn aligned_layout(layout: Layout) -> Layout {
+        // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
+        // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
+        // a properly aligned object (see comments in `kmalloc()` for more information).
+        layout.pad_to_align()
+    }
+}
+
 extern "C" {
     #[link_name = "aligned_alloc"]
     fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;

@@ -5,6 +5,7 @@
 //! C header: [`include/linux/drm/drm_device.h`](srctree/include/linux/drm/drm_device.h)
 
 use crate::{
+    alloc::allocator::Kmalloc,
     bindings, device, drm,
     drm::driver::AllocImpl,
     error::from_err_ptr,
@@ -12,7 +13,7 @@ use crate::{
     prelude::*,
     types::{ARef, AlwaysRefCounted, Opaque},
 };
-use core::{mem, ops::Deref, ptr, ptr::NonNull};
+use core::{alloc::Layout, mem, ops::Deref, ptr, ptr::NonNull};
 
 #[cfg(CONFIG_DRM_LEGACY)]
 macro_rules! drm_legacy_fields {
@@ -53,10 +54,8 @@ macro_rules! drm_legacy_fields {
 ///
 /// `self.dev` is a valid instance of a `struct device`.
 #[repr(C)]
-#[pin_data]
 pub struct Device<T: drm::Driver> {
     dev: Opaque<bindings::drm_device>,
-    #[pin]
     data: T::Data,
 }
@@ -96,6 +95,10 @@ impl<T: drm::Driver> Device<T> {
     /// Create a new `drm::Device` for a `drm::Driver`.
     pub fn new(dev: &device::Device, data: impl PinInit<T::Data, Error>) -> Result<ARef<Self>> {
+        // `__drm_dev_alloc` uses `kmalloc()` to allocate memory, hence ensure a `kmalloc()`
+        // compatible `Layout`.
+        let layout = Kmalloc::aligned_layout(Layout::new::<Self>());
+
         // SAFETY:
         // - `VTABLE`, as a `const` is pinned to the read-only section of the compilation,
         // - `dev` is valid by its type invarants,
@@ -103,7 +106,7 @@ impl<T: drm::Driver> Device<T> {
             bindings::__drm_dev_alloc(
                 dev.as_raw(),
                 &Self::VTABLE,
-                mem::size_of::<Self>(),
+                layout.size(),
                 mem::offset_of!(Self, dev),
             )
         }
@@ -117,9 +120,13 @@ impl<T: drm::Driver> Device<T> {
         //   - `raw_data` is a valid pointer to uninitialized memory.
         //   - `raw_data` will not move until it is dropped.
         unsafe { data.__pinned_init(raw_data) }.inspect_err(|_| {
-            // SAFETY: `__drm_dev_alloc()` was successful, hence `raw_drm` must be valid and the
+            // SAFETY: `raw_drm` is a valid pointer to `Self`, given that `__drm_dev_alloc` was
+            // successful.
+            let drm_dev = unsafe { Self::into_drm_device(raw_drm) };
+
+            // SAFETY: `__drm_dev_alloc()` was successful, hence `drm_dev` must be valid and the
             // refcount must be non-zero.
-            unsafe { bindings::drm_dev_put(ptr::addr_of_mut!((*raw_drm.as_ptr()).dev).cast()) };
+            unsafe { bindings::drm_dev_put(drm_dev) };
         })?;
 
         // SAFETY: The reference count is one, and now we take ownership of that reference as a
@@ -140,6 +147,14 @@ impl<T: drm::Driver> Device<T> {
         unsafe { crate::container_of!(Opaque::cast_from(ptr), Self, dev) }.cast_mut()
     }
 
+    /// # Safety
+    ///
+    /// `ptr` must be a valid pointer to `Self`.
+    unsafe fn into_drm_device(ptr: NonNull<Self>) -> *mut bindings::drm_device {
+        // SAFETY: By the safety requirements of this function, `ptr` is a valid pointer to `Self`.
+        unsafe { &raw mut (*ptr.as_ptr()).dev }.cast()
+    }
+
     /// Not intended to be called externally, except via declare_drm_ioctls!()
     ///
     /// # Safety
@@ -189,8 +204,11 @@ unsafe impl<T: drm::Driver> AlwaysRefCounted for Device<T> {
     }
 
     unsafe fn dec_ref(obj: NonNull<Self>) {
+        // SAFETY: `obj` is a valid pointer to `Self`.
+        let drm_dev = unsafe { Self::into_drm_device(obj) };
+
         // SAFETY: The safety requirements guarantee that the refcount is non-zero.
-        unsafe { bindings::drm_dev_put(obj.cast().as_ptr()) };
+        unsafe { bindings::drm_dev_put(drm_dev) };
     }
 }