Merge tag 'drm-misc-fixes-2026-03-19' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

A doc warning fix and a memory leak fix for vmwgfx, a deadlock fix and
interrupt handling fixes for imagination, a locking fix for
drm_pagemap, a UAF fix for drm_dev_unplug, and a multi-channel audio
handling fix for dw-hdmi-qp.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patch.msgid.link/20260319-lush-righteous-malamute-e7bb98@houat
This commit is contained in:
Dave Airlie
2026-03-21 01:52:29 +10:00
8 changed files with 114 additions and 80 deletions

View File

@@ -848,7 +848,7 @@ static int dw_hdmi_qp_config_audio_infoframe(struct dw_hdmi_qp *hdmi,
regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1);
regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1);
regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1);
regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[7], 1);
/* Enable ACR, AUDI, AMD */
dw_hdmi_qp_mod(hdmi,

View File

@@ -233,6 +233,7 @@ static void drm_events_release(struct drm_file *file_priv)
void drm_file_free(struct drm_file *file)
{
struct drm_device *dev;
int idx;
if (!file)
return;
@@ -249,9 +250,11 @@ void drm_file_free(struct drm_file *file)
drm_events_release(file);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
drm_dev_enter(dev, &idx)) {
drm_fb_release(file);
drm_property_destroy_user_blobs(dev, file);
drm_dev_exit(idx);
}
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))

View File

@@ -577,10 +577,13 @@ void drm_mode_config_cleanup(struct drm_device *dev)
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
if (list_empty(&fb->filp_head) || drm_framebuffer_read_refcount(fb) > 1) {
struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
drm_framebuffer_print_info(&p, 1, fb);
drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
drm_framebuffer_print_info(&p, 1, fb);
}
list_del_init(&fb->filp_head);
drm_framebuffer_free(&fb->base.refcount);
}

View File

@@ -65,18 +65,14 @@ static void drm_pagemap_cache_fini(void *arg)
drm_dbg(cache->shrinker->drm, "Destroying dpagemap cache.\n");
spin_lock(&cache->lock);
dpagemap = cache->dpagemap;
if (!dpagemap) {
spin_unlock(&cache->lock);
goto out;
}
cache->dpagemap = NULL;
if (dpagemap && !drm_pagemap_shrinker_cancel(dpagemap))
dpagemap = NULL;
spin_unlock(&cache->lock);
if (drm_pagemap_shrinker_cancel(dpagemap)) {
cache->dpagemap = NULL;
spin_unlock(&cache->lock);
if (dpagemap)
drm_pagemap_destroy(dpagemap, false);
}
out:
mutex_destroy(&cache->lookup_mutex);
kfree(cache);
}

View File

@@ -225,29 +225,12 @@ static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
}
if (pvr_dev->has_safety_events) {
int err;
/*
* Ensure the GPU is powered on since some safety events (such
* as ECC faults) can happen outside of job submissions, which
* are otherwise the only time a power reference is held.
*/
err = pvr_power_get(pvr_dev);
if (err) {
drm_err_ratelimited(drm_dev,
"%s: could not take power reference (%d)\n",
__func__, err);
return ret;
}
while (pvr_device_safety_irq_pending(pvr_dev)) {
pvr_device_safety_irq_clear(pvr_dev);
pvr_device_handle_safety_events(pvr_dev);
ret = IRQ_HANDLED;
}
pvr_power_put(pvr_dev);
}
return ret;

View File

@@ -90,11 +90,11 @@ pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
}
static int
pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset, bool rpm_suspend)
{
if (!hard_reset) {
int err;
int err;
if (!hard_reset) {
cancel_delayed_work_sync(&pvr_dev->watchdog.work);
err = pvr_power_request_idle(pvr_dev);
@@ -106,29 +106,47 @@ pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
return err;
}
return pvr_fw_stop(pvr_dev);
if (rpm_suspend) {
/* This also waits for late processing of GPU or firmware IRQs in other cores */
disable_irq(pvr_dev->irq);
}
err = pvr_fw_stop(pvr_dev);
if (err && rpm_suspend)
enable_irq(pvr_dev->irq);
return err;
}
static int
pvr_power_fw_enable(struct pvr_device *pvr_dev)
pvr_power_fw_enable(struct pvr_device *pvr_dev, bool rpm_resume)
{
int err;
if (rpm_resume)
enable_irq(pvr_dev->irq);
err = pvr_fw_start(pvr_dev);
if (err)
return err;
goto out;
err = pvr_wait_for_fw_boot(pvr_dev);
if (err) {
drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
pvr_fw_stop(pvr_dev);
return err;
goto out;
}
queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
msecs_to_jiffies(WATCHDOG_TIME_MS));
return 0;
out:
if (rpm_resume)
disable_irq(pvr_dev->irq);
return err;
}
bool
@@ -361,7 +379,7 @@ pvr_power_device_suspend(struct device *dev)
return -EIO;
if (pvr_dev->fw_dev.booted) {
err = pvr_power_fw_disable(pvr_dev, false);
err = pvr_power_fw_disable(pvr_dev, false, true);
if (err)
goto err_drm_dev_exit;
}
@@ -391,7 +409,7 @@ pvr_power_device_resume(struct device *dev)
goto err_drm_dev_exit;
if (pvr_dev->fw_dev.booted) {
err = pvr_power_fw_enable(pvr_dev);
err = pvr_power_fw_enable(pvr_dev, true);
if (err)
goto err_power_off;
}
@@ -510,7 +528,16 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
}
/* Disable IRQs for the duration of the reset. */
disable_irq(pvr_dev->irq);
if (hard_reset) {
disable_irq(pvr_dev->irq);
} else {
/*
* Soft reset is triggered as a response to a FW command to the Host and is
* processed from the threaded IRQ handler. This code cannot (nor needs to)
* wait for any IRQ processing to complete.
*/
disable_irq_nosync(pvr_dev->irq);
}
do {
if (hard_reset) {
@@ -518,7 +545,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
queues_disabled = true;
}
err = pvr_power_fw_disable(pvr_dev, hard_reset);
err = pvr_power_fw_disable(pvr_dev, hard_reset, false);
if (!err) {
if (hard_reset) {
pvr_dev->fw_dev.booted = false;
@@ -541,7 +568,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
pvr_fw_irq_clear(pvr_dev);
err = pvr_power_fw_enable(pvr_dev);
err = pvr_power_fw_enable(pvr_dev, false);
}
if (err && hard_reset)

View File

@@ -96,12 +96,17 @@ struct vmwgfx_hash_item {
struct vmw_res_func;
struct vmw_bo;
struct vmw_bo;
struct vmw_resource_dirty;
/**
* struct vmw-resource - base class for hardware resources
* struct vmw_resource - base class for hardware resources
*
* @kref: For refcounting.
* @dev_priv: Pointer to the device private for this resource. Immutable.
* @id: Device id. Protected by @dev_priv::resource_lock.
* @used_prio: Priority for this resource.
* @guest_memory_size: Guest memory buffer size. Immutable.
* @res_dirty: Resource contains data not yet in the guest memory buffer.
* Protected by resource reserved.
@@ -117,18 +122,16 @@ struct vmw_res_func;
* pin-count greater than zero. It is not on the resource LRU lists and its
* guest memory buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable.
* @mob_node; Node for the MOB guest memory rbtree. Protected by
* @mob_node: Node for the MOB guest memory rbtree. Protected by
* @guest_memory_bo reserved.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
* @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex
* @dirty: resource's dirty tracker
* @res_free: The resource destructor.
* @hw_destroy: Callback to destroy the resource on the device, as part of
* resource destruction.
*/
struct vmw_bo;
struct vmw_bo;
struct vmw_resource_dirty;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
@@ -196,8 +199,8 @@ struct vmw_surface_offset;
* @quality_level: Quality level.
* @autogen_filter: Filter for automatically generated mipmaps.
* @array_size: Number of array elements for a 1D/2D texture. For cubemap
texture number of faces * array_size. This should be 0 for pre
SM4 device.
* texture number of faces * array_size. This should be 0 for pre
* SM4 device.
* @buffer_byte_stride: Buffer byte stride.
* @num_sizes: Size of @sizes. For GB surface this should always be 1.
* @base_size: Surface dimension.
@@ -265,18 +268,24 @@ struct vmw_fifo_state {
struct vmw_res_cache_entry {
uint32_t handle;
struct vmw_resource *res;
/* private: */
void *private;
/* public: */
unsigned short valid_handle;
unsigned short valid;
};
/**
* enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
* @vmw_dma_alloc_coherent: Use TTM coherent pages
* @vmw_dma_map_populate: Unmap from DMA just after unpopulate
* @vmw_dma_map_bind: Unmap from DMA just before unbind
*/
enum vmw_dma_map_mode {
vmw_dma_alloc_coherent, /* Use TTM coherent pages */
vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
vmw_dma_map_bind, /* Unmap from DMA just before unbind */
vmw_dma_alloc_coherent,
vmw_dma_map_populate,
vmw_dma_map_bind,
/* private: */
vmw_dma_map_max
};
@@ -284,8 +293,11 @@ enum vmw_dma_map_mode {
* struct vmw_sg_table - Scatter/gather table for binding, with additional
* device-specific information.
*
* @mode: which page mapping mode to use
* @pages: Array of page pointers to the pages.
* @addrs: DMA addresses to the pages if coherent pages are used.
* @sgt: Pointer to a struct sg_table with binding information
* @num_regions: Number of regions with device-address contiguous pages
* @num_pages: Number of @pages
*/
struct vmw_sg_table {
enum vmw_dma_map_mode mode;
@@ -353,6 +365,7 @@ struct vmw_ctx_validation_info;
* than from user-space
* @fp: If @kernel is false, points to the file of the client. Otherwise
* NULL
* @filp: DRM state for this file
* @cmd_bounce: Command bounce buffer used for command validation before
* copying to fifo space
* @cmd_bounce_size: Current command bounce buffer size
@@ -729,7 +742,7 @@ extern void vmw_svga_disable(struct vmw_private *dev_priv);
bool vmwgfx_supported(struct vmw_private *vmw);
/**
/*
* GMR utilities - vmwgfx_gmr.c
*/
@@ -739,7 +752,7 @@ extern int vmw_gmr_bind(struct vmw_private *dev_priv,
int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
/*
* User handles
*/
struct vmw_user_object {
@@ -759,7 +772,7 @@ void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
void vmw_user_object_unmap(struct vmw_user_object *uo);
bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
/**
/*
* Resource utilities - vmwgfx_resource.c
*/
struct vmw_user_resource_conv;
@@ -819,7 +832,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
return !RB_EMPTY_NODE(&res->mob_node);
}
/**
/*
* GEM related functionality - vmwgfx_gem.c
*/
struct vmw_bo_params;
@@ -833,7 +846,7 @@ extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
/**
/*
* Misc Ioctl functionality - vmwgfx_ioctl.c
*/
@@ -846,7 +859,7 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/**
/*
* Fifo utilities - vmwgfx_fifo.c
*/
@@ -880,9 +893,11 @@ extern int vmw_cmd_flush(struct vmw_private *dev_priv,
/**
* vmw_fifo_caps - Returns the capabilities of the FIFO command
* vmw_fifo_caps - Get the capabilities of the FIFO command
* queue or 0 if fifo memory isn't present.
* @dev_priv: The device private context
*
* Returns: capabilities of the FIFO command or %0 if fifo memory not present
*/
static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
{
@@ -893,9 +908,11 @@ static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
/**
* vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
* is enabled in the FIFO.
* vmw_is_cursor_bypass3_enabled - check Cursor Bypass 3 enabled setting
* in the FIFO.
* @dev_priv: The device private context
*
* Returns: %true iff Cursor Bypass 3 is enabled in the FIFO
*/
static inline bool
vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
@@ -903,7 +920,7 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
}
/**
/*
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
@@ -927,7 +944,7 @@ extern void vmw_piter_start(struct vmw_piter *viter,
*
* @viter: Pointer to the iterator to advance.
*
* Returns false if past the list of pages, true otherwise.
* Returns: false if past the list of pages, true otherwise.
*/
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
@@ -939,7 +956,7 @@ static inline bool vmw_piter_next(struct vmw_piter *viter)
*
* @viter: Pointer to the iterator
*
* Returns the DMA address of the page pointed to by @viter.
* Returns: the DMA address of the page pointed to by @viter.
*/
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
@@ -951,14 +968,14 @@ static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
*
* @viter: Pointer to the iterator
*
* Returns the DMA address of the page pointed to by @viter.
* Returns: the DMA address of the page pointed to by @viter.
*/
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
return viter->pages[viter->i];
}
/**
/*
* Command submission - vmwgfx_execbuf.c
*/
@@ -993,7 +1010,7 @@ extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
int32_t out_fence_fd);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
/*
* IRQs and wating - vmwgfx_irq.c
*/
@@ -1016,7 +1033,7 @@ bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count);
/**
/*
* Kernel modesetting - vmwgfx_kms.c
*/
@@ -1048,7 +1065,7 @@ extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
/**
/*
* Overlay control - vmwgfx_overlay.c
*/
@@ -1063,20 +1080,20 @@ int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
/**
/*
* GMR Id manager
*/
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
/**
/*
* System memory manager
*/
int vmw_sys_man_init(struct vmw_private *dev_priv);
void vmw_sys_man_fini(struct vmw_private *dev_priv);
/**
/*
* Prime - vmwgfx_prime.c
*/
@@ -1292,7 +1309,7 @@ extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
* @line: The current line of the blit.
* @line_offset: Offset of the current line segment.
* @cpp: Bytes per pixel (granularity information).
* @memcpy: Which memcpy function to use.
* @do_cpy: Which memcpy function to use.
*/
struct vmw_diff_cpy {
struct drm_rect rect;
@@ -1380,13 +1397,14 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
/**
* VMW_DEBUG_KMS - Debug output for kernel mode-setting
* @fmt: format string for the args
*
* This macro is for debugging vmwgfx mode-setting code.
*/
#define VMW_DEBUG_KMS(fmt, ...) \
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/**
/*
* Inline helper functions
*/
@@ -1417,11 +1435,13 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
/**
* vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
*
* @vmw: The device private structure
* @fifo_reg: The fifo register to read from
*
* This function is intended to be equivalent to ioread32() on
* memremap'd memory, but without byteswapping.
*
* Returns: the value read
*/
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
@@ -1431,8 +1451,9 @@ static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
/**
* vmw_fifo_mem_write - Perform a MMIO write to volatile memory
*
* @addr: The fifo register to write to
* @vmw: The device private structure
* @fifo_reg: The fifo register to write to
* @value: The value to write
*
* This function is intended to be equivalent to iowrite32 on
* memremap'd memory, but without byteswapping.

View File

@@ -771,7 +771,8 @@ err_out:
ret = vmw_bo_dirty_add(bo);
if (!ret && surface && surface->res.func->dirty_alloc) {
surface->res.coherent = true;
ret = surface->res.func->dirty_alloc(&surface->res);
if (surface->res.dirty == NULL)
ret = surface->res.func->dirty_alloc(&surface->res);
}
ttm_bo_unreserve(&bo->tbo);
}