Merge tag 'drm-misc-fixes-2025-08-21' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

A bunch of fixes for 6.17:

- analogix_dp: devm_drm_bridge_alloc() error handling fix
- gaudi: Memory deallocation fix
- gpuvm: Documentation warning fix
- hibmc: Various misc fixes
- nouveau: Memory leak fixes, typos
- panic: u64 division handling on 32-bit architectures fix
- rockchip: Kconfig fix, register caching fix
- rust: Memory layout and safety fixes
- tests: Endianness fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <mripard@redhat.com>
Link: https://lore.kernel.org/r/20250821-economic-dandelion-rooster-c57fa9@houat
commit f9915c391c

MAINTAINERS | 14
@@ -8426,6 +8426,17 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
 F: drivers/gpu/drm/scheduler/
 F: include/drm/gpu_scheduler.h
 
+DRM GPUVM
+M: Danilo Krummrich <dakr@kernel.org>
+R: Matthew Brost <matthew.brost@intel.com>
+R: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+R: Alice Ryhl <aliceryhl@google.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: drivers/gpu/drm/drm_gpuvm.c
+F: include/drm/drm_gpuvm.h
+
 DRM LOG
 M: Jocelyn Falempe <jfalempe@redhat.com>
 M: Javier Martinez Canillas <javierm@redhat.com>
@@ -10655,7 +10666,8 @@ S: Maintained
 F: block/partitions/efi.*
 
 HABANALABS PCI DRIVER
-M: Yaron Avizrat <yaron.avizrat@intel.com>
+M: Koby Elbaz <koby.elbaz@intel.com>
+M: Konstantin Sinyuk <konstantin.sinyuk@intel.com>
 L: dri-devel@lists.freedesktop.org
 S: Supported
 C: irc://irc.oftc.net/dri-devel
@@ -10437,7 +10437,7 @@ end:
                        (u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
        WREG32(sob_addr, 0);
 
-       kfree(lin_dma_pkts_arr);
+       kvfree(lin_dma_pkts_arr);
 
        return rc;
 }
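A note on the gaudi change above: swapping kfree() for kvfree() only makes sense if the buffer came from one of the kvmalloc() variants, which may fall back to vmalloc() for large requests; freeing such memory with kfree() is a bug. A minimal sketch of the pairing rule (hypothetical buffer and function, not the habanalabs code):

#include <linux/mm.h>
#include <linux/slab.h>

static int fill_packets(size_t npkts)
{
        /* kvmalloc_array() uses kmalloc for small sizes, vmalloc otherwise */
        u64 *pkts = kvmalloc_array(npkts, sizeof(*pkts), GFP_KERNEL);

        if (!pkts)
                return -ENOMEM;

        /* ... build and submit packets ... */

        kvfree(pkts);   /* correct for both kmalloc and vmalloc backing */
        return 0;
}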
@@ -514,7 +514,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
                return false;
 
        if (drm_gem_is_imported(obj)) {
-               struct dma_buf *dma_buf = obj->dma_buf;
+               struct dma_buf *dma_buf = obj->import_attach->dmabuf;
 
                if (dma_buf->ops != &amdgpu_dmabuf_ops)
                        /* No XGMI with non AMD GPUs */
@@ -317,7 +317,8 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
         */
        if (!vm->is_compute_context || !vm->process_info)
                return 0;
-       if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf))
+       if (!drm_gem_is_imported(obj) ||
+           !dma_buf_is_dynamic(obj->import_attach->dmabuf))
                return 0;
        mutex_lock_nested(&vm->process_info->lock, 1);
        if (!WARN_ON(!vm->process_info->eviction_fence)) {
@@ -1283,7 +1283,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
        struct drm_gem_object *obj = &bo->tbo.base;
 
        if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
-               struct dma_buf *dma_buf = obj->dma_buf;
+               struct dma_buf *dma_buf = obj->import_attach->dmabuf;
                struct drm_gem_object *gobj = dma_buf->priv;
                struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 
@@ -1474,8 +1474,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
 
        dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
                                   &analogix_dp_bridge_funcs);
-       if (!dp)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(dp))
+               return ERR_CAST(dp);
 
        dp->dev = &pdev->dev;
        dp->dpms_mode = DRM_MODE_DPMS_OFF;
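For context on the analogix_dp change: devm_drm_bridge_alloc() reports failure through an ERR_PTR()-encoded pointer rather than NULL, so the old `if (!dp)` test could never fire and the real errno was discarded. A small userspace re-creation of the encoding shows why a NULL check misses it (illustrative only, not kernel headers):

#include <errno.h>
#include <stdio.h>

/* The kernel encodes small negative errnos into the top page of the
 * address space, so a failed call returns a non-NULL pointer. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *dp = ERR_PTR(-EINVAL);    /* what a failing alloc returns */

        if (!dp)
                puts("NULL check fired");       /* never happens */
        if (IS_ERR(dp))
                printf("IS_ERR fired, errno %ld\n", PTR_ERR(dp)); /* -22 */
        return 0;
}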
@@ -2432,6 +2432,8 @@ static const struct drm_gpuvm_ops lock_ops = {
  *
  * The expected usage is:
  *
+ * .. code-block:: c
+ *
  *    vm_bind {
  *        struct drm_exec exec;
  *
@@ -381,6 +381,26 @@ struct DecFifo {
     len: usize,
 }
 
+// On arm32 architecture, dividing an `u64` by a constant will generate a call
+// to `__aeabi_uldivmod` which is not present in the kernel.
+// So use the multiply by inverse method for this architecture.
+fn div10(val: u64) -> u64 {
+    if cfg!(target_arch = "arm") {
+        let val_h = val >> 32;
+        let val_l = val & 0xFFFFFFFF;
+        let b_h: u64 = 0x66666666;
+        let b_l: u64 = 0x66666667;
+
+        let tmp1 = val_h * b_l + ((val_l * b_l) >> 32);
+        let tmp2 = val_l * b_h + (tmp1 & 0xffffffff);
+        let tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32);
+
+        tmp3 >> 2
+    } else {
+        val / 10
+    }
+}
+
 impl DecFifo {
     fn push(&mut self, data: u64, len: usize) {
         let mut chunk = data;
@@ -389,7 +409,7 @@ impl DecFifo {
         }
         for i in 0..len {
             self.decimals[i] = (chunk % 10) as u8;
-            chunk /= 10;
+            chunk = div10(chunk);
         }
         self.len += len;
     }
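The div10() helper above avoids the libgcc __aeabi_uldivmod call by multiplying with a fixed-point reciprocal: 0x6666666666666667 equals ceil(2^66 / 10), the three partial products reconstruct the high 64 bits of the 128-bit product val * B, and the final shift by 2 completes a total shift of 66. A standalone C translation to experiment with (my own harness, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* C translation of the Rust div10(): builds the high 64 bits of
 * val * 0x6666666666666667 from 32x32->64 partial products, then
 * shifts by 2 more so the total shift is 66. */
static uint64_t div10(uint64_t val)
{
        uint64_t val_h = val >> 32, val_l = val & 0xFFFFFFFF;
        uint64_t b_h = 0x66666666, b_l = 0x66666667;

        uint64_t tmp1 = val_h * b_l + ((val_l * b_l) >> 32);
        uint64_t tmp2 = val_l * b_h + (tmp1 & 0xffffffff);
        uint64_t tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32);

        return tmp3 >> 2;
}

int main(void)
{
        const uint64_t samples[] = { 0, 9, 10, 99, 1000000007,
                                     123456789012345ULL };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                assert(div10(samples[i]) == samples[i] / 10);
        puts("div10() agrees with '/ 10' on the samples");
        return 0;
}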
@@ -325,6 +325,17 @@ static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp)
        return hibmc_dp_link_reduce_rate(dp);
 }
 
+static void hibmc_dp_update_caps(struct hibmc_dp_dev *dp)
+{
+       dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+       if (dp->link.cap.link_rate > DP_LINK_BW_8_1 || !dp->link.cap.link_rate)
+               dp->link.cap.link_rate = DP_LINK_BW_8_1;
+
+       dp->link.cap.lanes = dp->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+       if (dp->link.cap.lanes > HIBMC_DP_LANE_NUM_MAX)
+               dp->link.cap.lanes = HIBMC_DP_LANE_NUM_MAX;
+}
+
 int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
 {
        struct hibmc_dp_link *link = &dp->link;
@@ -334,8 +345,7 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
        if (ret)
                drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
 
-       dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
-       dp->link.cap.lanes = 0x2;
+       hibmc_dp_update_caps(dp);
 
        ret = hibmc_dp_get_serdes_rate_cfg(dp);
        if (ret < 0)
@@ -32,7 +32,7 @@
 
 DEFINE_DRM_GEM_FOPS(hibmc_fops);
 
-static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
+static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "hibmc-vblank", "hibmc-hpd" };
 
 static irqreturn_t hibmc_interrupt(int irq, void *arg)
 {
@@ -115,6 +115,8 @@ static const struct drm_mode_config_funcs hibmc_mode_funcs = {
 static int hibmc_kms_init(struct hibmc_drm_private *priv)
 {
        struct drm_device *dev = &priv->dev;
+       struct drm_encoder *encoder;
+       u32 clone_mask = 0;
        int ret;
 
        ret = drmm_mode_config_init(dev);
@@ -154,6 +156,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
                return ret;
        }
 
+       drm_for_each_encoder(encoder, dev)
+               clone_mask |= drm_encoder_mask(encoder);
+
+       drm_for_each_encoder(encoder, dev)
+               encoder->possible_clones = clone_mask;
+
        return 0;
 }
 
@@ -277,7 +285,6 @@ static void hibmc_unload(struct drm_device *dev)
 static int hibmc_msi_init(struct drm_device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev->dev);
-       char name[32] = {0};
        int valid_irq_num;
        int irq;
        int ret;
@@ -292,9 +299,6 @@ static int hibmc_msi_init(struct drm_device *dev)
        valid_irq_num = ret;
 
        for (int i = 0; i < valid_irq_num; i++) {
-               snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
-                        dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
-
                irq = pci_irq_vector(pdev, i);
 
                if (i)
@@ -302,10 +306,10 @@ static int hibmc_msi_init(struct drm_device *dev)
                        ret = devm_request_threaded_irq(&pdev->dev, irq,
                                                        hibmc_dp_interrupt,
                                                        hibmc_dp_hpd_isr,
-                                                       IRQF_SHARED, name, dev);
+                                                       IRQF_SHARED, g_irqs_names_map[i], dev);
                else
                        ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
-                                              IRQF_SHARED, name, dev);
+                                              IRQF_SHARED, g_irqs_names_map[i], dev);
                if (ret) {
                        drm_err(dev, "install irq failed: %d\n", ret);
                        return ret;
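As I read the three hibmc_msi_init() hunks together, the old code handed devm_request_irq() a pointer into the function's stack frame (and reused the same buffer for every vector); the IRQ core keeps that name pointer for the lifetime of the interrupt, e.g. for /proc/interrupts, so it dangled once the function returned. A condensed before/after sketch of the lifetime rule (driver name, handler, and helper are illustrative, not the hibmc code):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* BAD: `name` lives on the stack; the IRQ core stores the pointer
 * long after this frame is gone. */
static int broken_init(struct pci_dev *pdev, irq_handler_t handler, void *dev)
{
        char name[32] = {0};

        snprintf(name, sizeof(name), "mydrv-%s", pci_name(pdev));
        return devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
                                handler, IRQF_SHARED, name, dev);
}

/* GOOD: string literals in static storage outlive the registration. */
static const char *const my_irq_names[] = { "mydrv-vblank", "mydrv-hpd" };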
@@ -323,13 +327,13 @@ static int hibmc_load(struct drm_device *dev)
 
        ret = hibmc_hw_init(priv);
        if (ret)
-               goto err;
+               return ret;
 
        ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
                                    pci_resource_len(pdev, 0));
        if (ret) {
                drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
-               goto err;
+               return ret;
        }
 
        ret = hibmc_kms_init(priv);
@@ -69,6 +69,7 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
 int hibmc_vdac_init(struct hibmc_drm_private *priv);
 
 int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
+void hibmc_ddc_del(struct hibmc_vdac *vdac);
 
 int hibmc_dp_init(struct hibmc_drm_private *priv);
 
@@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
 
        return i2c_bit_add_bus(&vdac->adapter);
 }
+
+void hibmc_ddc_del(struct hibmc_vdac *vdac)
+{
+       i2c_del_adapter(&vdac->adapter);
+}
@@ -53,7 +53,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
 {
        struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
 
-       i2c_del_adapter(&vdac->adapter);
+       hibmc_ddc_del(vdac);
        drm_connector_cleanup(connector);
 }
 
@@ -110,7 +110,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
        ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
        if (ret) {
                drm_err(dev, "failed to init encoder: %d\n", ret);
-               return ret;
+               goto err;
        }
 
        drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
@@ -121,7 +121,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
                                 &vdac->adapter);
        if (ret) {
                drm_err(dev, "failed to init connector: %d\n", ret);
-               return ret;
+               goto err;
        }
 
        drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
@@ -131,4 +131,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
        connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
 
        return 0;
+
+err:
+       hibmc_ddc_del(vdac);
+
+       return ret;
 }
@@ -60,14 +60,14 @@
 * virtual address in the GPU's VA space there is no guarantee that the actual
 * mappings are created in the GPU's MMU. If the given memory is swapped out
 * at the time the bind operation is executed the kernel will stash the mapping
- * details into it's internal alloctor and create the actual MMU mappings once
+ * details into it's internal allocator and create the actual MMU mappings once
 * the memory is swapped back in. While this is transparent for userspace, it is
 * guaranteed that all the backing memory is swapped back in and all the memory
 * mappings, as requested by userspace previously, are actually mapped once the
 * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
 *
 * A VM_BIND job can be executed either synchronously or asynchronously. If
- * exectued asynchronously, userspace may provide a list of syncobjs this job
+ * executed asynchronously, userspace may provide a list of syncobjs this job
 * will wait for and/or a list of syncobj the kernel will signal once the
 * VM_BIND job finished execution. If executed synchronously the ioctl will
 * block until the bind job is finished. For synchronous jobs the kernel will
@@ -82,7 +82,7 @@
 * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
 * an up to date view of the VA space. However, the actual mappings might still
 * be pending. Hence, EXEC jobs require to have the particular fences - of
- * the corresponding VM_BIND jobs they depent on - attached to them.
+ * the corresponding VM_BIND jobs they depend on - attached to them.
 */
 
 static int
@@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
        case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
        default:
                WARN_ON(1);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto done;
        }
 
        memcpy(args->data, argv, argc);
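The nvif_vmm_ctor() change is one of the nouveau leak fixes: the surrounding function allocates an args buffer that the `done` label frees (as the new `goto done` implies), so bailing out with a direct `return` skipped that cleanup. The pattern in miniature, as standalone C with illustrative names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int ctor(int type)
{
        char *args = malloc(128);       /* stands in for the kernel alloc */
        int ret = 0;

        if (!args)
                return -ENOMEM;

        switch (type) {
        case 0:
        case 1:
                break;
        default:
                ret = -EINVAL;  /* was "return -EINVAL;", leaking args */
                goto done;
        }

        /* ... use args ... */
done:
        free(args);
        return ret;
}

int main(void)
{
        printf("ctor(0) = %d, ctor(7) = %d\n", ctor(0), ctor(7));
        return 0;
}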
@@ -325,7 +325,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
 
        rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
        if (IS_ERR_OR_NULL(rpc)) {
-               kfree(buf);
+               kvfree(buf);
                return rpc;
        }
@@ -334,7 +334,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
 
        rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
        if (IS_ERR_OR_NULL(rpc)) {
-               kfree(buf);
+               kvfree(buf);
                return rpc;
        }
 
@@ -39,7 +39,8 @@ impl File {
             _ => return Err(EINVAL),
         };
 
-        getparam.set_value(value);
+        #[allow(clippy::useless_conversion)]
+        getparam.set_value(value.into());
 
         Ok(0)
     }
@@ -53,6 +53,7 @@ config ROCKCHIP_CDN_DP
        bool "Rockchip cdn DP"
        depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
        select DRM_DISPLAY_HELPER
+       select DRM_BRIDGE_CONNECTOR
        select DRM_DISPLAY_DP_HELPER
        help
          This selects support for Rockchip SoC specific extensions
@@ -2579,12 +2579,13 @@ static int vop2_win_init(struct vop2 *vop2)
        }
 
        /*
-        * The window registers are only updated when config done is written.
-        * Until that they read back the old value. As we read-modify-write
-        * these registers mark them as non-volatile. This makes sure we read
-        * the new values from the regmap register cache.
+        * The window and video port registers are only updated when config
+        * done is written. Until that they read back the old value. As we
+        * read-modify-write these registers mark them as non-volatile. This
+        * makes sure we read the new values from the regmap register cache.
         */
        static const struct regmap_range vop2_nonvolatile_range[] = {
+               regmap_reg_range(RK3568_VP0_CTRL_BASE, RK3588_VP3_CTRL_BASE + 255),
                regmap_reg_range(0x1000, 0x23ff),
        };
 
@@ -1033,13 +1033,14 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
                       NULL : &result->dst_pitch;
 
        drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
-       buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
+       buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
        KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
 
        buf = dst.vaddr; /* restore original value of buf */
        memset(buf, 0, dst_size);
 
        drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+       buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
        KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
 }
 
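This KUnit fix only shows up on big-endian machines: the format-conversion helpers emit little-endian pixel data, so each 32-bit word must be swapped into CPU order before comparing against the expected values (the second conversion's result was previously compared raw), and the `__force __le32` cast just tells sparse the plain buffer really holds little-endian data. A tiny userspace analogue of the per-word conversion (illustrative):

#include <stdint.h>
#include <stdio.h>

/* Read a 32-bit little-endian word into CPU order on any host; on
 * little-endian hosts this compiles down to a plain load. */
static uint32_t le32_to_cpu(const uint8_t *b)
{
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
        const uint8_t le_bytes[4] = { 0x78, 0x56, 0x34, 0x12 };

        /* Prints 0x12345678 on both little- and big-endian hosts. */
        printf("0x%08x\n", le32_to_cpu(le_bytes));
        return 0;
}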
@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map)
 */
 static inline void iosys_map_clear(struct iosys_map *map)
 {
-       if (map->is_iomem) {
-               map->vaddr_iomem = NULL;
-               map->is_iomem = false;
-       } else {
-               map->vaddr = NULL;
-       }
+       memset(map, 0, sizeof(*map));
 }
 
 /**
@@ -43,17 +43,6 @@ pub struct Vmalloc;
 /// For more details see [self].
 pub struct KVmalloc;
 
-/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
-fn aligned_size(new_layout: Layout) -> usize {
-    // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
-    let layout = new_layout.pad_to_align();
-
-    // Note that `layout.size()` (after padding) is guaranteed to be a multiple of `layout.align()`
-    // which together with the slab guarantees means the `krealloc` will return a properly aligned
-    // object (see comments in `kmalloc()` for more information).
-    layout.size()
-}
-
 /// # Invariants
 ///
 /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
@@ -88,7 +77,7 @@ impl ReallocFunc {
         old_layout: Layout,
         flags: Flags,
     ) -> Result<NonNull<[u8]>, AllocError> {
-        let size = aligned_size(layout);
+        let size = layout.size();
        let ptr = match ptr {
             Some(ptr) => {
                 if old_layout.size() == 0 {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Kmalloc {
|
||||||
|
/// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
|
||||||
|
/// `layout`.
|
||||||
|
pub fn aligned_layout(layout: Layout) -> Layout {
|
||||||
|
// Note that `layout.size()` (after padding) is guaranteed to be a multiple of
|
||||||
|
// `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
|
||||||
|
// a properly aligned object (see comments in `kmalloc()` for more information).
|
||||||
|
layout.pad_to_align()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
|
// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
|
||||||
// - memory remains valid until it is explicitly freed,
|
// - memory remains valid until it is explicitly freed,
|
||||||
// - passing a pointer to a valid memory allocation is OK,
|
// - passing a pointer to a valid memory allocation is OK,
|
||||||
@@ -135,6 +135,8 @@ unsafe impl Allocator for Kmalloc {
         old_layout: Layout,
         flags: Flags,
     ) -> Result<NonNull<[u8]>, AllocError> {
+        let layout = Kmalloc::aligned_layout(layout);
+
         // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
         unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
     }
@@ -176,6 +178,10 @@ unsafe impl Allocator for KVmalloc {
         old_layout: Layout,
         flags: Flags,
     ) -> Result<NonNull<[u8]>, AllocError> {
+        // `KVmalloc` may use the `Kmalloc` backend, hence we have to enforce a `Kmalloc`
+        // compatible layout.
+        let layout = Kmalloc::aligned_layout(layout);
+
         // TODO: Support alignments larger than PAGE_SIZE.
         if layout.align() > bindings::PAGE_SIZE {
             pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
@@ -22,6 +22,17 @@ pub type Kmalloc = Cmalloc;
 pub type Vmalloc = Kmalloc;
 pub type KVmalloc = Kmalloc;
 
+impl Cmalloc {
+    /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
+    /// `layout`.
+    pub fn aligned_layout(layout: Layout) -> Layout {
+        // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
+        // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
+        // a properly aligned object (see comments in `kmalloc()` for more information).
+        layout.pad_to_align()
+    }
+}
+
 extern "C" {
     #[link_name = "aligned_alloc"]
     fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;
@@ -5,6 +5,7 @@
 //! C header: [`include/linux/drm/drm_device.h`](srctree/include/linux/drm/drm_device.h)
 
 use crate::{
+    alloc::allocator::Kmalloc,
     bindings, device, drm,
     drm::driver::AllocImpl,
     error::from_err_ptr,
|
|||||||
prelude::*,
|
prelude::*,
|
||||||
types::{ARef, AlwaysRefCounted, Opaque},
|
types::{ARef, AlwaysRefCounted, Opaque},
|
||||||
};
|
};
|
||||||
use core::{mem, ops::Deref, ptr, ptr::NonNull};
|
use core::{alloc::Layout, mem, ops::Deref, ptr, ptr::NonNull};
|
||||||
|
|
||||||
#[cfg(CONFIG_DRM_LEGACY)]
|
#[cfg(CONFIG_DRM_LEGACY)]
|
||||||
macro_rules! drm_legacy_fields {
|
macro_rules! drm_legacy_fields {
|
||||||
@@ -53,10 +54,8 @@ macro_rules! drm_legacy_fields {
 ///
 /// `self.dev` is a valid instance of a `struct device`.
 #[repr(C)]
-#[pin_data]
 pub struct Device<T: drm::Driver> {
     dev: Opaque<bindings::drm_device>,
-    #[pin]
     data: T::Data,
 }
 
@@ -96,6 +95,10 @@ impl<T: drm::Driver> Device<T> {
 
     /// Create a new `drm::Device` for a `drm::Driver`.
     pub fn new(dev: &device::Device, data: impl PinInit<T::Data, Error>) -> Result<ARef<Self>> {
+        // `__drm_dev_alloc` uses `kmalloc()` to allocate memory, hence ensure a `kmalloc()`
+        // compatible `Layout`.
+        let layout = Kmalloc::aligned_layout(Layout::new::<Self>());
+
         // SAFETY:
         // - `VTABLE`, as a `const` is pinned to the read-only section of the compilation,
         // - `dev` is valid by its type invarants,
@@ -103,7 +106,7 @@ impl<T: drm::Driver> Device<T> {
             bindings::__drm_dev_alloc(
                 dev.as_raw(),
                 &Self::VTABLE,
-                mem::size_of::<Self>(),
+                layout.size(),
                 mem::offset_of!(Self, dev),
             )
         }
@@ -117,9 +120,13 @@ impl<T: drm::Driver> Device<T> {
         // - `raw_data` is a valid pointer to uninitialized memory.
         // - `raw_data` will not move until it is dropped.
         unsafe { data.__pinned_init(raw_data) }.inspect_err(|_| {
-            // SAFETY: `__drm_dev_alloc()` was successful, hence `raw_drm` must be valid and the
+            // SAFETY: `raw_drm` is a valid pointer to `Self`, given that `__drm_dev_alloc` was
+            // successful.
+            let drm_dev = unsafe { Self::into_drm_device(raw_drm) };
+
+            // SAFETY: `__drm_dev_alloc()` was successful, hence `drm_dev` must be valid and the
             // refcount must be non-zero.
-            unsafe { bindings::drm_dev_put(ptr::addr_of_mut!((*raw_drm.as_ptr()).dev).cast()) };
+            unsafe { bindings::drm_dev_put(drm_dev) };
         })?;
 
         // SAFETY: The reference count is one, and now we take ownership of that reference as a
|
|||||||
unsafe { crate::container_of!(Opaque::cast_from(ptr), Self, dev) }.cast_mut()
|
unsafe { crate::container_of!(Opaque::cast_from(ptr), Self, dev) }.cast_mut()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// # Safety
|
||||||
|
///
|
||||||
|
/// `ptr` must be a valid pointer to `Self`.
|
||||||
|
unsafe fn into_drm_device(ptr: NonNull<Self>) -> *mut bindings::drm_device {
|
||||||
|
// SAFETY: By the safety requirements of this function, `ptr` is a valid pointer to `Self`.
|
||||||
|
unsafe { &raw mut (*ptr.as_ptr()).dev }.cast()
|
||||||
|
}
|
||||||
|
|
||||||
/// Not intended to be called externally, except via declare_drm_ioctls!()
|
/// Not intended to be called externally, except via declare_drm_ioctls!()
|
||||||
///
|
///
|
||||||
/// # Safety
|
/// # Safety
|
||||||
@ -189,8 +204,11 @@ unsafe impl<T: drm::Driver> AlwaysRefCounted for Device<T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
unsafe fn dec_ref(obj: NonNull<Self>) {
|
unsafe fn dec_ref(obj: NonNull<Self>) {
|
||||||
|
// SAFETY: `obj` is a valid pointer to `Self`.
|
||||||
|
let drm_dev = unsafe { Self::into_drm_device(obj) };
|
||||||
|
|
||||||
// SAFETY: The safety requirements guarantee that the refcount is non-zero.
|
// SAFETY: The safety requirements guarantee that the refcount is non-zero.
|
||||||
unsafe { bindings::drm_dev_put(obj.cast().as_ptr()) };
|
unsafe { bindings::drm_dev_put(drm_dev) };
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||