Merge tag 'amd-drm-next-6.17-2025-07-17' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.17-2025-07-17:

amdgpu:
- Partition fixes
- Reset fixes
- RAS fixes
- i2c fix
- MPC updates
- DSC cleanup
- EDID fixes
- Display idle D3 update
- IPS updates
- DMUB updates
- Retimer fix
- Replay fixes
- Fix DC memory leak
- Initial support for smartmux
- DCN 4.0.1 degamma LUT fix
- Per queue reset cleanups
- Track ring state associated with a fence
- SR-IOV fixes
- SMU fixes
- Per queue reset improvements for GC 9+ compute
- Per queue reset improvements for GC 10+ gfx
- Per queue reset improvements for SDMA 5+
- Per queue reset improvements for JPEG 2+
- Per queue reset improvements for VCN 2+
- GC 8 fix
- ISP updates

amdkfd:
- Enable KFD on LoongArch

radeon:
- Drop console lock during suspend/resume

UAPI:
- Add userq slot info to INFO IOCTL
  Used for IGT userq validation tests (https://lists.freedesktop.org/archives/igt-dev/2025-July/093228.html)

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20250717213827.2061581-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit acab5fbd77
@@ -1723,7 +1723,7 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return
#endif

#if defined(CONFIG_DRM_AMD_ISP)
int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]);
int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
#endif

void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
@@ -1545,7 +1545,7 @@ static int isp_match_acpi_device_ids(struct device *dev, const void *data)
return acpi_match_device(data, dev) ? 1 : 0;
}

int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
{
struct device *pdev __free(put_device) = NULL;
struct acpi_device *acpi_pdev;
@@ -1559,7 +1559,7 @@ int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
if (!acpi_pdev)
return -ENODEV;

strscpy(*hid, acpi_device_hid(acpi_pdev));
*dev = acpi_pdev;

return 0;
}
@@ -4220,18 +4220,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
int ret = 0;

/*
* By default timeout for non compute jobs is 10000
* and 60000 for compute jobs.
* In SR-IOV or passthrough mode, timeout for compute
* jobs are 60000 by default.
* By default timeout for jobs is 10 sec
*/
adev->gfx_timeout = msecs_to_jiffies(10000);
adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
if (amdgpu_sriov_vf(adev))
adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
else
adev->compute_timeout = msecs_to_jiffies(60000);

if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
while ((timeout_setting = strsep(&input, ",")) &&
@@ -362,12 +362,12 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* The second one is for Compute. The third and fourth ones are
* for SDMA and Video.
*
* By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
* jobs is 10000. The timeout for compute is 60000.
* By default(with no lockup_timeout settings), the timeout for all jobs is 10000.
*/
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
"for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
"for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
MODULE_PARM_DESC(lockup_timeout,
"GPU lockup timeout in ms (default: 10000 for all jobs. "
"0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
"for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);

/**
@@ -2512,6 +2512,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);

amdgpu_ras_eeprom_check_and_recover(adev);
amdgpu_xcp_dev_unplug(adev);
amdgpu_gmc_prepare_nps_mode_change(adev);
drm_dev_unplug(dev);
@@ -120,6 +120,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
if (!am_fence)
return -ENOMEM;
am_fence->context = 0;
} else {
am_fence = af;
}
@@ -127,6 +128,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
am_fence->ring = ring;

seq = ++ring->fence_drv.sync_seq;
am_fence->seq = seq;
if (af) {
dma_fence_init(fence, &amdgpu_job_fence_ops,
&ring->fence_drv.lock,
@@ -141,6 +143,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,

amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
amdgpu_fence_save_wptr(fence);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -253,6 +256,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)

do {
struct dma_fence *fence, **ptr;
struct amdgpu_fence *am_fence;

++last_seq;
last_seq &= drv->num_fences_mask;
@@ -265,6 +269,12 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;

/* Save the wptr in the fence driver so we know what the last processed
* wptr was. This is required for re-emitting the ring state for
* queues that are reset but are not guilty and thus have no guilty fence.
*/
am_fence = container_of(fence, struct amdgpu_fence, base);
drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -727,6 +737,86 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
}


/**
* Kernel queue reset handling
*
* The driver can reset individual queues for most engines, but those queues
* may contain work from multiple contexts. Resetting the queue will reset
* lose all of that state. In order to minimize the collateral damage, the
* driver will save the ring contents which are not associated with the guilty
* context prior to resetting the queue. After resetting the queue the queue
* contents from the other contexts is re-emitted to the rings so that it can
* be processed by the engine. To handle this, we save the queue's write
* pointer (wptr) in the fences associated with each context. If we get a
* queue timeout, we can then use the wptrs from the fences to determine
* which data needs to be saved out of the queue's ring buffer.
*/

/**
* amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
*
* @fence: fence of the ring to signal
*
*/
void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
{
dma_fence_set_error(&fence->base, -ETIME);
amdgpu_fence_write(fence->ring, fence->seq);
amdgpu_fence_process(fence->ring);
}

void amdgpu_fence_save_wptr(struct dma_fence *fence)
{
struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);

am_fence->wptr = am_fence->ring->wptr;
}

static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
u64 start_wptr, u32 end_wptr)
{
unsigned int first_idx = start_wptr & ring->buf_mask;
unsigned int last_idx = end_wptr & ring->buf_mask;
unsigned int i;

/* Backup the contents of the ring buffer. */
for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
}

void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
struct amdgpu_fence *guilty_fence)
{
struct dma_fence *unprocessed;
struct dma_fence __rcu **ptr;
struct amdgpu_fence *fence;
u64 wptr, i, seqno;

seqno = amdgpu_fence_read(ring);
wptr = ring->fence_drv.signalled_wptr;
ring->ring_backup_entries_to_copy = 0;

for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
rcu_read_lock();
unprocessed = rcu_dereference(*ptr);

if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
fence = container_of(unprocessed, struct amdgpu_fence, base);

/* save everything if the ring is not guilty, otherwise
* just save the content from other contexts.
*/
if (!guilty_fence || (fence->context != guilty_fence->context))
amdgpu_ring_backup_unprocessed_command(ring, wptr,
fence->wptr);
wptr = fence->wptr;
}
rcu_read_unlock();
}
}

/*
* Common fence implementation
*/
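For illustration only (not part of the diff): a minimal sketch of how an engine-specific .reset callback can sit on top of the reset helpers added later in this series, mirroring the pattern the GFX and compute reset paths below are converted to. The names prefixed with "example_" are hypothetical placeholders.

static int example_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
			      struct amdgpu_fence *timedout_fence)
{
	int r;

	/* stop the scheduler and back up the ring contents that do not
	 * belong to the guilty context
	 */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	/* engine-specific queue reset goes here, e.g. a KIQ unmap/map
	 * sequence or an MES legacy queue reset (hypothetical helper)
	 */
	r = example_engine_queue_reset(ring, vmid);
	if (r)
		return r;

	/* test the ring, force-complete the guilty fence, re-emit the
	 * backed-up commands and restart the scheduler
	 */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}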
@@ -139,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
int vmid = AMDGPU_JOB_GET_VMID(job);
bool need_pipe_sync = false;
unsigned int cond_exec;

unsigned int i;
int r = 0;

@@ -156,6 +155,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
gds_va = job->gds_va;
init_shadow = job->init_shadow;
af = &job->hw_fence;
/* Save the context of the job for reset handling.
* The driver needs this so it can skip the ring
* contents for guilty contexts.
*/
af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
} else {
vm = NULL;
fence_ctx = 0;
@@ -307,8 +311,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
ring->funcs->emit_wave_limit(ring, false);

/* Save the wptr associated with this fence.
* This must be last for resets to work properly
* as we need to save the wptr associated with this
* fence so we know what rings contents to backup
* after we reset the queue.
*/
amdgpu_fence_save_wptr(*f);

amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);

return 0;
}

@@ -624,7 +624,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
/* When the threshold is reached,the interrupt source may not be enabled.return -EINVAL */
if (amdgpu_ras_is_rma(adev))
if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
return -EINVAL;

if (!adev->irq.installed)
@@ -33,6 +33,8 @@
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"

#define ISP_MC_ADDR_ALIGN (1024 * 32)

/**
* isp_hw_init - start and test isp block
*
@@ -141,6 +143,179 @@ static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}

static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
{
if (isp_parent != amdgpu_dev)
return -EINVAL;

return 0;
}

/**
* isp_user_buffer_alloc - create user buffer object (BO) for isp
*
* @dev: isp device handle
* @dmabuf: DMABUF handle for isp buffer allocated in system memory
* @buf_obj: GPU buffer object handle to initialize
* @buf_addr: GPU addr of the pinned BO to initialize
*
* Imports isp DMABUF to allocate and pin a user BO for isp internal use. It does
* GART alloc to generate GPU addr for BO to make it accessible through the
* GART aperture for ISP HW.
*
* This function is exported to allow the V4L2 isp device external to drm device
* to create and access the isp user BO.
*
* Returns:
* 0 on success, negative error code otherwise.
*/
int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
void **buf_obj, u64 *buf_addr)
{
struct platform_device *ispdev = to_platform_device(dev);
const struct isp_platform_data *isp_pdata;
struct amdgpu_device *adev;
struct mfd_cell *mfd_cell;
struct amdgpu_bo *bo;
u64 gpu_addr;
int ret;

if (WARN_ON(!ispdev))
return -ENODEV;

if (WARN_ON(!buf_obj))
return -EINVAL;

if (WARN_ON(!buf_addr))
return -EINVAL;

mfd_cell = &ispdev->mfd_cell[0];
if (!mfd_cell)
return -ENODEV;

isp_pdata = mfd_cell->platform_data;
adev = isp_pdata->adev;

ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
if (ret)
return ret;

ret = amdgpu_bo_create_isp_user(adev, dmabuf,
AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
if (ret) {
drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret);
return ret;
}

*buf_obj = (void *)bo;
*buf_addr = gpu_addr;

return 0;
}
EXPORT_SYMBOL(isp_user_buffer_alloc);

/**
* isp_user_buffer_free - free isp user buffer object (BO)
*
* @buf_obj: amdgpu isp user BO to free
*
* unpin and unref BO for isp internal use.
*
* This function is exported to allow the V4L2 isp device
* external to drm device to free the isp user BO.
*/
void isp_user_buffer_free(void *buf_obj)
{
amdgpu_bo_free_isp_user(buf_obj);
}
EXPORT_SYMBOL(isp_user_buffer_free);

/**
* isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp
*
* @dev: isp device handle
* @size: size for the new BO
* @buf_obj: GPU BO handle to initialize
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: CPU address mapping of BO
*
* Allocates and pins a kernel BO for internal isp firmware use.
*
* This function is exported to allow the V4L2 isp device
* external to drm device to create and access the kernel BO.
*
* Returns:
* 0 on success, negative error code otherwise.
*/
int isp_kernel_buffer_alloc(struct device *dev, u64 size,
void **buf_obj, u64 *gpu_addr, void **cpu_addr)
{
struct platform_device *ispdev = to_platform_device(dev);
struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
const struct isp_platform_data *isp_pdata;
struct amdgpu_device *adev;
struct mfd_cell *mfd_cell;
int ret;

if (WARN_ON(!ispdev))
return -ENODEV;

if (WARN_ON(!buf_obj))
return -EINVAL;

if (WARN_ON(!gpu_addr))
return -EINVAL;

if (WARN_ON(!cpu_addr))
return -EINVAL;

mfd_cell = &ispdev->mfd_cell[0];
if (!mfd_cell)
return -ENODEV;

isp_pdata = mfd_cell->platform_data;
adev = isp_pdata->adev;

ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
if (ret)
return ret;

ret = amdgpu_bo_create_kernel(adev,
size,
ISP_MC_ADDR_ALIGN,
AMDGPU_GEM_DOMAIN_GTT,
bo,
gpu_addr,
cpu_addr);
if (!cpu_addr || ret) {
drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret);
return ret;
}

return 0;
}
EXPORT_SYMBOL(isp_kernel_buffer_alloc);

/**
* isp_kernel_buffer_free - free isp kernel buffer object (BO)
*
* @buf_obj: amdgpu isp user BO to free
* @gpu_addr: GPU addr of isp kernel BO
* @cpu_addr: CPU addr of isp kernel BO
*
* unmaps and unpin a isp kernel BO.
*
* This function is exported to allow the V4L2 isp device
* external to drm device to free the kernel BO.
*/
void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
{
struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;

amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
}
EXPORT_SYMBOL(isp_kernel_buffer_free);

static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
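For illustration only (not part of the diff): a minimal sketch of how an external ISP driver could use the exported kernel-buffer helpers above. The function name, the device pointer and the buffer size are hypothetical placeholders; the helper signatures match the ones added in this hunk.

/* assumes "isp_dev" is the amdgpu ISP platform device handed to the external driver */
static int example_isp_fw_buffer_setup(struct device *isp_dev)
{
	void *buf_obj;
	void *cpu_addr;
	u64 gpu_addr;
	int ret;

	ret = isp_kernel_buffer_alloc(isp_dev, SZ_1M, &buf_obj, &gpu_addr, &cpu_addr);
	if (ret)
		return ret;

	/* ... point the ISP firmware at gpu_addr and fill the buffer via cpu_addr ... */

	isp_kernel_buffer_free(&buf_obj, &gpu_addr, &cpu_addr);
	return 0;
}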
@@ -28,18 +28,13 @@
#ifndef __AMDGPU_ISP_H__
#define __AMDGPU_ISP_H__

#include <drm/amd/isp.h>
#include <linux/pm_domain.h>

#define ISP_REGS_OFFSET_END 0x629A4

struct amdgpu_isp;

struct isp_platform_data {
void *adev;
u32 asic_type;
resource_size_t base_rmmio_size;
};

struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
@@ -112,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_job_core_dump(adev, job);

if (amdgpu_gpu_recovery &&
amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
s_job->sched->name);
@@ -131,10 +132,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
/* attempt a per ring reset */
if (unlikely(adev->debug_disable_gpu_ring_reset)) {
dev_err(adev->dev, "Ring reset disabled by debug mask\n");
} else if (amdgpu_gpu_recovery && ring->funcs->reset) {
} else if (amdgpu_gpu_recovery &&
amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
ring->funcs->reset) {
dev_err(adev->dev, "Starting %s ring reset\n",
s_job->sched->name);
r = amdgpu_ring_reset(ring, job->vmid, NULL);
r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
if (!r) {
atomic_inc(&ring->adev->gpu_reset_counter);
dev_err(adev->dev, "Ring %s reset succeeded\n",
@@ -399,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
uint32_t ib_size_alignment = 0;
enum amd_ip_block_type type;
unsigned int num_rings = 0;
uint32_t num_slots = 0;
unsigned int i, j;

if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
@@ -411,6 +412,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->gfx.gfx_ring[i].sched.ready &&
!adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;

if (!adev->gfx.disable_uq) {
for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
num_slots += hweight32(adev->mes.gfx_hqd_mask[i]);
}

ib_start_alignment = 32;
ib_size_alignment = 32;
break;
@@ -420,6 +427,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->gfx.compute_ring[i].sched.ready &&
!adev->gfx.compute_ring[i].no_user_submission)
++num_rings;

if (!adev->sdma.disable_uq) {
for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++)
num_slots += hweight32(adev->mes.compute_hqd_mask[i]);
}

ib_start_alignment = 32;
ib_size_alignment = 32;
break;
@@ -429,6 +442,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->sdma.instance[i].ring.sched.ready &&
!adev->sdma.instance[i].ring.no_user_submission)
++num_rings;

if (!adev->gfx.disable_uq) {
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++)
num_slots += hweight32(adev->mes.sdma_hqd_mask[i]);
}

ib_start_alignment = 256;
ib_size_alignment = 4;
break;
@@ -570,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
}
result->capabilities_flags = 0;
result->available_rings = (1 << num_rings) - 1;
result->userq_num_slots = num_slots;
result->ib_start_alignment = ib_start_alignment;
result->ib_size_alignment = ib_size_alignment;
return 0;
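For illustration only (not part of the diff): one way userspace could read the new userq_num_slots value through the existing AMDGPU INFO ioctl, as the commit message's UAPI note describes. Everything except userq_num_slots comes from the stock amdgpu uapi header; the include path and error handling are simplified assumptions.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <libdrm/amdgpu_drm.h>	/* path may vary by distro */

static int example_query_gfx_userq_slots(int drm_fd, uint32_t *slots)
{
	struct drm_amdgpu_info_hw_ip ip_info;
	struct drm_amdgpu_info request;

	memset(&ip_info, 0, sizeof(ip_info));
	memset(&request, 0, sizeof(request));

	request.return_pointer = (uintptr_t)&ip_info;
	request.return_size = sizeof(ip_info);
	request.query = AMDGPU_INFO_HW_IP_INFO;
	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;
	request.query_hw_ip.ip_instance = 0;

	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_INFO, &request))
		return -1;

	*slots = ip_info.userq_num_slots;	/* field added by this series */
	return 0;
}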
@ -352,7 +352,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(amdgpu_bo_create_kernel);
|
||||
|
||||
/**
|
||||
* amdgpu_bo_create_isp_user - create user BO for isp
|
||||
@ -421,7 +420,6 @@ error_unreserve:
|
||||
|
||||
return r;
|
||||
}
|
||||
EXPORT_SYMBOL(amdgpu_bo_create_isp_user);
|
||||
|
||||
/**
|
||||
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
|
||||
@ -525,7 +523,6 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
|
||||
if (cpu_addr)
|
||||
*cpu_addr = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(amdgpu_bo_free_kernel);
|
||||
|
||||
/**
|
||||
* amdgpu_bo_free_isp_user - free BO for isp use
|
||||
@ -548,7 +545,6 @@ void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
|
||||
}
|
||||
amdgpu_bo_unref(&bo);
|
||||
}
|
||||
EXPORT_SYMBOL(amdgpu_bo_free_isp_user);
|
||||
|
||||
/* Validate bo size is bit bigger than the request domain */
|
||||
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
|
||||
|
@ -2857,6 +2857,13 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
|
||||
if (amdgpu_umc_pages_in_a_row(adev, err_data,
|
||||
bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
|
||||
return -EINVAL;
|
||||
for (i = 0; i < adev->umc.retire_unit; i++) {
|
||||
err_data->err_addr[i].address = bps[0].address;
|
||||
err_data->err_addr[i].mem_channel = bps[0].mem_channel;
|
||||
err_data->err_addr[i].bank = bps[0].bank;
|
||||
err_data->err_addr[i].err_type = bps[0].err_type;
|
||||
err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
|
||||
}
|
||||
} else {
|
||||
if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
|
||||
return -EINVAL;
|
||||
@ -2888,6 +2895,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
|
||||
struct eeprom_table_record *bps, struct ras_err_data *err_data,
|
||||
enum amdgpu_memory_partition nps)
|
||||
{
|
||||
int i = 0;
|
||||
enum amdgpu_memory_partition save_nps;
|
||||
|
||||
save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
|
||||
@ -2897,6 +2905,13 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
|
||||
if (amdgpu_umc_pages_in_a_row(adev, err_data,
|
||||
bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
|
||||
return -EINVAL;
|
||||
for (i = 0; i < adev->umc.retire_unit; i++) {
|
||||
err_data->err_addr[i].address = bps->address;
|
||||
err_data->err_addr[i].mem_channel = bps->mem_channel;
|
||||
err_data->err_addr[i].bank = bps->bank;
|
||||
err_data->err_addr[i].err_type = bps->err_type;
|
||||
err_data->err_addr[i].mcumc_id = bps->mcumc_id;
|
||||
}
|
||||
} else {
|
||||
if (bps->address) {
|
||||
if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
|
||||
|
@ -1531,3 +1531,31 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
|
||||
|
||||
return res < 0 ? res : 0;
|
||||
}
|
||||
|
||||
void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||
struct amdgpu_ras_eeprom_control *control;
|
||||
int res;
|
||||
|
||||
if (!__is_ras_eeprom_supported(adev) || !ras)
|
||||
return;
|
||||
control = &ras->eeprom_control;
|
||||
if (!control->is_eeprom_valid)
|
||||
return;
|
||||
res = __verify_ras_table_checksum(control);
|
||||
if (res) {
|
||||
dev_warn(adev->dev,
|
||||
"RAS table incorrect checksum or error:%d, try to recover\n",
|
||||
res);
|
||||
if (!amdgpu_ras_eeprom_reset_table(control))
|
||||
if (!amdgpu_ras_save_bad_pages(adev, NULL))
|
||||
if (!__verify_ras_table_checksum(control)) {
|
||||
dev_info(adev->dev, "RAS table recovery succeed\n");
|
||||
return;
|
||||
}
|
||||
dev_err(adev->dev, "RAS table recovery failed\n");
|
||||
control->is_eeprom_valid = false;
|
||||
}
|
||||
return;
|
||||
}
|
@ -161,6 +161,8 @@ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
|
||||
|
||||
int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
|
||||
|
||||
void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
|
||||
|
||||
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
|
||||
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
|
||||
|
||||
|
@ -99,6 +99,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
|
||||
*
|
||||
* @ring: amdgpu_ring structure holding ring information
|
||||
* @ndw: number of dwords to allocate in the ring buffer
|
||||
*
|
||||
* Allocate @ndw dwords in the ring buffer (all asics).
|
||||
* doesn't check the max_dw limit as we may be reemitting
|
||||
* several submissions.
|
||||
*/
|
||||
static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
|
||||
{
|
||||
/* Align requested size with padding so unlock_commit can
|
||||
* pad safely */
|
||||
ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
|
||||
|
||||
ring->count_dw = ndw;
|
||||
ring->wptr_old = ring->wptr;
|
||||
|
||||
if (ring->funcs->begin_use)
|
||||
ring->funcs->begin_use(ring);
|
||||
}
|
||||
|
||||
/** amdgpu_ring_insert_nop - insert NOP packets
|
||||
*
|
||||
* @ring: amdgpu_ring structure holding ring information
|
||||
@ -333,6 +356,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
|
||||
/* Initialize cached_rptr to 0 */
|
||||
ring->cached_rptr = 0;
|
||||
|
||||
if (!ring->ring_backup) {
|
||||
ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
|
||||
if (!ring->ring_backup)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Allocate ring buffer */
|
||||
if (ring->ring_obj == NULL) {
|
||||
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
|
||||
@ -342,6 +371,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
|
||||
(void **)&ring->ring);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring create failed\n", r);
|
||||
kvfree(ring->ring_backup);
|
||||
return r;
|
||||
}
|
||||
amdgpu_ring_clear_ring(ring);
|
||||
@ -385,6 +415,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
|
||||
amdgpu_bo_free_kernel(&ring->ring_obj,
|
||||
&ring->gpu_addr,
|
||||
(void **)&ring->ring);
|
||||
kvfree(ring->ring_backup);
|
||||
ring->ring_backup = NULL;
|
||||
|
||||
dma_fence_put(ring->vmid_wait);
|
||||
ring->vmid_wait = NULL;
|
||||
@ -427,6 +459,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
|
||||
{
|
||||
unsigned long flags;
|
||||
ktime_t deadline;
|
||||
bool ret;
|
||||
|
||||
if (unlikely(ring->adev->debug_disable_soft_recovery))
|
||||
return false;
|
||||
@ -441,12 +474,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
|
||||
dma_fence_set_error(fence, -ENODATA);
|
||||
spin_unlock_irqrestore(fence->lock, flags);
|
||||
|
||||
atomic_inc(&ring->adev->gpu_reset_counter);
|
||||
while (!dma_fence_is_signaled(fence) &&
|
||||
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
|
||||
ring->funcs->soft_recovery(ring, vmid);
|
||||
|
||||
return dma_fence_is_signaled(fence);
|
||||
ret = dma_fence_is_signaled(fence);
|
||||
/* increment the counter only if soft reset worked */
|
||||
if (ret)
|
||||
atomic_inc(&ring->adev->gpu_reset_counter);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -753,3 +790,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
|
||||
struct amdgpu_fence *guilty_fence)
|
||||
{
|
||||
/* Stop the scheduler to prevent anybody else from touching the ring buffer. */
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
/* back up the non-guilty commands */
|
||||
amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
|
||||
}
|
||||
|
||||
int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
|
||||
struct amdgpu_fence *guilty_fence)
|
||||
{
|
||||
unsigned int i;
|
||||
int r;
|
||||
|
||||
/* verify that the ring is functional */
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* signal the fence of the bad job */
|
||||
if (guilty_fence)
|
||||
amdgpu_fence_driver_guilty_force_completion(guilty_fence);
|
||||
/* Re-emit the non-guilty commands */
|
||||
if (ring->ring_backup_entries_to_copy) {
|
||||
amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
|
||||
for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
|
||||
amdgpu_ring_write(ring, ring->ring_backup[i]);
|
||||
amdgpu_ring_commit(ring);
|
||||
}
|
||||
/* Start the scheduler again */
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
|
||||
u32 reset_type)
|
||||
{
|
||||
switch (ring->funcs->type) {
|
||||
case AMDGPU_RING_TYPE_GFX:
|
||||
if (ring->adev->gfx.gfx_supported_reset & reset_type)
|
||||
return true;
|
||||
break;
|
||||
case AMDGPU_RING_TYPE_COMPUTE:
|
||||
if (ring->adev->gfx.compute_supported_reset & reset_type)
|
||||
return true;
|
||||
break;
|
||||
case AMDGPU_RING_TYPE_SDMA:
|
||||
if (ring->adev->sdma.supported_reset & reset_type)
|
||||
return true;
|
||||
break;
|
||||
case AMDGPU_RING_TYPE_VCN_DEC:
|
||||
case AMDGPU_RING_TYPE_VCN_ENC:
|
||||
if (ring->adev->vcn.supported_reset & reset_type)
|
||||
return true;
|
||||
break;
|
||||
case AMDGPU_RING_TYPE_VCN_JPEG:
|
||||
if (ring->adev->jpeg.supported_reset & reset_type)
|
||||
return true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
@ -118,6 +118,7 @@ struct amdgpu_fence_driver {
|
||||
/* sync_seq is protected by ring emission lock */
|
||||
uint32_t sync_seq;
|
||||
atomic_t last_seq;
|
||||
u64 signalled_wptr;
|
||||
bool initialized;
|
||||
struct amdgpu_irq_src *irq_src;
|
||||
unsigned irq_type;
|
||||
@ -141,6 +142,12 @@ struct amdgpu_fence {
|
||||
/* RB, DMA, etc. */
|
||||
struct amdgpu_ring *ring;
|
||||
ktime_t start_timestamp;
|
||||
|
||||
/* wptr for the fence for resets */
|
||||
u64 wptr;
|
||||
/* fence context for resets */
|
||||
u64 context;
|
||||
uint32_t seq;
|
||||
};
|
||||
|
||||
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
|
||||
@ -148,6 +155,8 @@ extern const struct drm_sched_backend_ops amdgpu_sched_ops;
|
||||
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
|
||||
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
|
||||
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
|
||||
void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence);
|
||||
void amdgpu_fence_save_wptr(struct dma_fence *fence);
|
||||
|
||||
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
|
||||
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
|
||||
@ -284,6 +293,9 @@ struct amdgpu_ring {
|
||||
|
||||
struct amdgpu_bo *ring_obj;
|
||||
uint32_t *ring;
|
||||
/* backups for resets */
|
||||
uint32_t *ring_backup;
|
||||
unsigned int ring_backup_entries_to_copy;
|
||||
unsigned rptr_offs;
|
||||
u64 rptr_gpu_addr;
|
||||
volatile u32 *rptr_cpu_addr;
|
||||
@ -550,4 +562,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev);
|
||||
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
|
||||
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
|
||||
void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
|
||||
struct amdgpu_fence *guilty_fence);
|
||||
void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
|
||||
struct amdgpu_fence *guilty_fence);
|
||||
int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
|
||||
struct amdgpu_fence *guilty_fence);
|
||||
bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
|
||||
u32 reset_type);
|
||||
#endif
|
||||
|
@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
|
||||
|
||||
mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
|
||||
mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
|
||||
mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
|
||||
atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
|
||||
INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
|
||||
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
|
||||
@ -1451,3 +1452,78 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vcn_reset_engine - Reset a specific VCN engine
|
||||
* @adev: Pointer to the AMDGPU device
|
||||
* @instance_id: VCN engine instance to reset
|
||||
*
|
||||
* Returns: 0 on success, or a negative error code on failure.
|
||||
*/
|
||||
static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
|
||||
uint32_t instance_id)
|
||||
{
|
||||
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
|
||||
int r, i;
|
||||
|
||||
mutex_lock(&vinst->engine_reset_mutex);
|
||||
/* Stop the scheduler's work queue for the dec and enc rings if they are running.
|
||||
* This ensures that no new tasks are submitted to the queues while
|
||||
* the reset is in progress.
|
||||
*/
|
||||
drm_sched_wqueue_stop(&vinst->ring_dec.sched);
|
||||
for (i = 0; i < vinst->num_enc_rings; i++)
|
||||
drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
|
||||
|
||||
/* Perform the VCN reset for the specified instance */
|
||||
r = vinst->reset(vinst);
|
||||
if (r)
|
||||
goto unlock;
|
||||
r = amdgpu_ring_test_ring(&vinst->ring_dec);
|
||||
if (r)
|
||||
goto unlock;
|
||||
for (i = 0; i < vinst->num_enc_rings; i++) {
|
||||
r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
|
||||
if (r)
|
||||
goto unlock;
|
||||
}
|
||||
amdgpu_fence_driver_force_completion(&vinst->ring_dec);
|
||||
for (i = 0; i < vinst->num_enc_rings; i++)
|
||||
amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
|
||||
|
||||
/* Restart the scheduler's work queue for the dec and enc rings
|
||||
* if they were stopped by this function. This allows new tasks
|
||||
* to be submitted to the queues after the reset is complete.
|
||||
*/
|
||||
drm_sched_wqueue_start(&vinst->ring_dec.sched);
|
||||
for (i = 0; i < vinst->num_enc_rings; i++)
|
||||
drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&vinst->engine_reset_mutex);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vcn_ring_reset - Reset a VCN ring
|
||||
* @ring: ring to reset
|
||||
* @vmid: vmid of guilty job
|
||||
* @timedout_fence: fence of timed out job
|
||||
*
|
||||
* This helper is for VCN blocks without unified queues because
|
||||
* resetting the engine resets all queues in that case. With
|
||||
* unified queues we have one queue per engine.
|
||||
* Returns: 0 on success, or a negative error code on failure.
|
||||
*/
|
||||
int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
|
||||
unsigned int vmid,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
if (adev->vcn.inst[ring->me].using_unified_queue)
|
||||
return -EINVAL;
|
||||
|
||||
return amdgpu_vcn_reset_engine(adev, ring->me);
|
||||
}
|
||||
|
@ -330,7 +330,9 @@ struct amdgpu_vcn_inst {
|
||||
struct dpg_pause_state *new_state);
|
||||
int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
|
||||
enum amd_powergating_state state);
|
||||
int (*reset)(struct amdgpu_vcn_inst *vinst);
|
||||
bool using_unified_queue;
|
||||
struct mutex engine_reset_mutex;
|
||||
};
|
||||
|
||||
struct amdgpu_vcn_ras {
|
||||
@ -552,5 +554,7 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
|
||||
|
||||
int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
|
||||
enum amd_powergating_state state);
|
||||
|
||||
int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
|
||||
unsigned int vmid,
|
||||
struct amdgpu_fence *guilty_fence);
|
||||
#endif
|
||||
|
@ -152,8 +152,10 @@ enum AMDGIM_REG_ACCESS_FLAG {
|
||||
AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
|
||||
/* Use RLC to program GC regs */
|
||||
AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
|
||||
/* Use PSP to program L1_TLB_CNTL*/
|
||||
/* Use PSP to program L1_TLB_CNTL */
|
||||
AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
|
||||
/* Use RLCG to program SQ_CONFIG1 */
|
||||
AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4),
|
||||
};
|
||||
|
||||
struct amdgim_pf2vf_info_v1 {
|
||||
@ -346,6 +348,10 @@ struct amdgpu_video_codec_info;
|
||||
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
|
||||
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
|
||||
|
||||
#define amdgpu_sriov_reg_access_sq_config(adev) \
|
||||
(amdgpu_sriov_vf((adev)) && \
|
||||
((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG)))
|
||||
|
||||
#define amdgpu_passthrough(adev) \
|
||||
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
|
||||
|
||||
|
@ -765,6 +765,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
|
||||
bool cleaner_shader_needed = false;
|
||||
bool pasid_mapping_needed = false;
|
||||
struct dma_fence *fence = NULL;
|
||||
struct amdgpu_fence *af;
|
||||
unsigned int patch;
|
||||
int r;
|
||||
|
||||
@ -830,6 +831,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
|
||||
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
|
||||
if (r)
|
||||
return r;
|
||||
/* this is part of the job's context */
|
||||
af = container_of(fence, struct amdgpu_fence, base);
|
||||
af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
|
||||
}
|
||||
|
||||
if (vm_flush_needed) {
|
||||
|
@ -66,7 +66,10 @@ to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
|
||||
|
||||
static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
|
||||
{
|
||||
to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED;
|
||||
struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res);
|
||||
|
||||
WARN_ON(ares->flags & DRM_BUDDY_CLEARED);
|
||||
ares->flags |= DRM_BUDDY_CLEARED;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -218,15 +218,27 @@ int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
|
||||
return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
|
||||
}
|
||||
|
||||
static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
|
||||
{
|
||||
if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
|
||||
return true;
|
||||
|
||||
if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
|
||||
xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
|
||||
return true;
|
||||
|
||||
if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
|
||||
xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
|
||||
{
|
||||
int mode;
|
||||
|
||||
if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
|
||||
xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
|
||||
return xcp_mgr->mode;
|
||||
|
||||
if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
|
||||
if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
|
||||
return xcp_mgr->mode;
|
||||
|
||||
if (!(flags & AMDGPU_XCP_FL_LOCKED))
|
||||
|
@ -113,7 +113,8 @@ union amd_sriov_reg_access_flags {
|
||||
uint32_t vf_reg_access_mmhub : 1;
|
||||
uint32_t vf_reg_access_gc : 1;
|
||||
uint32_t vf_reg_access_l1_tlb_cntl : 1;
|
||||
uint32_t reserved : 28;
|
||||
uint32_t vf_reg_access_sq_config : 1;
|
||||
uint32_t reserved : 27;
|
||||
} flags;
|
||||
uint32_t all;
|
||||
};
|
||||
|
@ -4952,11 +4952,15 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
}
|
||||
}
|
||||
}
|
||||
/* TODO: Add queue reset mask when FW fully supports it */
|
||||
|
||||
adev->gfx.gfx_supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
|
||||
adev->gfx.compute_supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
}
|
||||
|
||||
r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
|
||||
if (r) {
|
||||
@ -9046,21 +9050,6 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
|
||||
ref, mask);
|
||||
}
|
||||
|
||||
static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
|
||||
unsigned int vmid)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
uint32_t value = 0;
|
||||
|
||||
value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
|
||||
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
|
||||
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
|
||||
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
|
||||
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
|
||||
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
|
||||
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
|
||||
uint32_t me, uint32_t pipe,
|
||||
@ -9534,13 +9523,10 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring,
|
||||
u64 addr;
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
|
||||
return -EINVAL;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
spin_lock_irqsave(&kiq->ring_lock, flags);
|
||||
|
||||
@ -9589,12 +9575,7 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring,
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
|
||||
@ -9607,13 +9588,10 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
|
||||
unsigned long flags;
|
||||
int i, r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
|
||||
return -EINVAL;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
spin_lock_irqsave(&kiq->ring_lock, flags);
|
||||
|
||||
@ -9625,9 +9603,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
|
||||
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
|
||||
0, 0);
|
||||
amdgpu_ring_commit(kiq_ring);
|
||||
spin_unlock_irqrestore(&kiq->ring_lock, flags);
|
||||
|
||||
r = amdgpu_ring_test_ring(kiq_ring);
|
||||
spin_unlock_irqrestore(&kiq->ring_lock, flags);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@ -9663,18 +9640,12 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
|
||||
}
|
||||
kiq->pmf->kiq_map_queues(kiq_ring, ring);
|
||||
amdgpu_ring_commit(kiq_ring);
|
||||
spin_unlock_irqrestore(&kiq->ring_lock, flags);
|
||||
|
||||
r = amdgpu_ring_test_ring(kiq_ring);
|
||||
spin_unlock_irqrestore(&kiq->ring_lock, flags);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
|
||||
@ -9909,7 +9880,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
|
||||
.emit_wreg = gfx_v10_0_ring_emit_wreg,
|
||||
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
|
||||
.soft_recovery = gfx_v10_0_ring_soft_recovery,
|
||||
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
|
||||
.reset = gfx_v10_0_reset_kgq,
|
||||
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
|
||||
@ -9950,7 +9920,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
|
||||
.emit_wreg = gfx_v10_0_ring_emit_wreg,
|
||||
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
|
||||
.soft_recovery = gfx_v10_0_ring_soft_recovery,
|
||||
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
|
||||
.reset = gfx_v10_0_reset_kcq,
|
||||
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
|
||||
|
@ -1806,12 +1806,17 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
case IP_VERSION(11, 0, 2):
|
||||
case IP_VERSION(11, 0, 3):
|
||||
if ((adev->gfx.me_fw_version >= 2280) &&
|
||||
(adev->gfx.mec_fw_version >= 2410)) {
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
(adev->gfx.mec_fw_version >= 2410) &&
|
||||
!amdgpu_sriov_vf(adev)) {
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
@ -6283,21 +6288,6 @@ static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
|
||||
ref, mask, 0x20);
|
||||
}
|
||||
|
||||
static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
|
||||
unsigned vmid)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
uint32_t value = 0;
|
||||
|
||||
value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
|
||||
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
|
||||
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
|
||||
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
|
||||
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
|
||||
WREG32_SOC15(GC, 0, regSQ_CMD, value);
|
||||
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
|
||||
uint32_t me, uint32_t pipe,
|
||||
@ -6818,10 +6808,7 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
|
||||
if (r) {
|
||||
@ -6844,12 +6831,7 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
|
||||
@ -6989,10 +6971,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
int r = 0;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
|
||||
if (r) {
|
||||
@ -7013,12 +6992,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
|
||||
@ -7254,7 +7228,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
|
||||
.emit_wreg = gfx_v11_0_ring_emit_wreg,
|
||||
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
|
||||
.soft_recovery = gfx_v11_0_ring_soft_recovery,
|
||||
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
|
||||
.reset = gfx_v11_0_reset_kgq,
|
||||
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
|
||||
@ -7296,7 +7269,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
|
||||
.emit_wreg = gfx_v11_0_ring_emit_wreg,
|
||||
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
|
||||
.soft_recovery = gfx_v11_0_ring_soft_recovery,
|
||||
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
|
||||
.reset = gfx_v11_0_reset_kcq,
|
||||
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
|
||||
|
@ -1542,10 +1542,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
case IP_VERSION(12, 0, 0):
|
||||
case IP_VERSION(12, 0, 1):
|
||||
if ((adev->gfx.me_fw_version >= 2660) &&
|
||||
(adev->gfx.mec_fw_version >= 2920)) {
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
(adev->gfx.mec_fw_version >= 2920) &&
|
||||
!amdgpu_sriov_vf(adev)) {
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (!adev->enable_mes_kiq) {
|
||||
@ -4690,21 +4694,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
|
||||
ref, mask, 0x20);
|
||||
}
|
||||
|
||||
static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
|
||||
unsigned vmid)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
uint32_t value = 0;
|
||||
|
||||
value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
|
||||
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
|
||||
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
|
||||
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
|
||||
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
|
||||
WREG32_SOC15(GC, 0, regSQ_CMD, value);
|
||||
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
|
||||
uint32_t me, uint32_t pipe,
|
||||
@ -5314,10 +5303,7 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
|
||||
if (r) {
|
||||
@ -5339,12 +5325,7 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
|
||||
@ -5437,10 +5418,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
|
||||
if (r) {
|
||||
@ -5461,12 +5439,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
|
||||
@ -5544,7 +5517,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
|
||||
.emit_wreg = gfx_v12_0_ring_emit_wreg,
|
||||
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
|
||||
.soft_recovery = gfx_v12_0_ring_soft_recovery,
|
||||
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
|
||||
.reset = gfx_v12_0_reset_kgq,
|
||||
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
|
||||
@ -5583,7 +5555,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
|
||||
.emit_wreg = gfx_v12_0_ring_emit_wreg,
|
||||
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
|
||||
.soft_recovery = gfx_v12_0_ring_soft_recovery,
|
||||
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
|
||||
.reset = gfx_v12_0_reset_kcq,
|
||||
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
|
||||
|
@ -4640,6 +4640,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
|
||||
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
|
||||
/* reset ring buffer */
|
||||
ring->wptr = 0;
|
||||
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
|
||||
amdgpu_ring_clear_ring(ring);
|
||||
}
|
||||
return 0;
|
||||
|
@@ -2410,6 +2410,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
 		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
 	adev->gfx.compute_supported_reset =
 		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+	if (!amdgpu_sriov_vf(adev))
+		adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
 
 	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
 	if (r) {
@@ -7181,13 +7183,10 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
 	unsigned long flags;
 	int i, r;
 
-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
-
 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
 		return -EINVAL;
 
-	drm_sched_wqueue_stop(&ring->sched);
+	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
 
 	spin_lock_irqsave(&kiq->ring_lock, flags);
 
@@ -7238,19 +7237,13 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
 	}
 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
 	amdgpu_ring_commit(kiq_ring);
-	spin_unlock_irqrestore(&kiq->ring_lock, flags);
 	r = amdgpu_ring_test_ring(kiq_ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
 	if (r) {
 		DRM_ERROR("fail to remap queue\n");
 		return r;
 	}
-
-	r = amdgpu_ring_test_ring(ring);
-	if (r)
-		return r;
-	amdgpu_fence_driver_force_completion(ring);
-	drm_sched_wqueue_start(&ring->sched);
-	return 0;
+	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
 }
 
 static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
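Across the GC, SDMA, JPEG and VCN hunks in this series the SR-IOV early returns disappear from the individual reset callbacks; instead sw_init only advertises AMDGPU_RESET_TYPE_PER_QUEUE on bare metal. A condensed sketch of both halves of that pattern (a fragment, not a drop-in function), using only the field and helper names visible in these hunks; the assumption is that the core reset path consults the mask before calling into the IP:

	/* sw_init: advertise per-queue reset only when not running under SR-IOV */
	adev->gfx.compute_supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	/* consumer side: bail out early if the capability was never advertised */
	if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
		return -EOPNOTSUPP;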
@ -1148,13 +1148,15 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
|
||||
case IP_VERSION(9, 4, 3):
|
||||
case IP_VERSION(9, 4, 4):
|
||||
if (adev->gfx.mec_fw_version >= 155) {
|
||||
if ((adev->gfx.mec_fw_version >= 155) &&
|
||||
!amdgpu_sriov_vf(adev)) {
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
|
||||
}
|
||||
break;
|
||||
case IP_VERSION(9, 5, 0):
|
||||
if (adev->gfx.mec_fw_version >= 21) {
|
||||
if ((adev->gfx.mec_fw_version >= 21) &&
|
||||
!amdgpu_sriov_vf(adev)) {
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
|
||||
}
|
||||
@ -1349,7 +1351,9 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
|
||||
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
|
||||
/* ToDo: GC 9.4.4 */
|
||||
case IP_VERSION(9, 4, 3):
|
||||
if (adev->gfx.mec_fw_version >= 184)
|
||||
if (adev->gfx.mec_fw_version >= 184 &&
|
||||
(amdgpu_sriov_reg_access_sq_config(adev) ||
|
||||
!amdgpu_sriov_vf(adev)))
|
||||
adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
|
||||
break;
|
||||
case IP_VERSION(9, 5, 0):
|
||||
@ -3561,13 +3565,10 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
|
||||
unsigned long flags;
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
|
||||
return -EINVAL;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
spin_lock_irqsave(&kiq->ring_lock, flags);
|
||||
|
||||
@ -3594,7 +3595,9 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
|
||||
dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
|
||||
|
||||
pipe_reset:
|
||||
if(r) {
|
||||
if (r) {
|
||||
if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
|
||||
return -EOPNOTSUPP;
|
||||
r = gfx_v9_4_3_reset_hw_pipe(ring);
|
||||
dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
|
||||
r ? "failed" : "successfully");
|
||||
@ -3615,20 +3618,14 @@ pipe_reset:
|
||||
}
|
||||
kiq->pmf->kiq_map_queues(kiq_ring, ring);
|
||||
amdgpu_ring_commit(kiq_ring);
|
||||
spin_unlock_irqrestore(&kiq->ring_lock, flags);
|
||||
|
||||
r = amdgpu_ring_test_ring(kiq_ring);
|
||||
spin_unlock_irqrestore(&kiq->ring_lock, flags);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "fail to remap queue\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
enum amdgpu_gfx_cp_ras_mem_id {
|
||||
|
@ -1121,8 +1121,8 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
|
||||
}
|
||||
|
||||
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct amdgpu_bo *bo,
|
||||
struct amdgpu_bo_va_mapping *mapping,
|
||||
uint64_t *flags)
|
||||
{
|
||||
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
@ -1132,7 +1132,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
|
||||
AMDGPU_GEM_CREATE_EXT_COHERENT);
|
||||
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
|
||||
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
|
||||
struct amdgpu_vm *vm = mapping->bo_va->base.vm;
|
||||
unsigned int mtype_local, mtype;
|
||||
uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
|
||||
bool snoop = false;
|
||||
@ -1162,7 +1161,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
|
||||
mtype = MTYPE_UC;
|
||||
else
|
||||
mtype = MTYPE_NC;
|
||||
if (mapping->bo_va->is_xgmi)
|
||||
if (amdgpu_xgmi_same_hive(adev, bo_adev))
|
||||
snoop = true;
|
||||
}
|
||||
} else {
|
||||
@ -1254,7 +1253,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
|
||||
}
|
||||
|
||||
if ((*flags & AMDGPU_PTE_VALID) && bo)
|
||||
gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
|
||||
gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo,
|
||||
flags);
|
||||
}
|
||||
|
||||
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
|
||||
|
@@ -183,15 +183,16 @@ exit:
 
 static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
 {
+	const struct software_node *amd_camera_node, *isp4_node;
 	struct amdgpu_device *adev = isp->adev;
+	struct acpi_device *acpi_dev;
 	int idx, int_idx, num_res, r;
-	u8 isp_dev_hid[ACPI_ID_LEN];
 	u64 isp_base;
 
 	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
 		return -EINVAL;
 
-	r = amdgpu_acpi_get_isp4_dev_hid(&isp_dev_hid);
+	r = amdgpu_acpi_get_isp4_dev(&acpi_dev);
 	if (r) {
 		drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r);
 		/* allow GPU init to progress */
@@ -199,7 +200,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
 	}
 
 	/* add GPIO resources required for OMNI5C10 sensor */
-	if (!strcmp("OMNI5C10", isp_dev_hid)) {
+	if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev))) {
 		gpiod_add_lookup_table(&isp_gpio_table);
 		gpiod_add_lookup_table(&isp_sensor_gpio_table);
 	}
@@ -241,6 +242,9 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
 		goto failure;
 	}
 
+	amd_camera_node = (const struct software_node *)acpi_dev->driver_data;
+	isp4_node = software_node_find_by_name(amd_camera_node, "isp4");
+
 	/* initialize isp platform data */
 	isp->isp_pdata->adev = (void *)adev;
 	isp->isp_pdata->asic_type = adev->asic_type;
@@ -269,6 +273,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
 	isp->isp_cell[0].num_resources = num_res;
 	isp->isp_cell[0].resources = &isp->isp_res[0];
 	isp->isp_cell[0].platform_data = isp->isp_pdata;
+	isp->isp_cell[0].swnode = isp4_node;
 	isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
 
 	/* initialize isp i2c platform data */
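Condensed view of the new ISP probe flow from the hunks above (a fragment with error handling trimmed, not a drop-in function): the driver now holds the ACPI camera device itself, so the sensor HID and the "isp4" software node both come from one lookup.

	struct acpi_device *acpi_dev;
	const struct software_node *amd_camera_node, *isp4_node;
	int r;

	r = amdgpu_acpi_get_isp4_dev(&acpi_dev);
	if (r)
		return r;			/* error handling simplified here */

	/* sensor-specific GPIO lookup keyed off the ACPI HID */
	if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev)))
		gpiod_add_lookup_table(&isp_gpio_table);

	/* the camera graph rides along as driver_data; pick out the isp4 node */
	amd_camera_node = (const struct software_node *)acpi_dev->driver_data;
	isp4_node = software_node_find_by_name(amd_camera_node, "isp4");
	isp->isp_cell[0].swnode = isp4_node;	/* hand the node to the MFD cell */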
@@ -118,7 +118,10 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
 	if (r)
 		return r;
 
-	adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+	adev->jpeg.supported_reset =
+		amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+	if (!amdgpu_sriov_vf(adev))
+		adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
 	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
 
 	return r;
@@ -770,15 +773,14 @@ static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring,
 {
 	int r;
 
-	drm_sched_wqueue_stop(&ring->sched);
-	jpeg_v2_0_stop(ring->adev);
-	jpeg_v2_0_start(ring->adev);
-	r = amdgpu_ring_test_helper(ring);
+	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+	r = jpeg_v2_0_stop(ring->adev);
 	if (r)
 		return r;
-	amdgpu_fence_driver_force_completion(ring);
-	drm_sched_wqueue_start(&ring->sched);
-	return 0;
+	r = jpeg_v2_0_start(ring->adev);
+	if (r)
+		return r;
+	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
 }
 
 static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
@ -167,7 +167,10 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->jpeg.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
|
||||
|
||||
return r;
|
||||
@ -647,17 +650,10 @@ static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring,
|
||||
unsigned int vmid,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
int r;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
jpeg_v2_5_stop_inst(ring->adev, ring->me);
|
||||
jpeg_v2_5_start_inst(ring->adev, ring->me);
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
|
||||
|
@ -132,7 +132,10 @@ static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->jpeg.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
|
||||
|
||||
return r;
|
||||
@ -561,15 +564,14 @@ static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring,
|
||||
{
|
||||
int r;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
jpeg_v3_0_stop(ring->adev);
|
||||
jpeg_v3_0_start(ring->adev);
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
r = jpeg_v3_0_stop(ring->adev);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
r = jpeg_v3_0_start(ring->adev);
|
||||
if (r)
|
||||
return r;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
|
||||
|
@ -143,7 +143,10 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
adev->jpeg.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
|
||||
|
||||
return r;
|
||||
@ -726,18 +729,14 @@ static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring,
|
||||
{
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(ring->adev))
|
||||
return -EINVAL;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
jpeg_v4_0_stop(ring->adev);
|
||||
jpeg_v4_0_start(ring->adev);
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
r = jpeg_v4_0_stop(ring->adev);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
r = jpeg_v4_0_start(ring->adev);
|
||||
if (r)
|
||||
return r;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
|
||||
|
@ -216,12 +216,11 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
adev->jpeg.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -242,8 +241,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
amdgpu_jpeg_sysfs_reset_mask_fini(adev);
|
||||
amdgpu_jpeg_sysfs_reset_mask_fini(adev);
|
||||
|
||||
r = amdgpu_jpeg_sw_fini(adev);
|
||||
|
||||
@ -1147,20 +1145,13 @@ static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring,
|
||||
unsigned int vmid,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(ring->adev))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
jpeg_v4_0_3_core_stall_reset(ring);
|
||||
jpeg_v4_0_3_start_jrbc(ring);
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
|
||||
|
@ -174,9 +174,10 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* TODO: Add queue reset mask when FW fully supports it */
|
||||
adev->jpeg.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
@ -767,6 +768,22 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int jpeg_v4_0_5_ring_reset(struct amdgpu_ring *ring,
|
||||
unsigned int vmid,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
int r;
|
||||
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
r = jpeg_v4_0_5_stop(ring->adev);
|
||||
if (r)
|
||||
return r;
|
||||
r = jpeg_v4_0_5_start(ring->adev);
|
||||
if (r)
|
||||
return r;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
|
||||
.name = "jpeg_v4_0_5",
|
||||
.early_init = jpeg_v4_0_5_early_init,
|
||||
@ -812,6 +829,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
|
||||
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
|
||||
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
.reset = jpeg_v4_0_5_ring_reset,
|
||||
};
|
||||
|
||||
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
|
||||
|
@ -120,13 +120,13 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* TODO: Add queue reset mask when FW fully supports it */
|
||||
adev->jpeg.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
return 0;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -644,6 +644,22 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int jpeg_v5_0_0_ring_reset(struct amdgpu_ring *ring,
|
||||
unsigned int vmid,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
int r;
|
||||
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
r = jpeg_v5_0_0_stop(ring->adev);
|
||||
if (r)
|
||||
return r;
|
||||
r = jpeg_v5_0_0_start(ring->adev);
|
||||
if (r)
|
||||
return r;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
|
||||
.name = "jpeg_v5_0_0",
|
||||
.early_init = jpeg_v5_0_0_early_init,
|
||||
@ -689,6 +705,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
|
||||
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
|
||||
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
.reset = jpeg_v5_0_0_ring_reset,
|
||||
};
|
||||
|
||||
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
|
||||
|
@ -200,14 +200,13 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
adev->jpeg.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
|
||||
|
||||
return 0;
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -226,8 +225,7 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
amdgpu_jpeg_sysfs_reset_mask_fini(adev);
|
||||
amdgpu_jpeg_sysfs_reset_mask_fini(adev);
|
||||
|
||||
r = amdgpu_jpeg_sw_fini(adev);
|
||||
|
||||
@ -838,20 +836,10 @@ static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring,
|
||||
unsigned int vmid,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(ring->adev))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
jpeg_v5_0_1_core_stall_reset(ring);
|
||||
jpeg_v5_0_1_init_jrbc(ring);
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
|
||||
|
@@ -1664,9 +1664,6 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring,
 	u32 id = ring->me;
 	int r;
 
-	if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
-		return -EOPNOTSUPP;
-
 	amdgpu_amdkfd_suspend(adev, true);
 	r = amdgpu_sdma_reset_engine(adev, id, false);
 	amdgpu_amdkfd_resume(adev, true);
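The SDMA 5.x hunks that follow converge on the same shape: validate the instance from ring->me, quiesce KFD user queues around the engine reset, and let the shared ring helpers handle scheduler and fence state. A sketch assuming only the calls visible in this diff:

static int sdma_reset_queue_sketch(struct amdgpu_ring *ring, unsigned int vmid,
				   struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	if (ring->me >= adev->sdma.num_instances) {
		dev_err(adev->dev, "sdma instance not found\n");
		return -EINVAL;
	}

	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	amdgpu_amdkfd_suspend(adev, true);	/* park KFD queues around the reset */
	r = amdgpu_sdma_reset_engine(adev, ring->me, true);
	amdgpu_amdkfd_resume(adev, true);
	if (r)
		return r;

	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}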
@ -1428,7 +1428,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
case IP_VERSION(5, 0, 0):
|
||||
case IP_VERSION(5, 0, 2):
|
||||
case IP_VERSION(5, 0, 5):
|
||||
if (adev->sdma.instance[0].fw_version >= 35)
|
||||
if ((adev->sdma.instance[0].fw_version >= 35) &&
|
||||
!amdgpu_sriov_vf(adev))
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
default:
|
||||
@ -1544,14 +1545,22 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
u32 inst_id = ring->me;
|
||||
int r;
|
||||
|
||||
amdgpu_amdkfd_suspend(adev, true);
|
||||
r = amdgpu_sdma_reset_engine(adev, inst_id, false);
|
||||
amdgpu_amdkfd_resume(adev, true);
|
||||
if (ring->me >= adev->sdma.num_instances) {
|
||||
dev_err(adev->dev, "sdma instance not found\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return r;
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
amdgpu_amdkfd_suspend(adev, true);
|
||||
r = amdgpu_sdma_reset_engine(adev, ring->me, true);
|
||||
amdgpu_amdkfd_resume(adev, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
|
||||
|
@ -1347,11 +1347,13 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
case IP_VERSION(5, 2, 2):
|
||||
case IP_VERSION(5, 2, 3):
|
||||
case IP_VERSION(5, 2, 4):
|
||||
if (adev->sdma.instance[0].fw_version >= 76)
|
||||
if ((adev->sdma.instance[0].fw_version >= 76) &&
|
||||
!amdgpu_sriov_vf(adev))
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
case IP_VERSION(5, 2, 5):
|
||||
if (adev->sdma.instance[0].fw_version >= 34)
|
||||
if ((adev->sdma.instance[0].fw_version >= 34) &&
|
||||
!amdgpu_sriov_vf(adev))
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
default:
|
||||
@ -1457,14 +1459,22 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
u32 inst_id = ring->me;
|
||||
int r;
|
||||
|
||||
amdgpu_amdkfd_suspend(adev, true);
|
||||
r = amdgpu_sdma_reset_engine(adev, inst_id, false);
|
||||
amdgpu_amdkfd_resume(adev, true);
|
||||
if (ring->me >= adev->sdma.num_instances) {
|
||||
dev_err(adev->dev, "sdma instance not found\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return r;
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
amdgpu_amdkfd_suspend(adev, true);
|
||||
r = amdgpu_sdma_reset_engine(adev, ring->me, true);
|
||||
amdgpu_amdkfd_resume(adev, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
|
||||
|
@ -1355,7 +1355,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
case IP_VERSION(6, 0, 0):
|
||||
case IP_VERSION(6, 0, 2):
|
||||
case IP_VERSION(6, 0, 3):
|
||||
if (adev->sdma.instance[0].fw_version >= 21)
|
||||
if ((adev->sdma.instance[0].fw_version >= 21) &&
|
||||
!amdgpu_sriov_vf(adev))
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
default:
|
||||
@ -1575,33 +1576,24 @@ static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
int i, r;
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
if (ring == &adev->sdma.instance[i].ring)
|
||||
break;
|
||||
}
|
||||
|
||||
if (i == adev->sdma.num_instances) {
|
||||
DRM_ERROR("sdma instance not found\n");
|
||||
if (ring->me >= adev->sdma.num_instances) {
|
||||
dev_err(adev->dev, "sdma instance not found\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = sdma_v6_0_gfx_resume_instance(adev, i, true);
|
||||
r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
|
||||
|
@ -807,33 +807,24 @@ static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring,
|
||||
struct amdgpu_fence *timedout_fence)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
int i, r;
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
if (ring == &adev->sdma.instance[i].ring)
|
||||
break;
|
||||
}
|
||||
|
||||
if (i == adev->sdma.num_instances) {
|
||||
DRM_ERROR("sdma instance not found\n");
|
||||
if (ring->me >= adev->sdma.num_instances) {
|
||||
dev_err(adev->dev, "sdma instance not found\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = sdma_v7_0_gfx_resume_instance(adev, i, true);
|
||||
r = sdma_v7_0_gfx_resume_instance(adev, ring->me, true);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1346,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
|
||||
adev->sdma.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
|
@ -98,6 +98,8 @@ static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
|
||||
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
|
||||
struct dpg_pause_state *new_state);
|
||||
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
|
||||
static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst);
|
||||
|
||||
/**
|
||||
* vcn_v2_0_early_init - set function pointers and load microcode
|
||||
*
|
||||
@ -213,6 +215,12 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
}
|
||||
|
||||
adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
|
||||
adev->vcn.inst[0].reset = vcn_v2_0_reset;
|
||||
|
||||
adev->vcn.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
r = amdgpu_virt_alloc_mm_table(adev);
|
||||
if (r)
|
||||
@ -233,6 +241,10 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
adev->vcn.ip_dump = ptr;
|
||||
}
|
||||
|
||||
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -260,6 +272,8 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
amdgpu_vcn_sysfs_reset_mask_fini(adev);
|
||||
|
||||
r = amdgpu_vcn_sw_fini(adev, 0);
|
||||
|
||||
kfree(adev->vcn.ip_dump);
|
||||
@ -1355,6 +1369,16 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = vcn_v2_0_stop(vinst);
|
||||
if (r)
|
||||
return r;
|
||||
return vcn_v2_0_start(vinst);
|
||||
}
|
||||
|
||||
static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
|
||||
{
|
||||
struct amdgpu_device *adev = ip_block->adev;
|
||||
@ -2176,6 +2200,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
|
||||
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
|
||||
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
.reset = amdgpu_vcn_ring_reset,
|
||||
};
|
||||
|
||||
static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
|
||||
@ -2205,6 +2230,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
|
||||
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
|
||||
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
.reset = amdgpu_vcn_ring_reset,
|
||||
};
|
||||
|
||||
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
|
||||
|
@ -102,6 +102,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
|
||||
struct dpg_pause_state *new_state);
|
||||
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
|
||||
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
|
||||
static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst);
|
||||
|
||||
static int amdgpu_ih_clientid_vcns[] = {
|
||||
SOC15_IH_CLIENTID_VCN,
|
||||
@ -404,8 +405,14 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
|
||||
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
|
||||
adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
|
||||
adev->vcn.inst[j].reset = vcn_v2_5_reset;
|
||||
}
|
||||
|
||||
adev->vcn.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
r = amdgpu_virt_alloc_mm_table(adev);
|
||||
if (r)
|
||||
@ -425,6 +432,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
adev->vcn.ip_dump = ptr;
|
||||
}
|
||||
|
||||
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -455,6 +466,8 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
amdgpu_virt_free_mm_table(adev);
|
||||
|
||||
amdgpu_vcn_sysfs_reset_mask_fini(adev);
|
||||
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
|
||||
r = amdgpu_vcn_suspend(adev, i);
|
||||
if (r)
|
||||
@ -1816,6 +1829,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
|
||||
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
|
||||
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
.reset = amdgpu_vcn_ring_reset,
|
||||
};
|
||||
|
||||
/**
|
||||
@ -1914,6 +1928,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
|
||||
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
|
||||
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
.reset = amdgpu_vcn_ring_reset,
|
||||
};
|
||||
|
||||
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
|
||||
@ -1942,6 +1957,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = vcn_v2_5_stop(vinst);
|
||||
if (r)
|
||||
return r;
|
||||
return vcn_v2_5_start(vinst);
|
||||
}
|
||||
|
||||
static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
|
||||
{
|
||||
struct amdgpu_device *adev = ip_block->adev;
|
||||
|
@ -110,6 +110,7 @@ static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
|
||||
enum amd_powergating_state state);
|
||||
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
|
||||
struct dpg_pause_state *new_state);
|
||||
static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst);
|
||||
|
||||
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
|
||||
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
|
||||
@ -289,8 +290,14 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
|
||||
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
|
||||
adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
|
||||
adev->vcn.inst[i].reset = vcn_v3_0_reset;
|
||||
}
|
||||
|
||||
adev->vcn.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
r = amdgpu_virt_alloc_mm_table(adev);
|
||||
if (r)
|
||||
@ -306,6 +313,10 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
adev->vcn.ip_dump = ptr;
|
||||
}
|
||||
|
||||
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -338,6 +349,8 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
amdgpu_virt_free_mm_table(adev);
|
||||
|
||||
amdgpu_vcn_sysfs_reset_mask_fini(adev);
|
||||
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
|
||||
r = amdgpu_vcn_suspend(adev, i);
|
||||
if (r)
|
||||
@ -2033,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
|
||||
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
|
||||
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
.reset = amdgpu_vcn_ring_reset,
|
||||
};
|
||||
|
||||
/**
|
||||
@ -2131,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
|
||||
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
|
||||
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
.reset = amdgpu_vcn_ring_reset,
|
||||
};
|
||||
|
||||
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
|
||||
@ -2164,6 +2179,18 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = vcn_v3_0_stop(vinst);
|
||||
if (r)
|
||||
return r;
|
||||
vcn_v3_0_enable_clock_gating(vinst);
|
||||
vcn_v3_0_enable_static_power_gating(vinst);
|
||||
return vcn_v3_0_start(vinst);
|
||||
}
|
||||
|
||||
static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
|
||||
{
|
||||
struct amdgpu_device *adev = ip_block->adev;
|
||||
|
@ -241,7 +241,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
|
||||
adev->vcn.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
r = amdgpu_virt_alloc_mm_table(adev);
|
||||
@ -1975,19 +1976,14 @@ static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring,
|
||||
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
|
||||
int r;
|
||||
|
||||
if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
vcn_v4_0_stop(vinst);
|
||||
vcn_v4_0_start(vinst);
|
||||
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
r = vcn_v4_0_stop(vinst);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
r = vcn_v4_0_start(vinst);
|
||||
if (r)
|
||||
return r;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
|
||||
|
@ -1603,13 +1603,7 @@ static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring,
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
|
||||
|
||||
if (amdgpu_sriov_vf(ring->adev))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
|
||||
vcn_inst = GET_INST(VCN, ring->me);
|
||||
r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
|
||||
@ -1624,12 +1618,8 @@ static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring,
|
||||
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
|
||||
vcn_v4_0_3_hw_init_inst(vinst);
|
||||
vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
|
||||
|
@ -220,7 +220,8 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
}
|
||||
|
||||
adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
@ -1473,19 +1474,14 @@ static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring,
|
||||
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
|
||||
int r;
|
||||
|
||||
if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
vcn_v4_0_5_stop(vinst);
|
||||
vcn_v4_0_5_start(vinst);
|
||||
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
r = vcn_v4_0_5_stop(vinst);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
r = vcn_v4_0_5_start(vinst);
|
||||
if (r)
|
||||
return r;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
|
||||
|
@ -198,7 +198,8 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
|
||||
adev->vcn.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
vcn_v5_0_0_alloc_ip_dump(adev);
|
||||
|
||||
@ -1200,19 +1201,14 @@ static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring,
|
||||
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
|
||||
int r;
|
||||
|
||||
if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
drm_sched_wqueue_stop(&ring->sched);
|
||||
vcn_v5_0_0_stop(vinst);
|
||||
vcn_v5_0_0_start(vinst);
|
||||
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
|
||||
r = vcn_v5_0_0_stop(vinst);
|
||||
if (r)
|
||||
return r;
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
drm_sched_wqueue_start(&ring->sched);
|
||||
return 0;
|
||||
r = vcn_v5_0_0_start(vinst);
|
||||
if (r)
|
||||
return r;
|
||||
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
|
||||
}
|
||||
|
||||
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
|
||||
|
@@ -5,7 +5,7 @@
 
 config HSA_AMD
 	bool "HSA kernel driver for AMD GPU devices"
-	depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT))
+	depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT) || (LOONGARCH && 64BIT))
 	select HMM_MIRROR
 	select MMU_NOTIFIER
 	select DRM_AMDGPU_USERPTR
@@ -728,7 +728,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 	 * support programmable degamma anywhere.
 	 */
 	is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
-	drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
+	/* Dont't enable DRM CRTC degamma property for DCN401 since the
+	 * pre-blending degamma LUT doesn't apply to cursor, and therefore
+	 * can't work similar to a post-blending degamma LUT as in other hw
+	 * versions.
+	 * TODO: revisit it once KMS plane color API is merged.
+	 */
+	drm_crtc_enable_color_mgmt(&acrtc->base,
+				   (is_dcn &&
+				    dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ?
+				    MAX_COLOR_LUT_ENTRIES : 0,
 				   true, MAX_COLOR_LUT_ENTRIES);
 
 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
@@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
 ###############################################################################
 # DCN30
 ###############################################################################
-CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o
+CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o
 
 AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
 
@@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt(
 		if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
 			continue;
 
-		if (!stream->dpms_off || (stream_status && stream_status->plane_count))
+		if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count))
 			display_count++;
 	}
 
@@ -56,6 +56,7 @@
 #define DALSMC_MSG_SetDisplayRefreshFromMall      0xF
 #define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10
 #define DALSMC_MSG_BacoAudioD3PME                 0x11
-#define DALSMC_Message_Count                      0x12
+#define DALSMC_MSG_SmartAccess                    0x12
+#define DALSMC_Message_Count                      0x13
 
 #endif
@@ -30,6 +30,7 @@
 #include "dce100/dce_clk_mgr.h"
 #include "dcn30/dcn30_clk_mgr.h"
 #include "dml/dcn30/dcn30_fpu.h"
+#include "dcn30/dcn30m_clk_mgr.h"
 #include "reg_helper.h"
 #include "core_types.h"
 #include "dm_helpers.h"
@@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = {
 	.are_clock_states_equal = dcn3_are_clock_states_equal,
 	.enable_pme_wa = dcn3_enable_pme_wa,
 	.notify_link_rate_change = dcn30_notify_link_rate_change,
-	.is_smu_present = dcn3_is_smu_present
+	.is_smu_present = dcn3_is_smu_present,
+	.set_smartmux_switch = dcn30m_set_smartmux_switch
 };
 
 static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
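Sketch of how DC would drive the new smartmux hook, assuming only the callback name and signature wired up above; the SMU side is the DALSMC_MSG_SmartAccess message implemented in the new dcn30m files that follow. The pin mask value here is hypothetical.

	uint32_t pins_to_set = 1;	/* hypothetical mux pin selection */
	uint32_t response = 0;

	/* optional callback: only DCN30-based clk_mgrs register it */
	if (dc->clk_mgr->funcs->set_smartmux_switch)
		response = dc->clk_mgr->funcs->set_smartmux_switch(dc->clk_mgr,
								   pins_to_set);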
@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Copyright 2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#include "clk_mgr_internal.h"
|
||||
#include "dcn30/dcn30m_clk_mgr.h"
|
||||
#include "dcn30m_clk_mgr_smu_msg.h"
|
||||
|
||||
|
||||
uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set)
|
||||
{
|
||||
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
|
||||
|
||||
return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set);
|
||||
}
|
@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Copyright 2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DCN30M_CLK_MGR_H__
|
||||
#define __DCN30M_CLK_MGR_H__
|
||||
|
||||
uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set);
|
||||
|
||||
#endif //__DCN30M_CLK_MGR_H__
|
@ -0,0 +1,118 @@
|
||||
/*
|
||||
* Copyright 2020 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#include "dcn30m_clk_mgr_smu_msg.h"
|
||||
|
||||
#include "clk_mgr_internal.h"
|
||||
#include "reg_helper.h"
|
||||
#include "dm_helpers.h"
|
||||
|
||||
#include "dalsmc.h"
|
||||
|
||||
#define mmDAL_MSG_REG 0x1628A
|
||||
#define mmDAL_ARG_REG 0x16273
|
||||
#define mmDAL_RESP_REG 0x16274
|
||||
|
||||
#define REG(reg_name) \
|
||||
mm ## reg_name
|
||||
|
||||
#include "logger_types.h"
|
||||
#undef DC_LOGGER
|
||||
#define DC_LOGGER \
|
||||
CTX->logger
|
||||
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
|
||||
|
||||
|
||||
/*
|
||||
* Function to be used instead of REG_WAIT macro because the wait ends when
|
||||
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
|
||||
* won't work with REG_WAIT.
|
||||
*/
|
||||
static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr,
|
||||
unsigned int delay_us, unsigned int max_retries)
|
||||
{
|
||||
uint32_t reg = 0;
|
||||
|
||||
do {
|
||||
reg = REG_READ(DAL_RESP_REG);
|
||||
if (reg)
|
||||
break;
|
||||
|
||||
if (delay_us >= 1000)
|
||||
msleep(delay_us/1000);
|
||||
else if (delay_us > 0)
|
||||
udelay(delay_us);
|
||||
} while (max_retries--);
|
||||
|
||||
/* handle DALSMC_Result_CmdRejectedBusy? */
|
||||
|
||||
/* Log? */
|
||||
|
||||
return reg;
|
||||
}
|
||||
|
||||
static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
|
||||
uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
|
||||
{
|
||||
uint32_t result;
|
||||
/* Wait for response register to be ready */
|
||||
dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
|
||||
|
||||
/* Clear response register */
|
||||
REG_WRITE(DAL_RESP_REG, 0);
|
||||
|
||||
/* Set the parameter register for the SMU message */
|
||||
REG_WRITE(DAL_ARG_REG, param_in);
|
||||
|
||||
/* Trigger the message transaction by writing the message ID */
|
||||
REG_WRITE(DAL_MSG_REG, msg_id);
|
||||
|
||||
result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
|
||||
|
||||
if (IS_SMU_TIMEOUT(result))
|
||||
dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000);
|
||||
|
||||
/* Wait for response */
|
||||
if (result == DALSMC_Result_OK) {
|
||||
if (param_out)
|
||||
*param_out = REG_READ(DAL_ARG_REG);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set)
|
||||
{
|
||||
uint32_t response = 0;
|
||||
|
||||
smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set);
|
||||
|
||||
dcn30m_smu_send_msg_with_param(clk_mgr,
|
||||
DALSMC_MSG_SmartAccess, pins_to_set, &response);
|
||||
|
||||
return response;
|
||||
}
|
@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright 2020 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
|
||||
#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
|
||||
|
||||
#include "core_types.h"
|
||||
|
||||
struct clk_mgr_internal;
|
||||
|
||||
uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set);
|
||||
#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */
|
@ -1500,6 +1500,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
|
||||
{
|
||||
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
|
||||
|
||||
unsigned int num_clk_levels;
|
||||
|
||||
switch (clk_type) {
|
||||
case CLK_TYPE_DISPCLK:
|
||||
num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
|
||||
return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
|
||||
clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
|
||||
clk_mgr->base.boot_snapshot.dispclk;
|
||||
case CLK_TYPE_DPPCLK:
|
||||
num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
|
||||
return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ?
|
||||
clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
|
||||
clk_mgr->base.boot_snapshot.dppclk;
|
||||
case CLK_TYPE_DSCCLK:
|
||||
num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
|
||||
return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
|
||||
clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
|
||||
clk_mgr->base.boot_snapshot.dispclk / 3;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct clk_mgr_funcs dcn401_funcs = {
|
||||
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
|
||||
.get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz,
|
||||
@ -1516,6 +1545,7 @@ static struct clk_mgr_funcs dcn401_funcs = {
|
||||
.get_hard_min_memclk = dcn401_get_hard_min_memclk,
|
||||
.get_hard_min_fclk = dcn401_get_hard_min_fclk,
|
||||
.is_dc_mode_present = dcn401_is_dc_mode_present,
|
||||
.get_max_clock_khz = dcn401_get_max_clock_khz,
|
||||
};
|
||||
|
||||
struct clk_mgr_internal *dcn401_clk_mgr_construct(
|
||||
@ -1576,7 +1606,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
|
||||
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
|
||||
if (!clk_mgr->base.bw_params) {
|
||||
BREAK_TO_DEBUGGER();
|
||||
kfree(clk_mgr);
|
||||
kfree(clk_mgr401);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1587,6 +1617,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
|
||||
if (!clk_mgr->wm_range_table) {
|
||||
BREAK_TO_DEBUGGER();
|
||||
kfree(clk_mgr->base.bw_params);
|
||||
kfree(clk_mgr401);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@@ -112,4 +112,6 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx,
 
 void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
 
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
+
 #endif /* __DCN401_CLK_MGR_H_ */
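Usage sketch for the new clk_mgr hook declared above, using only the names visible in these hunks: callers query the highest attainable clock in kHz per clock type through the function table instead of walking the DPM table themselves.

	unsigned int max_dispclk_khz = 0;

	if (dc->clk_mgr->funcs->get_max_clock_khz)
		max_dispclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr,
									CLK_TYPE_DISPCLK);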
@@ -976,6 +976,8 @@ static bool dc_construct_ctx(struct dc *dc,
 	if (!dc_ctx)
 		return false;
 
+	dc_stream_init_rmcm_3dlut(dc);
+
 	dc_ctx->cgs_device = init_params->cgs_device;
 	dc_ctx->driver_context = init_params->driver;
 	dc_ctx->dc = dc;
@@ -5441,8 +5443,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
 	else
 		ret = update_planes_and_stream_v2(dc, srf_updates,
 			surface_count, stream, stream_update);
-
-	if (ret)
+	if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
 		clear_update_flags(srf_updates, surface_count, stream);
 
 	return ret;
@@ -5473,7 +5474,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
 	ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
 			stream_update, state);
 
-	if (ret)
+	if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
 		clear_update_flags(srf_updates, surface_count, stream);
 }
 
@@ -5546,6 +5547,15 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
 			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
 		}
 		break;
+	case DC_ACPI_CM_POWER_STATE_D3:
+		if (dc->caps.ips_support)
+			dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+		if (dc->caps.ips_v2_support) {
+			if (dc->clk_mgr->funcs->set_low_power_state)
+				dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
+		}
+		break;
 	default:
 		ASSERT(dc->current_state->stream_count == 0);
 		dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
@@ -427,6 +427,32 @@ void get_hdr_visual_confirm_color(
 	}
 }
 
+/* Visual Confirm color definition for Smart Mux */
+void get_smartmux_visual_confirm_color(
+	struct dc *dc,
+	struct tg_color *color)
+{
+	uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+	const struct tg_color sm_ver_colors[5] = {
+		{0, 0, 0},					/* SMUX_MUXCONTROL_UNSUPPORTED - Black */
+		{0, MAX_TG_COLOR_VALUE, 0},			/* SMUX_MUXCONTROL_v10 - Green */
+		{0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE},	/* SMUX_MUXCONTROL_v15 - Cyan */
+		{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0},	/* SMUX_MUXCONTROL_MDM - Yellow */
+		{MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE},	/* SMUX_MUXCONTROL_vUNKNOWN - Magenta*/
+	};
+
+	if (dc->caps.is_apu) {
+		/* APU driving the eDP */
+		*color = sm_ver_colors[dc->config.smart_mux_version];
+	} else {
+		/* dGPU driving the eDP - red */
+		color->color_r_cr = color_value;
+		color->color_g_y = 0;
+		color->color_b_cb = 0;
+	}
+}
+
 /* Visual Confirm color definition for VABC */
 void get_vabc_visual_confirm_color(
 	struct pipe_ctx *pipe_ctx,
@ -3940,7 +3940,9 @@ enum dc_status resource_map_pool_resources(
|
||||
/* TODO: Add check if ASIC support and EDID audio */
|
||||
if (!stream->converter_disable_audio &&
|
||||
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
|
||||
stream->audio_info.mode_count && stream->audio_info.flags.all) {
|
||||
stream->audio_info.mode_count &&
|
||||
(stream->audio_info.flags.all ||
|
||||
(stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) {
|
||||
pipe_ctx->stream_res.audio = find_first_free_audio(
|
||||
&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
|
||||
|
||||
|
@ -427,6 +427,8 @@ enum dc_status dc_state_remove_stream(
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
}
|
||||
|
||||
dc_stream_release_3dlut_for_stream(dc, stream);
|
||||
|
||||
dc_stream_release(state->streams[i]);
|
||||
state->stream_count--;
|
||||
|
||||
|
@ -856,6 +856,73 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * dc_stream_get_3dlut()
 * Requirements:
 * 1. If the stream already owns an RMCM instance, return it.
 * 2. If it doesn't and we don't need to allocate, return NULL.
 * 3. If there's a free RMCM instance, assign it to the stream and return it.
 * 4. If no free RMCM instances, return NULL.
 */

struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
|
||||
const struct dc *dc,
|
||||
const struct dc_stream_state *stream,
|
||||
bool allocate_one)
|
||||
{
|
||||
unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
|
||||
|
||||
// see if one is allocated for this stream
|
||||
for (int i = 0; i < num_rmcm; i++) {
|
||||
if (dc->res_pool->rmcm_3dlut[i].isInUse &&
|
||||
dc->res_pool->rmcm_3dlut[i].stream == stream)
|
||||
return &dc->res_pool->rmcm_3dlut[i];
|
||||
}
|
||||
|
||||
//case: none found, and we don't need to allocate one
|
||||
if (!allocate_one)
|
||||
return NULL;
|
||||
|
||||
//see if there is an unused 3dlut, allocate
|
||||
for (int i = 0; i < num_rmcm; i++) {
|
||||
if (!dc->res_pool->rmcm_3dlut[i].isInUse) {
|
||||
dc->res_pool->rmcm_3dlut[i].isInUse = true;
|
||||
dc->res_pool->rmcm_3dlut[i].stream = stream;
|
||||
return &dc->res_pool->rmcm_3dlut[i];
|
||||
}
|
||||
}
|
||||
|
||||
//don't have a 3dlut
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
void dc_stream_release_3dlut_for_stream(
|
||||
const struct dc *dc,
|
||||
const struct dc_stream_state *stream)
|
||||
{
|
||||
struct dc_rmcm_3dlut *rmcm_3dlut =
|
||||
dc_stream_get_3dlut_for_stream(dc, stream, false);
|
||||
|
||||
if (rmcm_3dlut) {
|
||||
rmcm_3dlut->isInUse = false;
|
||||
rmcm_3dlut->stream = NULL;
|
||||
rmcm_3dlut->protection_bits = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void dc_stream_init_rmcm_3dlut(struct dc *dc)
|
||||
{
|
||||
unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
|
||||
|
||||
for (int i = 0; i < num_rmcm; i++) {
|
||||
dc->res_pool->rmcm_3dlut[i].isInUse = false;
|
||||
dc->res_pool->rmcm_3dlut[i].stream = NULL;
|
||||
dc->res_pool->rmcm_3dlut[i].protection_bits = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Finds the greatest index in refresh_rate_hz that contains a value <= refresh
|
||||
*/
|
||||
|
@ -246,6 +246,7 @@ struct mpc_color_caps {
|
||||
uint16_t ogam_ram : 1;
|
||||
uint16_t ocsc : 1;
|
||||
uint16_t num_3dluts : 3;
|
||||
uint16_t num_rmcm_3dluts : 3;
|
||||
uint16_t shared_3d_lut:1;
|
||||
struct rom_curve_caps ogam_rom_caps;
|
||||
struct lut3d_caps mcm_3d_lut_caps;
|
||||
@ -310,6 +311,7 @@ struct dc_caps {
|
||||
bool dmcub_support;
|
||||
bool zstate_support;
|
||||
bool ips_support;
|
||||
bool ips_v2_support;
|
||||
uint32_t num_of_internal_disp;
|
||||
enum dp_protocol_version max_dp_protocol_version;
|
||||
unsigned int mall_size_per_mem_channel;
|
||||
@ -347,6 +349,8 @@ struct dc_caps {
|
||||
struct dc_scl_caps scl_caps;
|
||||
uint8_t num_of_host_routers;
|
||||
uint8_t num_of_dpias_per_host_router;
|
||||
/* limit of the ODM only, could be limited by other factors (like pipe count)*/
|
||||
uint8_t max_odm_combine_factor;
|
||||
};
|
||||
|
||||
struct dc_bug_wa {
|
||||
@ -501,6 +505,7 @@ struct dc_config {
|
||||
bool use_spl;
|
||||
bool prefer_easf;
|
||||
bool use_pipe_ctx_sync_logic;
|
||||
int smart_mux_version;
|
||||
bool ignore_dpref_ss;
|
||||
bool enable_mipi_converter_optimization;
|
||||
bool use_default_clock_table;
|
||||
@ -511,6 +516,7 @@ struct dc_config {
|
||||
bool EnableMinDispClkODM;
|
||||
bool enable_auto_dpm_test_logs;
|
||||
unsigned int disable_ips;
|
||||
unsigned int disable_ips_rcg;
|
||||
unsigned int disable_ips_in_vpb;
|
||||
bool disable_ips_in_dpms_off;
|
||||
bool usb4_bw_alloc_support;
|
||||
@ -536,6 +542,7 @@ enum visual_confirm {
|
||||
VISUAL_CONFIRM_SWAPCHAIN = 6,
|
||||
VISUAL_CONFIRM_FAMS = 7,
|
||||
VISUAL_CONFIRM_SWIZZLE = 9,
|
||||
VISUAL_CONFIRM_SMARTMUX_DGPU = 10,
|
||||
VISUAL_CONFIRM_REPLAY = 12,
|
||||
VISUAL_CONFIRM_SUBVP = 14,
|
||||
VISUAL_CONFIRM_MCLK_SWITCH = 16,
|
||||
@ -814,6 +821,7 @@ enum pg_hw_resources {
|
||||
PG_DCHVM,
|
||||
PG_DWB,
|
||||
PG_HPO,
|
||||
PG_DCOH,
|
||||
PG_HW_RESOURCES_NUM_ELEMENT
|
||||
};
|
||||
|
||||
@ -957,6 +965,9 @@ struct dc_debug_options {
|
||||
bool disable_dsc_power_gate;
|
||||
bool disable_optc_power_gate;
|
||||
bool disable_hpo_power_gate;
|
||||
bool disable_io_clk_power_gate;
|
||||
bool disable_mem_power_gate;
|
||||
bool disable_dio_power_gate;
|
||||
int dsc_min_slice_height_override;
|
||||
int dsc_bpp_increment_div;
|
||||
bool disable_pplib_wm_range;
|
||||
@ -1294,6 +1305,12 @@ union dc_3dlut_state {
|
||||
};
|
||||
|
||||
|
||||
struct dc_rmcm_3dlut {
|
||||
bool isInUse;
|
||||
const struct dc_stream_state *stream;
|
||||
uint8_t protection_bits;
|
||||
};
|
||||
|
||||
struct dc_3dlut {
|
||||
struct kref refcount;
|
||||
struct tetrahedral_params lut_3d;
|
||||
@ -1624,6 +1641,8 @@ struct dc_scratch_space {
|
||||
|
||||
struct gpio *hpd_gpio;
|
||||
enum dc_link_fec_state fec_state;
|
||||
bool is_dds;
|
||||
bool is_display_mux_present;
|
||||
bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
|
||||
|
||||
struct dc_panel_config panel_config;
|
||||
@ -1678,6 +1697,10 @@ struct dc {
|
||||
|
||||
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
|
||||
|
||||
/* For eDP to know the switching state of SmartMux */
|
||||
bool is_switch_in_progress_orig;
|
||||
bool is_switch_in_progress_dest;
|
||||
|
||||
/* FBC compressor */
|
||||
struct compressor *fbc_compressor;
|
||||
|
||||
|
@ -1269,12 +1269,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
|
||||
new_signals.bits.allow_ips1 = 1;
|
||||
new_signals.bits.allow_ips2 = 1;
|
||||
new_signals.bits.allow_z10 = 1;
|
||||
// New in IPSv2.0
|
||||
new_signals.bits.allow_ips1z8 = 1;
|
||||
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
|
||||
new_signals.bits.allow_ips1 = 1;
|
||||
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
|
||||
// IPSv1.0 only
|
||||
new_signals.bits.allow_pg = 1;
|
||||
new_signals.bits.allow_ips1 = 1;
|
||||
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
|
||||
// IPSv1.0 only
|
||||
new_signals.bits.allow_pg = 1;
|
||||
new_signals.bits.allow_ips1 = 1;
|
||||
new_signals.bits.allow_ips2 = 1;
|
||||
@ -1286,6 +1290,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
|
||||
new_signals.bits.allow_ips1 = 1;
|
||||
new_signals.bits.allow_ips2 = 1;
|
||||
new_signals.bits.allow_z10 = 1;
|
||||
// New in IPSv2.0
|
||||
new_signals.bits.allow_ips1z8 = 1;
|
||||
} else {
|
||||
/* RCG only */
|
||||
new_signals.bits.allow_pg = 0;
|
||||
@ -1293,8 +1299,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
|
||||
new_signals.bits.allow_ips2 = 0;
|
||||
new_signals.bits.allow_z10 = 0;
|
||||
}
|
||||
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) {
|
||||
new_signals.bits.allow_pg = 1;
|
||||
new_signals.bits.allow_ips1 = 1;
|
||||
new_signals.bits.allow_ips2 = 1;
|
||||
new_signals.bits.allow_z10 = 1;
|
||||
}
|
||||
// Setting RCG allow bits (IPSv2.0)
|
||||
if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) {
|
||||
new_signals.bits.allow_ips0_rcg = 1;
|
||||
new_signals.bits.allow_ips1_rcg = 1;
|
||||
} else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) {
|
||||
new_signals.bits.allow_ips1_rcg = 1;
|
||||
} else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) {
|
||||
new_signals.bits.allow_ips0_rcg = 1;
|
||||
}
|
||||
// IPS dynamic allow bits (IPSv2 change, vpb use case)
|
||||
if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) {
|
||||
new_signals.bits.allow_dynamic_ips1 = 1;
|
||||
} else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) {
|
||||
new_signals.bits.allow_dynamic_ips1 = 1;
|
||||
new_signals.bits.allow_dynamic_ips1_z8 = 1;
|
||||
}
|
||||
|
||||
ips_driver->signals = new_signals;
|
||||
dc_dmub_srv->driver_signals = ips_driver->signals;
|
||||
}
|
||||
@ -1318,7 +1344,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
|
||||
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
|
||||
{
|
||||
struct dc_dmub_srv *dc_dmub_srv;
|
||||
uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
|
||||
uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0;
|
||||
|
||||
if (dc->debug.dmcub_emulation)
|
||||
return;
|
||||
@ -1338,31 +1364,34 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
|
||||
rcg_exit_count = ips_fw->rcg_exit_count;
|
||||
ips1_exit_count = ips_fw->ips1_exit_count;
|
||||
ips2_exit_count = ips_fw->ips2_exit_count;
|
||||
ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count;
|
||||
|
||||
ips_driver->signals.all = 0;
|
||||
dc_dmub_srv->driver_signals = ips_driver->signals;
|
||||
|
||||
DC_LOG_IPS(
|
||||
"%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
|
||||
"%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)",
|
||||
__func__,
|
||||
ips_driver->signals.bits.allow_ips1,
|
||||
ips_driver->signals.bits.allow_ips2,
|
||||
ips_fw->signals.bits.ips1_commit,
|
||||
ips_fw->signals.bits.ips2_commit,
|
||||
ips_fw->signals.bits.ips1z8_commit,
|
||||
ips_fw->rcg_entry_count,
|
||||
ips_fw->ips1_entry_count,
|
||||
ips_fw->ips2_entry_count);
|
||||
ips_fw->ips2_entry_count,
|
||||
ips_fw->ips1_z8ret_entry_count);
|
||||
|
||||
/* Note: register access has technically not resumed for DCN here, but we
|
||||
* need to message PMFW through our standard register interface.
|
||||
*/
|
||||
dc_dmub_srv->needs_idle_wake = false;
|
||||
|
||||
if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
|
||||
if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
|
||||
(!dc->debug.optimize_ips_handshake ||
|
||||
ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
|
||||
ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) {
|
||||
DC_LOG_IPS(
|
||||
"wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
|
||||
"wait IPS2 eval (ips1_commit=%u ips2_commit=%u )",
|
||||
ips_fw->signals.bits.ips1_commit,
|
||||
ips_fw->signals.bits.ips2_commit);
|
||||
|
||||
@ -1422,28 +1451,31 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
|
||||
dc_dmub_srv_notify_idle(dc, false);
|
||||
if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
|
||||
DC_LOG_IPS(
|
||||
"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
|
||||
"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
|
||||
ips_fw->signals.bits.ips1_commit,
|
||||
ips_fw->signals.bits.ips2_commit);
|
||||
ips_fw->signals.bits.ips2_commit,
|
||||
ips_fw->signals.bits.ips1z8_commit);
|
||||
|
||||
while (ips_fw->signals.bits.ips1_commit)
|
||||
udelay(1);
|
||||
|
||||
DC_LOG_IPS(
|
||||
"wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
|
||||
"wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
|
||||
ips_fw->signals.bits.ips1_commit,
|
||||
ips_fw->signals.bits.ips2_commit);
|
||||
ips_fw->signals.bits.ips2_commit,
|
||||
ips_fw->signals.bits.ips1z8_commit);
|
||||
}
|
||||
}
|
||||
|
||||
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
|
||||
ASSERT(0);
|
||||
|
||||
DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
|
||||
DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)",
|
||||
__func__,
|
||||
rcg_exit_count,
|
||||
ips1_exit_count,
|
||||
ips2_exit_count);
|
||||
ips2_exit_count,
|
||||
ips1z8_exit_count);
|
||||
}
|
||||
|
||||
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
|
||||
@ -2151,3 +2183,20 @@ bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uin
|
||||
return result;
|
||||
}
|
||||
|
||||
void dc_dmub_srv_release_hw(const struct dc *dc)
|
||||
{
|
||||
struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
|
||||
union dmub_rb_cmd cmd = {0};
|
||||
|
||||
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
|
||||
return;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
|
||||
cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW;
|
||||
cmd.idle_opt_notify_idle.header.payload_bytes =
|
||||
sizeof(cmd.idle_opt_notify_idle) -
|
||||
sizeof(cmd.idle_opt_notify_idle.header);
|
||||
|
||||
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
|
||||
}
|
||||
|
@ -291,4 +291,10 @@ bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t
|
||||
struct dmub_ips_residency_info *driver_info,
|
||||
enum ips_residency_mode ips_mode);
|
||||
|
||||
/**
|
||||
* dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required.
|
||||
*
|
||||
* @dc: pointer to DC object
|
||||
*/
|
||||
void dc_dmub_srv_release_hw(const struct dc *dc);
|
||||
#endif /* _DMUB_DC_SRV_H_ */
|
||||
|
@ -579,6 +579,17 @@ bool dc_stream_set_gamut_remap(struct dc *dc,
|
||||
bool dc_stream_program_csc_matrix(struct dc *dc,
|
||||
struct dc_stream_state *stream);
|
||||
|
||||
struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
|
||||
const struct dc *dc,
|
||||
const struct dc_stream_state *stream,
|
||||
bool allocate_one);
|
||||
|
||||
void dc_stream_release_3dlut_for_stream(
|
||||
const struct dc *dc,
|
||||
const struct dc_stream_state *stream);
|
||||
|
||||
void dc_stream_init_rmcm_3dlut(struct dc *dc);
|
||||
|
||||
struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream);
|
||||
|
||||
void dc_dmub_update_dirty_rect(struct dc *dc,
|
||||
|
@ -175,6 +175,7 @@ struct dc_panel_patch {
|
||||
unsigned int embedded_tiled_slave;
|
||||
unsigned int disable_fams;
|
||||
unsigned int skip_avmute;
|
||||
unsigned int skip_audio_sab_check;
|
||||
unsigned int mst_start_top_delay;
|
||||
unsigned int remove_sink_ext_caps;
|
||||
unsigned int disable_colorimetry;
|
||||
@ -263,6 +264,7 @@ enum dc_timing_source {
|
||||
TIMING_SOURCE_EDID_4BYTE,
|
||||
TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB,
|
||||
TIMING_SOURCE_EDID_CEA_RID,
|
||||
TIMING_SOURCE_EDID_DISPLAYID_TYPE5,
|
||||
TIMING_SOURCE_VBIOS,
|
||||
TIMING_SOURCE_CV,
|
||||
TIMING_SOURCE_TV,
|
||||
@ -1313,6 +1315,7 @@ struct dc_cm2_func_luts {
|
||||
bool mpc_3dlut_enable;
|
||||
bool rmcm_3dlut_enable;
|
||||
bool mpc_mcm_post_blend;
|
||||
uint8_t rmcm_tmz;
|
||||
} lut3d_data;
|
||||
const struct dc_transfer_func *lut1d_func;
|
||||
};
|
||||
|
@ -292,9 +292,35 @@ static void set_speed(
|
||||
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
|
||||
}
|
||||
|
||||
static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw)
{
	uint32_t arbitrate = 0;

	REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
	switch (arbitrate) {
	case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW:
		return true;
	case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW:
		return false;
	case DC_I2C_STATUS__DC_I2C_STATUS_IDLE:
	default:
		break;
	}

	REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true);
	REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
	if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
		return false;

	return true;
}
|
||||
|
||||
static bool setup_engine(
|
||||
struct dce_i2c_hw *dce_i2c_hw)
|
||||
{
|
||||
// Deassert soft reset to unblock I2C engine registers
|
||||
REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false);
|
||||
|
||||
uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
|
||||
uint32_t reset_length = 0;
|
||||
|
||||
@ -309,8 +335,8 @@ static bool setup_engine(
|
||||
REG_UPDATE_N(SETUP, 1,
|
||||
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1);
|
||||
|
||||
/* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
|
||||
REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
|
||||
if (!acquire_engine(dce_i2c_hw))
|
||||
return false;
|
||||
|
||||
/* set SW requested I2C speed to default; it will be overridden later if the API requests a different speed */
|
||||
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);
|
||||
@ -319,9 +345,8 @@ static bool setup_engine(
|
||||
i2c_setup_limit = dce_i2c_hw->setup_limit;
|
||||
|
||||
/* Program pin select */
|
||||
REG_UPDATE_6(DC_I2C_CONTROL,
|
||||
REG_UPDATE_5(DC_I2C_CONTROL,
|
||||
DC_I2C_GO, 0,
|
||||
DC_I2C_SOFT_RESET, 0,
|
||||
DC_I2C_SEND_RESET, 0,
|
||||
DC_I2C_SW_STATUS_RESET, 1,
|
||||
DC_I2C_TRANSACTION_COUNT, 0,
|
||||
@ -351,6 +376,26 @@ static bool setup_engine(
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
 * If we boot without an HDMI display, the I2C engine does not get initialized
 * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after
 * acquire, so that after setting SW_DONE_USING_I2C on release, the engine gets
 * immediately reacquired by SW, preventing DMUB from using it.
 */
static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw)
{
	uint32_t arbitrate = 0;

	REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
	if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
		return;

	// Still acquired after release, release again as a workaround
	REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
	REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
	ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW);
}
|
||||
|
||||
static void release_engine(
|
||||
struct dce_i2c_hw *dce_i2c_hw)
|
||||
{
|
||||
@ -378,9 +423,9 @@ static void release_engine(
|
||||
|
||||
/*for HW HDCP Ri polling failure w/a test*/
|
||||
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp);
|
||||
/* Release I2C after reset, so HW or DMCU could use it */
|
||||
REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1,
|
||||
DC_I2C_SW_USE_I2C_REG_REQ, 0);
|
||||
// Release I2C engine so it can be used by HW or DMCU, automatically clears SW_USE_I2C
|
||||
REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
|
||||
cntl_stuck_hw_workaround(dce_i2c_hw);
|
||||
|
||||
if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) {
|
||||
if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL)
|
||||
|
@ -4,6 +4,7 @@
|
||||
|
||||
#include "dc.h"
|
||||
#include "dc_dmub_srv.h"
|
||||
#include "dc_dp_types.h"
|
||||
#include "dmub/dmub_srv.h"
|
||||
#include "core_types.h"
|
||||
#include "dmub_replay.h"
|
||||
@ -43,21 +44,45 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s
|
||||
/*
|
||||
* Enable/Disable Replay.
|
||||
*/
|
||||
static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
|
||||
static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst,
|
||||
struct dc_link *link)
|
||||
{
|
||||
union dmub_rb_cmd cmd;
|
||||
struct dc_context *dc = dmub->ctx;
|
||||
uint32_t retry_count;
|
||||
enum replay_state state = REPLAY_STATE_0;
|
||||
struct pipe_ctx *pipe_ctx = NULL;
|
||||
struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
|
||||
uint8_t i;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
|
||||
cmd.replay_enable.data.panel_inst = panel_inst;
|
||||
|
||||
cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
|
||||
if (enable)
|
||||
if (enable) {
|
||||
cmd.replay_enable.data.enable = REPLAY_ENABLE;
|
||||
else
|
||||
// hpo stream/link encoder assignments are not static, need to update every time we try to enable replay
|
||||
if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
if (res_ctx &&
|
||||
res_ctx->pipe_ctx[i].stream &&
|
||||
res_ctx->pipe_ctx[i].stream->link &&
|
||||
res_ctx->pipe_ctx[i].stream->link == link &&
|
||||
res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
|
||||
pipe_ctx = &res_ctx->pipe_ctx[i];
|
||||
//TODO: refactor for multi edp support
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!pipe_ctx)
|
||||
return;
|
||||
|
||||
cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
|
||||
cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
|
||||
}
|
||||
} else
|
||||
cmd.replay_enable.data.enable = REPLAY_DISABLE;
|
||||
|
||||
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
|
||||
@ -149,6 +174,17 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
|
||||
copy_settings_data->digbe_inst = replay_context->digbe_inst;
|
||||
copy_settings_data->digfe_inst = replay_context->digfe_inst;
|
||||
|
||||
if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
|
||||
if (pipe_ctx->stream_res.hpo_dp_stream_enc)
|
||||
copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
|
||||
else
|
||||
copy_settings_data->hpo_stream_enc_inst = 0;
|
||||
if (pipe_ctx->link_res.hpo_dp_link_enc)
|
||||
copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
|
||||
else
|
||||
copy_settings_data->hpo_link_enc_inst = 0;
|
||||
}
|
||||
|
||||
if (pipe_ctx->plane_res.dpp)
|
||||
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
|
||||
else
|
||||
@ -211,6 +247,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
|
||||
pCmd->header.type = DMUB_CMD__REPLAY;
|
||||
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
|
||||
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
|
||||
pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst;
|
||||
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
|
||||
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
|
||||
|
||||
|
@ -19,7 +19,7 @@ struct dmub_replay_funcs {
|
||||
void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
|
||||
uint8_t panel_inst);
|
||||
void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
|
||||
uint8_t panel_inst);
|
||||
uint8_t panel_inst, struct dc_link *link);
|
||||
bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
|
||||
struct replay_context *replay_context, uint8_t panel_inst);
|
||||
void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
|
||||
|
@ -30,6 +30,9 @@
|
||||
#include "rc_calc.h"
|
||||
#include "fixed31_32.h"
|
||||
|
||||
#include "clk_mgr.h"
|
||||
#include "resource.h"
|
||||
|
||||
#define DC_LOGGER \
|
||||
dsc->ctx->logger
|
||||
|
||||
@ -149,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
|
||||
}
|
||||
|
||||
/* Forward Declarations */
|
||||
static unsigned int get_min_slice_count_for_odm(
|
||||
const struct display_stream_compressor *dsc,
|
||||
const struct dsc_enc_caps *dsc_enc_caps,
|
||||
const struct dc_crtc_timing *timing);
|
||||
|
||||
static bool decide_dsc_bandwidth_range(
|
||||
const uint32_t min_bpp_x16,
|
||||
const uint32_t max_bpp_x16,
|
||||
@ -183,6 +191,7 @@ static bool setup_dsc_config(
|
||||
const struct dc_crtc_timing *timing,
|
||||
const struct dc_dsc_config_options *options,
|
||||
const enum dc_link_encoding_format link_encoding,
|
||||
int min_slice_count,
|
||||
struct dc_dsc_config *dsc_cfg);
|
||||
|
||||
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
|
||||
@ -442,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and
|
||||
* timing's pixel clock and uncompressed bandwidth.
|
||||
* If DSC is not possible, leave '*range' untouched.
|
||||
@ -458,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range(
|
||||
struct dc_dsc_bw_range *range)
|
||||
{
|
||||
bool is_dsc_possible = false;
|
||||
unsigned int min_slice_count;
|
||||
struct dsc_enc_caps dsc_enc_caps;
|
||||
struct dsc_enc_caps dsc_common_caps;
|
||||
struct dc_dsc_config config = {0};
|
||||
@ -469,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range(
|
||||
|
||||
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
|
||||
|
||||
min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
|
||||
|
||||
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
|
||||
timing->pixel_encoding, &dsc_common_caps);
|
||||
|
||||
if (is_dsc_possible)
|
||||
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
|
||||
&options, link_encoding, &config);
|
||||
&options, link_encoding, min_slice_count, &config);
|
||||
|
||||
if (is_dsc_possible)
|
||||
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
|
||||
@ -525,20 +536,152 @@ void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
|
||||
DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp);
|
||||
}
|
||||
|
||||
|
||||
static void build_dsc_enc_combined_slice_caps(
|
||||
const struct dsc_enc_caps *single_dsc_enc_caps,
|
||||
struct dsc_enc_caps *dsc_enc_caps,
|
||||
unsigned int max_odm_combine_factor)
|
||||
{
|
||||
/* 1-16 slice configurations, single DSC */
|
||||
dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw;
|
||||
|
||||
/* 2x DSC's */
|
||||
if (max_odm_combine_factor >= 2) {
|
||||
/* 1 + 1 */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
|
||||
|
||||
/* 2 + 2 */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
|
||||
|
||||
/* 4 + 4 */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
|
||||
|
||||
/* 8 + 8 */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8;
|
||||
}
|
||||
|
||||
/* 3x DSC's */
|
||||
if (max_odm_combine_factor >= 3) {
|
||||
/* 4 + 4 + 4 */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
|
||||
}
|
||||
|
||||
/* 4x DSC's */
|
||||
if (max_odm_combine_factor >= 4) {
|
||||
/* 1 + 1 + 1 + 1 */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
|
||||
|
||||
/* 2 + 2 + 2 + 2 */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
|
||||
|
||||
/* 3 + 3 + 3 + 3 */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3;
|
||||
|
||||
/* 4 + 4 + 4 + 4 */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
|
||||
}
|
||||
}
|
||||
|
||||
static void build_dsc_enc_caps(
|
||||
const struct display_stream_compressor *dsc,
|
||||
struct dsc_enc_caps *dsc_enc_caps)
|
||||
{
|
||||
unsigned int max_dscclk_khz;
|
||||
unsigned int num_dsc;
|
||||
unsigned int max_odm_combine_factor;
|
||||
struct dsc_enc_caps single_dsc_enc_caps;
|
||||
|
||||
struct dc *dc;
|
||||
|
||||
memset(&single_dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
|
||||
|
||||
if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps)
|
||||
return;
|
||||
|
||||
dc = dsc->ctx->dc;
|
||||
|
||||
if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool)
|
||||
return;
|
||||
|
||||
/* get max DSCCLK from clk_mgr */
|
||||
max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK);
|
||||
|
||||
dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz);
|
||||
|
||||
/* global capabilities */
|
||||
dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version;
|
||||
dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth;
|
||||
dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported;
|
||||
dsc_enc_caps->max_slice_width = single_dsc_enc_caps.max_slice_width;
|
||||
dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div;
|
||||
dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw;
|
||||
dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw;
|
||||
|
||||
/* expand per DSC capabilities to global */
|
||||
max_odm_combine_factor = dc->caps.max_odm_combine_factor;
|
||||
num_dsc = dc->res_pool->res_cap->num_dsc;
|
||||
max_odm_combine_factor = min(max_odm_combine_factor, num_dsc);
|
||||
dsc_enc_caps->max_total_throughput_mps =
|
||||
single_dsc_enc_caps.max_total_throughput_mps *
|
||||
max_odm_combine_factor;
|
||||
|
||||
/* check slice counts possible with ODM combine */
|
||||
build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor);
|
||||
}
|
||||
|
||||
static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
|
||||
{
|
||||
return (value + 9) / 10;
|
||||
}
|
||||
|
||||
static unsigned int get_min_slice_count_for_odm(
|
||||
const struct display_stream_compressor *dsc,
|
||||
const struct dsc_enc_caps *dsc_enc_caps,
|
||||
const struct dc_crtc_timing *timing)
|
||||
{
|
||||
unsigned int max_dispclk_khz;
|
||||
|
||||
/* get max pixel rate and combine caps */
|
||||
max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000;
|
||||
if (dsc && dsc->ctx->dc) {
|
||||
if (dsc->ctx->dc->clk_mgr &&
|
||||
dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) {
|
||||
/* dispclk is available */
|
||||
max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK);
|
||||
}
|
||||
}
|
||||
|
||||
/* consider minimum odm slices required due to
|
||||
* 1) display pipe throughput (dispclk)
|
||||
* 2) max image width per slice
|
||||
*/
|
||||
return dc_fixpt_ceil(dc_fixpt_max(
|
||||
dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)),
|
||||
max_dispclk_khz), // throughput
|
||||
dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right),
|
||||
dsc_enc_caps->max_slice_width))); // slice width
|
||||
}
|
||||
|
||||
static void get_dsc_enc_caps(
|
||||
const struct display_stream_compressor *dsc,
|
||||
struct dsc_enc_caps *dsc_enc_caps,
|
||||
int pixel_clock_100Hz)
|
||||
{
|
||||
// This is a static HW query, so we can use any DSC
|
||||
|
||||
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
|
||||
if (dsc) {
|
||||
|
||||
if (!dsc)
|
||||
return;
|
||||
|
||||
/* check if reported cap global or only for a single DCN DSC enc */
|
||||
if (dsc->funcs->dsc_get_enc_caps) {
|
||||
if (!dsc->ctx->dc->debug.disable_dsc)
|
||||
dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
|
||||
if (dsc->ctx->dc->debug.native422_support)
|
||||
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
|
||||
} else {
|
||||
build_dsc_enc_caps(dsc, dsc_enc_caps);
|
||||
}
|
||||
|
||||
if (dsc->ctx->dc->debug.native422_support)
|
||||
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
|
||||
}
|
||||
|
||||
/* Returns 'false' if no intersection was found for at least one capability.
|
||||
@ -621,11 +764,6 @@ static bool intersect_dsc_caps(
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
|
||||
{
|
||||
return (value + 9) / 10;
|
||||
}
|
||||
|
||||
static uint32_t compute_bpp_x16_from_target_bandwidth(
|
||||
const uint32_t bandwidth_in_kbps,
|
||||
const struct dc_crtc_timing *timing,
|
||||
@ -910,11 +1048,11 @@ static bool setup_dsc_config(
|
||||
const struct dc_crtc_timing *timing,
|
||||
const struct dc_dsc_config_options *options,
|
||||
const enum dc_link_encoding_format link_encoding,
|
||||
int min_slices_h,
|
||||
struct dc_dsc_config *dsc_cfg)
|
||||
{
|
||||
struct dsc_enc_caps dsc_common_caps;
|
||||
int max_slices_h = 0;
|
||||
int min_slices_h = 0;
|
||||
int num_slices_h = 0;
|
||||
int pic_width;
|
||||
int slice_width;
|
||||
@ -1018,12 +1156,9 @@ static bool setup_dsc_config(
|
||||
if (!is_dsc_possible)
|
||||
goto done;
|
||||
|
||||
min_slices_h = pic_width / dsc_common_caps.max_slice_width;
|
||||
if (pic_width % dsc_common_caps.max_slice_width)
|
||||
min_slices_h++;
|
||||
|
||||
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
|
||||
|
||||
/* increase minimum slice count to meet sink throughput limitations */
|
||||
while (min_slices_h <= max_slices_h) {
|
||||
int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h;
|
||||
if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000)
|
||||
@ -1032,14 +1167,12 @@ static bool setup_dsc_config(
|
||||
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
|
||||
}
|
||||
|
||||
is_dsc_possible = (min_slices_h <= max_slices_h);
|
||||
|
||||
if (pic_width % min_slices_h != 0)
|
||||
min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
|
||||
|
||||
if (min_slices_h == 0 && max_slices_h == 0)
|
||||
is_dsc_possible = false;
|
||||
/* increase minimum slice count to meet divisibility requirements */
|
||||
while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) {
|
||||
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
|
||||
}
|
||||
|
||||
is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0;
|
||||
if (!is_dsc_possible)
|
||||
goto done;
|
||||
|
||||
@ -1162,12 +1295,19 @@ bool dc_dsc_compute_config(
|
||||
{
|
||||
bool is_dsc_possible = false;
|
||||
struct dsc_enc_caps dsc_enc_caps;
|
||||
|
||||
unsigned int min_slice_count;
|
||||
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
|
||||
|
||||
min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
|
||||
|
||||
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
|
||||
&dsc_enc_caps,
|
||||
target_bandwidth_kbps,
|
||||
timing, options, link_encoding, dsc_cfg);
|
||||
timing,
|
||||
options,
|
||||
link_encoding,
|
||||
min_slice_count,
|
||||
dsc_cfg);
|
||||
return is_dsc_possible;
|
||||
}
|
||||
|
||||
|
@ -9,17 +9,14 @@
|
||||
#include "dsc/dscc_types.h"
|
||||
#include "dsc/rc_calc.h"
|
||||
|
||||
#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000
|
||||
#define MAX_DSC_UNIT_COMBINE 4
|
||||
|
||||
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
|
||||
|
||||
/* Object I/F functions */
|
||||
//static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
|
||||
//static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
|
||||
static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
|
||||
|
||||
static const struct dsc_funcs dcn401_dsc_funcs = {
|
||||
.dsc_get_enc_caps = dsc401_get_enc_caps,
|
||||
.dsc_read_state = dsc401_read_state,
|
||||
.dsc_validate_stream = dsc401_validate_stream,
|
||||
.dsc_set_config = dsc401_set_config,
|
||||
@ -28,6 +25,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
|
||||
.dsc_disable = dsc401_disable,
|
||||
.dsc_disconnect = dsc401_disconnect,
|
||||
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear,
|
||||
.dsc_get_single_enc_caps = dsc401_get_single_enc_caps,
|
||||
};
|
||||
|
||||
/* Macro definitions for REG_SET macros */
|
||||
@ -64,22 +62,14 @@ void dsc401_construct(struct dcn401_dsc *dsc,
|
||||
dsc->max_image_width = 5184;
|
||||
}
|
||||
|
||||
void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
|
||||
static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
|
||||
{
|
||||
int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ;
|
||||
|
||||
dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
|
||||
|
||||
/* 1 slice is only supported with 1 DSC unit */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0;
|
||||
/* 2 slice is only supported with 1 or 2 DSC units */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0;
|
||||
/* 3 slice is only supported with 1 DSC unit */
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ? 1 : 0;
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1;
|
||||
dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1;
|
||||
|
||||
dsc_enc_caps->lb_bit_depth = 13;
|
||||
dsc_enc_caps->is_block_pred_supported = true;
|
||||
@ -93,7 +83,7 @@ void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100H
|
||||
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
|
||||
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
|
||||
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
|
||||
dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE;
|
||||
dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
|
||||
|
||||
dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
|
||||
dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
|
||||
|
@ -341,7 +341,6 @@ void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_c
|
||||
void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
|
||||
void dsc401_disable(struct display_stream_compressor *dsc);
|
||||
void dsc401_disconnect(struct display_stream_compressor *dsc);
|
||||
void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
|
||||
void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
|
||||
#endif
|
||||
|
||||
|
@ -108,6 +108,7 @@ struct dsc_funcs {
|
||||
void (*dsc_disable)(struct display_stream_compressor *dsc);
|
||||
void (*dsc_disconnect)(struct display_stream_compressor *dsc);
|
||||
void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc);
|
||||
void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width
|
||||
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
|
||||
}
|
||||
|
||||
void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
|
||||
void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits)
|
||||
{
|
||||
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
|
||||
|
||||
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0);
|
||||
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits);
|
||||
}
|
||||
|
||||
void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
|
||||
|
@ -333,7 +333,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
|
||||
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
|
||||
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
|
||||
|
||||
void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled);
|
||||
void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits);
|
||||
|
||||
void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
|
||||
|
||||
|
@ -1686,6 +1686,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
|
||||
if (dc_is_dp_signal(pipe_ctx->stream->signal))
|
||||
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
|
||||
|
||||
/* Temporary workaround to perform DSC programming ahead of stream enablement
|
||||
* for smartmux/SPRS
|
||||
* TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
|
||||
*/
|
||||
if (pipe_ctx->stream->timing.flags.DSC) {
|
||||
if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
|
||||
((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
|
||||
|| link->is_dds || link->skip_implict_edp_power_control)) &&
|
||||
(dc_is_dp_signal(pipe_ctx->stream->signal) ||
|
||||
dc_is_virtual_signal(pipe_ctx->stream->signal)))
|
||||
dc->link_srv->set_dsc_enable(pipe_ctx, true);
|
||||
}
|
||||
|
||||
if (!stream->dpms_off)
|
||||
dc->link_srv->set_dpms_on(context, pipe_ctx);
|
||||
|
||||
@ -1927,6 +1940,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
|
||||
|
||||
can_apply_edp_fast_boot = dc_validate_boot_timing(dc,
|
||||
edp_stream->sink, &edp_stream->timing);
|
||||
|
||||
// For Mux platforms, the default value is false.
// Disable fast boot during mux switching.
// The flag is cleared once switching is done.
|
||||
if (dc->is_switch_in_progress_dest && edp_link->is_dds)
|
||||
can_apply_edp_fast_boot = false;
|
||||
|
||||
edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
|
||||
if (can_apply_edp_fast_boot) {
|
||||
DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n");
|
||||
@ -1970,6 +1990,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
|
||||
if (edp_with_sink_num)
|
||||
edp_link_with_sink = edp_links_with_sink[0];
|
||||
|
||||
// During a mux switch, powering down the HW blocks and then enabling
|
||||
// the link via a DPCD SET_POWER write causes a brief flash
|
||||
keep_edp_vdd_on |= dc->is_switch_in_progress_dest;
|
||||
|
||||
if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
|
||||
if (edp_link_with_sink && !keep_edp_vdd_on) {
|
||||
/*turn off backlight before DP_blank and encoder powered down*/
|
||||
|
@ -335,7 +335,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
|
||||
struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
|
||||
|
||||
if (!s->blank_en) {
|
||||
DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
|
||||
DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
|
||||
pool->hubps[i]->inst,
|
||||
fl_regs->lut_enable,
|
||||
fl_regs->lut_done,
|
||||
|
@ -562,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe(
|
||||
else if (pipe_ctx->stream_res.audio)
|
||||
dc->hwss.disable_audio_stream(pipe_ctx);
|
||||
|
||||
/* Temporary workaround to perform DSC programming ahead of pipe reset
|
||||
* for smartmux/SPRS
|
||||
* TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
|
||||
*/
|
||||
if (pipe_ctx->stream->timing.flags.DSC) {
|
||||
if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
|
||||
((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
|
||||
|| link->is_dds || link->skip_implict_edp_power_control)) &&
|
||||
(dc_is_dp_signal(pipe_ctx->stream->signal) ||
|
||||
dc_is_virtual_signal(pipe_ctx->stream->signal)))
|
||||
dc->link_srv->set_dsc_enable(pipe_ctx, false);
|
||||
}
|
||||
|
||||
/* free acquired resources */
|
||||
if (pipe_ctx->stream_res.audio) {
|
||||
/*disable az_endpoint*/
|
||||
|
@ -55,15 +55,15 @@
|
||||
#include "dcn20/dcn20_optc.h"
|
||||
#include "dcn30/dcn30_cm_common.h"
|
||||
|
||||
#define DC_LOGGER_INIT(logger) \
|
||||
struct dal_logger *dc_logger = logger
|
||||
#define DC_LOGGER_INIT(logger)
|
||||
|
||||
#define CTX \
|
||||
hws->ctx
|
||||
#define REG(reg)\
|
||||
hws->regs->reg
|
||||
#define DC_LOGGER \
|
||||
dc_logger
|
||||
stream->ctx->logger
|
||||
|
||||
|
||||
#undef FN
|
||||
#define FN(reg_name, field_name) \
|
||||
@ -76,8 +76,6 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
|
||||
struct pipe_ctx *odm_pipe;
|
||||
int opp_cnt = 1;
|
||||
|
||||
DC_LOGGER_INIT(stream->ctx->logger);
|
||||
|
||||
ASSERT(dsc);
|
||||
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
|
||||
opp_cnt++;
|
||||
@ -530,32 +528,3 @@ void dcn314_disable_link_output(struct dc_link *link,
|
||||
|
||||
apply_symclk_on_tx_off_wa(link);
|
||||
}
|
||||
|
||||
|
||||
void dcn314_plane_atomic_power_down(struct dc *dc,
|
||||
struct dpp *dpp,
|
||||
struct hubp *hubp)
|
||||
{
|
||||
struct dce_hwseq *hws = dc->hwseq;
|
||||
DC_LOGGER_INIT(dc->ctx->logger);
|
||||
|
||||
if (REG(DC_IP_REQUEST_CNTL)) {
|
||||
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
|
||||
|
||||
if (hws->funcs.dpp_pg_control) {
|
||||
hws->funcs.dpp_pg_control(hws, dpp->inst, false);
|
||||
dpp->funcs->dpp_reset(dpp);
|
||||
}
|
||||
|
||||
if (hws->funcs.hubp_pg_control) {
|
||||
hws->funcs.hubp_pg_control(hws, hubp->inst, false);
|
||||
hubp->funcs->hubp_reset(hubp);
|
||||
}
|
||||
|
||||
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
|
||||
DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst);
|
||||
}
|
||||
|
||||
if (hws->funcs.dpp_root_clock_control)
|
||||
hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
|
||||
}
|
||||
|
@ -47,6 +47,4 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
|
||||
|
||||
void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
|
||||
|
||||
void dcn314_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp);
|
||||
|
||||
#endif /* __DC_HWSS_DCN314_H__ */
|
||||
|
@ -137,7 +137,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
|
||||
.disable_vga = dcn20_disable_vga,
|
||||
.bios_golden_init = dcn10_bios_golden_init,
|
||||
.plane_atomic_disable = dcn20_plane_atomic_disable,
|
||||
.plane_atomic_power_down = dcn314_plane_atomic_power_down,
|
||||
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
|
||||
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
|
||||
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
|
||||
.hubp_pg_control = dcn31_hubp_pg_control,
|
||||
|
@ -51,7 +51,7 @@
|
||||
#define FN(reg_name, field_name) \
|
||||
hws->shifts->field_name, hws->masks->field_name
|
||||
|
||||
static void dcn401_initialize_min_clocks(struct dc *dc)
|
||||
void dcn401_initialize_min_clocks(struct dc *dc)
|
||||
{
|
||||
struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
|
||||
|
||||
@ -2632,10 +2632,12 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
|
||||
|
||||
DC_LOGGER_INIT(dc->ctx->logger);
|
||||
|
||||
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
|
||||
if (org_ip_request_cntl == 0)
|
||||
REG_SET(DC_IP_REQUEST_CNTL, 0,
|
||||
IP_REQUEST_EN, 1);
|
||||
if (REG(DC_IP_REQUEST_CNTL)) {
|
||||
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
|
||||
if (org_ip_request_cntl == 0)
|
||||
REG_SET(DC_IP_REQUEST_CNTL, 0,
|
||||
IP_REQUEST_EN, 1);
|
||||
}
|
||||
|
||||
if (hws->funcs.dpp_pg_control)
|
||||
hws->funcs.dpp_pg_control(hws, dpp->inst, false);
|
||||
@ -2646,7 +2648,7 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
|
||||
hubp->funcs->hubp_reset(hubp);
|
||||
dpp->funcs->dpp_reset(dpp);
|
||||
|
||||
if (org_ip_request_cntl == 0)
|
||||
if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
|
||||
REG_SET(DC_IP_REQUEST_CNTL, 0,
|
||||
IP_REQUEST_EN, 0);
|
||||
|
||||
|
@ -109,4 +109,5 @@ void dcn401_detect_pipe_changes(
|
||||
void dcn401_plane_atomic_power_down(struct dc *dc,
|
||||
struct dpp *dpp,
|
||||
struct hubp *hubp);
|
||||
void dcn401_initialize_min_clocks(struct dc *dc);
|
||||
#endif /* __DC_HWSS_DCN401_H__ */
|
||||
|
@ -502,6 +502,9 @@ void get_hdr_visual_confirm_color(
|
||||
void get_mpctree_visual_confirm_color(
|
||||
struct pipe_ctx *pipe_ctx,
|
||||
struct tg_color *color);
|
||||
void get_smartmux_visual_confirm_color(
|
||||
struct dc *dc,
|
||||
struct tg_color *color);
|
||||
void get_vabc_visual_confirm_color(
|
||||
struct pipe_ctx *pipe_ctx,
|
||||
struct tg_color *color);
|
||||
|
@ -67,6 +67,8 @@ struct resource_context;
|
||||
struct clk_bw_params;
|
||||
struct dc_mcache_params;
|
||||
|
||||
#define MAX_RMCM_INST 2
|
||||
|
||||
struct resource_funcs {
|
||||
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
|
||||
void (*destroy)(struct resource_pool **pool);
|
||||
@ -286,6 +288,7 @@ struct resource_pool {
|
||||
struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
|
||||
struct dc_3dlut *mpc_lut[MAX_PIPES];
|
||||
struct dc_transfer_func *mpc_shaper[MAX_PIPES];
|
||||
struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST];
|
||||
|
||||
struct {
|
||||
unsigned int xtalin_clock_inKhz;
|
||||
|
@ -100,6 +100,17 @@ struct dcn301_clk_internal {
|
||||
#define MAX_NUM_DPM_LVL 8
|
||||
#define WM_SET_COUNT 4
|
||||
|
||||
enum clk_type {
|
||||
CLK_TYPE_DCFCLK,
|
||||
CLK_TYPE_FCLK,
|
||||
CLK_TYPE_MCLK,
|
||||
CLK_TYPE_SOCCLK,
|
||||
CLK_TYPE_DTBCLK,
|
||||
CLK_TYPE_DISPCLK,
|
||||
CLK_TYPE_DPPCLK,
|
||||
CLK_TYPE_DSCCLK,
|
||||
CLK_TYPE_COUNT
|
||||
};
|
||||
|
||||
struct clk_limit_table_entry {
|
||||
unsigned int voltage; /* millivolts with 2 fractional bits */
|
||||
@ -326,6 +337,9 @@ struct clk_mgr_funcs {
|
||||
|
||||
bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr);
|
||||
|
||||
uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set);
|
||||
|
||||
unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
|
||||
};
|
||||
|
||||
struct clk_mgr {
|
||||
|
@ -282,7 +282,7 @@ struct hubp_funcs {
|
||||
void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable);
|
||||
void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
|
||||
void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width);
|
||||
void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled);
|
||||
void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits);
|
||||
void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp,
|
||||
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
|
||||
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
|
||||
|
@ -46,6 +46,8 @@ struct pg_cntl_funcs {
|
||||
void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on);
|
||||
void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
|
||||
void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
|
||||
void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
|
||||
void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
|
||||
void (*init_pg_status)(struct pg_cntl *pg_cntl);
|
||||
};
|
||||
|
||||
|
@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
|
||||
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
|
||||
struct encoder_set_dp_phy_pattern_param *tp_params)
|
||||
{
|
||||
uint8_t clk_src = 0x4C;
|
||||
uint8_t clk_src = 0xC4;
|
||||
uint8_t pattern = 0x4F; /* SQ128 */
|
||||
|
||||
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
|
||||
|
@ -140,7 +140,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
|
||||
}
|
||||
}
|
||||
|
||||
if (((!link->wa_flags.dp_keep_receiver_powered) || hw_init) &&
|
||||
if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) &&
|
||||
(link->type != dc_connection_none))
|
||||
dpcd_write_rx_power_ctrl(link, false);
|
||||
}
|
||||
@ -2537,6 +2537,14 @@ void link_set_dpms_on(
|
||||
!pipe_ctx->next_odm_pipe) {
|
||||
pipe_ctx->stream->dpms_off = false;
|
||||
update_psp_stream_config(pipe_ctx, false);
|
||||
|
||||
if (link->is_dds) {
|
||||
uint32_t post_oui_delay = 30; // 30ms
|
||||
|
||||
dpcd_set_source_specific_data(link);
|
||||
msleep(post_oui_delay);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
@ -2629,6 +2637,15 @@ void link_set_dpms_on(
|
||||
dp_is_128b_132b_signal(pipe_ctx))
|
||||
update_sst_payload(pipe_ctx, true);
|
||||
|
||||
/* Corruption was observed on systems with display mux when stream gets
 * enabled after the mux switch. Having a small delay between link
 * training and stream unblank resolves the corruption issue.
 * This is a workaround.
 */
|
||||
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
|
||||
link->is_display_mux_present)
|
||||
msleep(20);
|
||||
|
||||
dc->hwss.unblank_stream(pipe_ctx,
|
||||
&pipe_ctx->stream->link->cur_link_settings);
|
||||
|
||||
|
@ -539,10 +539,16 @@ static bool construct_phy(struct dc_link *link,
|
||||
|
||||
break;
|
||||
case CONNECTOR_ID_EDP:
|
||||
// If smartmux is supported, only create the link on the primary eDP.
|
||||
// Dual eDP is not supported with smartmux.
|
||||
if (!(!link->dc->config.smart_mux_version || dc_ctx->dc_edp_id_count == 0))
|
||||
goto create_fail;
|
||||
|
||||
link->connector_signal = SIGNAL_TYPE_EDP;
|
||||
|
||||
if (link->hpd_gpio) {
|
||||
if (!link->dc->config.allow_edp_hotplug_detection)
|
||||
if (!link->dc->config.allow_edp_hotplug_detection
|
||||
&& !is_smartmux_suported(link))
|
||||
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
|
||||
|
||||
switch (link->dc->config.allow_edp_hotplug_detection) {
|
||||
|
@ -1388,6 +1388,21 @@ void dpcd_set_source_specific_data(struct dc_link *link)
|
||||
struct dpcd_amd_signature amd_signature = {0};
|
||||
struct dpcd_amd_device_id amd_device_id = {0};
|
||||
|
||||
if (link->is_dds) {
|
||||
uint8_t dpcd_dp_edp_backlight_mode = 0;
|
||||
|
||||
/*
|
||||
* Write 0 to bits 0:1 for dp_edp_backlight_mode_set register
|
||||
* if platform is DDS
|
||||
*/
|
||||
core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
|
||||
&dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
|
||||
dpcd_dp_edp_backlight_mode &= ~0x3;
|
||||
|
||||
core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
|
||||
&dpcd_dp_edp_backlight_mode, sizeof(uint8_t));
|
||||
}
|
||||
|
||||
amd_device_id.device_id_byte1 =
|
||||
(uint8_t)(link->ctx->asic_id.chip_id);
|
||||
amd_device_id.device_id_byte2 =
|
||||
@ -1543,6 +1558,10 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
|
||||
return false;
|
||||
|
||||
link->dpcd_sink_ext_caps.raw = dpcd_data;
|
||||
if (link->is_dds && !link->dpcd_sink_ext_caps.bits.oled) {
|
||||
link->dpcd_sink_ext_caps.raw = 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK)
|
||||
return false;
|
||||
|
Some files were not shown because too many files have changed in this diff