Merge tag 'amd-drm-next-6.16-2025-05-16' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amdgpu:
- Misc code cleanups
- UserQ fixes
- MALL reporting fix
- DP AUX fixes
- DCN 3.5 fixes
- DP MST fixes
- DC DMI quirks cleanup
- RAS fixes
- SR-IOV updates
- GC 9.5 updates
- Misc display fixes
- VCN 4.0.5 powergating race fix
- SMU 13.x updates
- Partitioning fixes
- VCN 5.0.1 SR-IOV updates
- JPEG 5.0.1 SR-IOV updates

amdkfd:
- Fix spurious warning in interrupt code
- XNACK fixes

radeon:
- CIK doorbell cleanup

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20250516204609.2437472-1-alexander.deucher@amd.com
Dave Airlie
2025-05-19 09:00:37 +10:00
114 changed files with 2042 additions and 1127 deletions

View File

@@ -182,7 +182,7 @@ we have a dedicated glossary for Display Core at
SMU/SMC
System Management Unit / System Management Controller
SPI
SPI (AMDGPU)
Shader Processor Input
SRLC

View File

@@ -281,6 +281,9 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
case ATOM_DGPU_VRAM_TYPE_GDDR6:
vram_type = AMDGPU_VRAM_TYPE_GDDR6;
break;
case ATOM_DGPU_VRAM_TYPE_HBM3E:
vram_type = AMDGPU_VRAM_TYPE_HBM3E;
break;
default:
vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
break;

View File

@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))

View File

@@ -2105,6 +2105,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
amdgpu_psp_debugfs_init(adev);
debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);

View File

@@ -183,7 +183,7 @@ void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
dma_fence_wait(&ev_fence->base, false);
/* Last unref of ev_fence */
dma_fence_put(&evf_mgr->ev_fence->base);
dma_fence_put(&ev_fence->base);
}
int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,

View File

@@ -62,6 +62,9 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
/* XNACK flags */
#define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0)
struct firmware;
enum amdgpu_memory_partition {
@@ -301,6 +304,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
int noretry;
uint32_t xnack_flags;
uint32_t vmid0_page_table_block_size;
uint32_t vmid0_page_table_depth;

View File

@@ -619,6 +619,10 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
/* When the threshold is reached, the interrupt source may not be enabled. Return -EINVAL. */
if (amdgpu_ras_is_rma(adev))
return -EINVAL;
if (!adev->irq.installed)
return -ENOENT;

View File

@@ -1425,16 +1425,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
if (r)
DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
if (r)
goto error_vm;
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
if (r)
DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
file_priv->driver_priv = fpriv;
goto out_suspend;
@@ -1502,10 +1502,11 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unreserve(pd);
}
fpriv->evf_mgr.fd_closing = true;
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
if (!fpriv->evf_mgr.fd_closing) {
fpriv->evf_mgr.fd_closing = true;
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
}
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_vm_fini(adev, &fpriv->vm);

View File

@@ -1044,7 +1044,8 @@ static const char * const amdgpu_vram_names[] = {
"GDDR6",
"DDR5",
"LPDDR4",
"LPDDR5"
"LPDDR5",
"HBM3E"
};
/**

View File

@@ -4186,6 +4186,110 @@ const struct attribute_group amdgpu_flash_attr_group = {
.is_visible = amdgpu_flash_attr_is_visible,
};
#if defined(CONFIG_DEBUG_FS)
static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
{
struct amdgpu_device *adev = filp->f_inode->i_private;
struct spirom_bo *bo_triplet;
int ret;
/* serialize the open() file call */
if (!mutex_trylock(&adev->psp.mutex))
return -EBUSY;
/*
* make sure only one userspace process is alive for dumping so that
* only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
* e.g. the case where one process tries opening the file while
* another one has proceeded to read or release. This also eliminates
* the need for a mutex in the read() or release() callbacks.
*/
if (adev->psp.spirom_dump_trip) {
mutex_unlock(&adev->psp.mutex);
return -EBUSY;
}
bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
if (!bo_triplet) {
mutex_unlock(&adev->psp.mutex);
return -ENOMEM;
}
ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&bo_triplet->bo,
&bo_triplet->mc_addr,
&bo_triplet->cpu_addr);
if (ret)
goto rel_trip;
ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
if (ret)
goto rel_bo;
adev->psp.spirom_dump_trip = bo_triplet;
mutex_unlock(&adev->psp.mutex);
return 0;
rel_bo:
amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
&bo_triplet->cpu_addr);
rel_trip:
kfree(bo_triplet);
mutex_unlock(&adev->psp.mutex);
dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
return ret;
}
static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
loff_t *pos)
{
struct amdgpu_device *adev = filp->f_inode->i_private;
struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
if (!bo_triplet)
return -EINVAL;
return simple_read_from_buffer(buf,
size,
pos, bo_triplet->cpu_addr,
AMD_VBIOS_FILE_MAX_SIZE_B * 2);
}
static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
{
struct amdgpu_device *adev = filp->f_inode->i_private;
struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
if (bo_triplet) {
amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
&bo_triplet->cpu_addr);
kfree(bo_triplet);
}
adev->psp.spirom_dump_trip = NULL;
return 0;
}
static const struct file_operations psp_dump_spirom_debugfs_ops = {
.owner = THIS_MODULE,
.open = psp_read_spirom_debugfs_open,
.read = psp_read_spirom_debugfs_read,
.release = psp_read_spirom_debugfs_release,
.llseek = default_llseek,
};
#endif
void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
#endif
}
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
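
The node registered above can be exercised from userspace with an ordinary open/read/close cycle: open() triggers a fresh IFWI dump into the GTT buffer, reads are served from that buffer, and close() frees it. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug and the GPU is DRI minor 0 (both are assumptions that vary per system):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption: debugfs mount point and DRI minor vary. */
	const char *path = "/sys/kernel/debug/dri/0/psp_spirom_dump";
	char buf[4096];
	ssize_t n;
	int fd = open(path, O_RDONLY); /* runs psp_read_spirom_debugfs_open() */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Reads are served from the kernel BO via simple_read_from_buffer(). */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd); /* release frees the BO and clears spirom_dump_trip */
	return 0;
}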

View File

@@ -39,6 +39,18 @@
#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
/* VBIOS gfl defines */
#define MBOX_READY_MASK 0x80000000
#define MBOX_STATUS_MASK 0x0000FFFF
#define MBOX_COMMAND_MASK 0x00FF0000
#define MBOX_READY_FLAG 0x80000000
#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO 0xf
#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -138,6 +150,7 @@ struct psp_funcs {
int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
@@ -322,6 +335,14 @@ struct psp_runtime_scpm_entry {
enum psp_runtime_scpm_authentication scpm_status;
};
#if defined(CONFIG_DEBUG_FS)
struct spirom_bo {
struct amdgpu_bo *bo;
uint64_t mc_addr;
void *cpu_addr;
};
#endif
struct psp_context {
struct amdgpu_device *adev;
struct psp_ring km_ring;
@@ -409,6 +430,9 @@ struct psp_context {
char *vbflash_tmp_buf;
size_t vbflash_image_size;
bool vbflash_done;
#if defined(CONFIG_DEBUG_FS)
struct spirom_bo *spirom_dump_trip;
#endif
};
struct amdgpu_psp_funcs {
@@ -467,6 +491,10 @@ struct amdgpu_psp_funcs {
((psp)->funcs->update_spirom ? \
(psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL)
#define psp_dump_spirom(psp, fw_pri_mc_addr) \
((psp)->funcs->dump_spirom ? \
(psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL)
#define psp_vbflash_status(psp) \
((psp)->funcs->vbflash_stat ? \
(psp)->funcs->vbflash_stat((psp)) : -EINVAL)
@@ -578,6 +606,7 @@ int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
enum psp_reg_prog_id id);
void amdgpu_psp_debugfs_init(struct amdgpu_device *adev);
#endif
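
The MBOX_* masks above imply a packed mailbox register: status in bits 0-15, command in bits 16-23, ready flag in bit 31. A hypothetical decoder sketch under that assumed layout (the helpers are illustrative, not driver API):

#include <stdbool.h>
#include <stdint.h>

#define MBOX_READY_MASK   0x80000000
#define MBOX_STATUS_MASK  0x0000FFFF
#define MBOX_COMMAND_MASK 0x00FF0000

/* Hypothetical helpers; field layout inferred from the masks above. */
static inline bool mbox_ready(uint32_t reg)
{
	return (reg & MBOX_READY_MASK) != 0;
}

static inline uint32_t mbox_status(uint32_t reg)
{
	return reg & MBOX_STATUS_MASK;
}

static inline uint32_t mbox_command(uint32_t reg)
{
	return (reg & MBOX_COMMAND_MASK) >> 16;
}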

View File

@@ -2889,6 +2889,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
return -EINVAL;
}
return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
adev->umc.retire_unit);
}
@@ -2903,7 +2904,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
&adev->psp.ras_context.ras->eeprom_control;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
uint32_t i;
uint32_t i = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
@@ -2924,34 +2925,36 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
if (from_rom) {
for (i = 0; i < pages; i++) {
if (control->ras_num_recs - i >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
(bps[i].mem_channel == bps[i + 1].mem_channel)) {
//deal with retire_unit records at a time
ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
goto free;
i += (adev->umc.retire_unit - 1);
/* there are no PA recs in V3, so skip PA recs processing */
if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
for (i = 0; i < pages; i++) {
if (control->ras_num_recs - i >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
(bps[i].mem_channel == bps[i + 1].mem_channel)) {
/* deal with retire_unit records at a time */
ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
control->ras_num_bad_pages -= adev->umc.retire_unit;
i += (adev->umc.retire_unit - 1);
} else {
break;
}
} else {
break;
}
} else {
break;
}
}
for (; i < pages; i++) {
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
goto free;
control->ras_num_bad_pages -= adev->umc.retire_unit;
}
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
free:
if (from_rom)
kfree(err_data.err_addr);
mutex_unlock(&con->recovery_lock);
@@ -3040,21 +3043,28 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
dev_err(adev->dev, "Failed to load EEPROM table records!");
} else {
if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
for (i = 0; i < control->ras_num_recs; i++) {
if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
(bps[i].mem_channel == bps[i + 1].mem_channel)) {
control->ras_num_pa_recs += adev->umc.retire_unit;
i += (adev->umc.retire_unit - 1);
/* In V3 there are no PA recs, and some cases (when address == 0) may be
* parsed as PA recs, so add a version check to avoid it.
*/
if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
for (i = 0; i < control->ras_num_recs; i++) {
if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
(bps[i].mem_channel == bps[i + 1].mem_channel)) {
control->ras_num_pa_recs += adev->umc.retire_unit;
i += (adev->umc.retire_unit - 1);
} else {
control->ras_num_mca_recs +=
(control->ras_num_recs - i);
break;
}
} else {
control->ras_num_mca_recs +=
(control->ras_num_recs - i);
control->ras_num_mca_recs += (control->ras_num_recs - i);
break;
}
} else {
control->ras_num_mca_recs += (control->ras_num_recs - i);
break;
}
} else {
control->ras_num_mca_recs = control->ras_num_recs;
}
}
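
Condensed, the reworked classification reads as follows: physical-address (PA) groups exist only in pre-V3 tables, where a group spans retire_unit consecutive entries sharing one (address, mem_channel) pair; everything else counts as MCA records. A flattened sketch using the surrounding names, error handling elided:

/* Sketch of the classification above (pre-V3 tables only carry PA groups). */
if (control->tbl_hdr.version >= RAS_TABLE_VER_V3) {
	control->ras_num_mca_recs = control->ras_num_recs;	/* all MCA */
} else {
	for (i = 0; i < control->ras_num_recs; i++) {
		bool group_fits = (control->ras_num_recs - i) >= adev->umc.retire_unit;

		if (group_fits && bps[i].address == bps[i + 1].address &&
		    bps[i].mem_channel == bps[i + 1].mem_channel) {
			control->ras_num_pa_recs += adev->umc.retire_unit;
			i += adev->umc.retire_unit - 1;	/* skip the whole group */
		} else {
			control->ras_num_mca_recs += control->ras_num_recs - i;
			break;
		}
	}
}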
@@ -3463,6 +3473,10 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
control->ras_num_pa_recs = control->ras_num_recs;
if (adev->umc.ras &&
adev->umc.ras->get_retire_flip_bits)
adev->umc.ras->get_retire_flip_bits(adev);
if (control->ras_num_recs) {
ret = amdgpu_ras_load_bad_pages(adev);
if (ret)
@@ -4484,8 +4498,11 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
u64 event_id;
if (amdgpu_ras_mark_ras_event(adev, type))
if (amdgpu_ras_mark_ras_event(adev, type)) {
dev_err(adev->dev,
"uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
return;
}
event_id = amdgpu_ras_acquire_event_id(adev, type);

View File

@@ -2081,6 +2081,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
amdgpu_doorbell_fini(adev);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);

View File

@@ -767,6 +767,7 @@ FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
static struct attribute *fw_attrs[] = {
&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -781,7 +782,7 @@ static struct attribute *fw_attrs[] = {
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
&dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
NULL
&dev_attr_pldm_fw_version.attr, NULL
};
#define to_dev_attr(x) container_of(x, struct device_attribute, attr)

View File

@@ -602,6 +602,7 @@ struct amdgpu_firmware {
void *fw_buf_ptr;
uint64_t fw_buf_mc;
uint32_t pldm_version;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);

View File

@@ -529,6 +529,7 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
pfns[i] = err_data.err_addr[i].retired_page;
}
ret = i;
adev->umc.err_addr_cnt = err_data.err_addr_cnt;
out:
kfree(err_data.err_addr);

View File

@@ -78,6 +78,18 @@
#define UMC_NPS_SHIFT 40
#define UMC_NPS_MASK 0xffULL
/* three column bits and one row bit in the MCA address are flipped
* in bad page retirement
*/
#define RETIRE_FLIP_BITS_NUM 4
struct amdgpu_umc_flip_bits {
uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM];
uint32_t flip_row_bit;
uint32_t r13_in_pa;
uint32_t bit_num;
};
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -100,6 +112,7 @@ struct amdgpu_umc_ras {
bool dump_addr);
uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
uint64_t mca_addr, uint64_t retired_page);
void (*get_retire_flip_bits)(struct amdgpu_device *adev);
};
struct amdgpu_umc_funcs {
@@ -130,6 +143,10 @@ struct amdgpu_umc {
/* active mask for umc node instance */
unsigned long active_mask;
struct amdgpu_umc_flip_bits flip_bits;
unsigned long err_addr_cnt;
};
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);

View File

@@ -240,17 +240,17 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
drm_gem_object_put(gobj);
/* Pin the BO before generating the index, unpin in queue destroy */
r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
r = amdgpu_bo_reserve(db_obj->obj, true);
if (r) {
drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
goto unref_bo;
}
r = amdgpu_bo_reserve(db_obj->obj, true);
/* Pin the BO before generating the index, unpin in queue destroy */
r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
if (r) {
drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
goto unpin_bo;
goto unresv_bo;
}
switch (db_info->queue_type) {
@@ -286,7 +286,8 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
unpin_bo:
amdgpu_bo_unpin(db_obj->obj);
unresv_bo:
amdgpu_bo_unreserve(db_obj->obj);
unref_bo:
amdgpu_bo_unref(&db_obj->obj);
return r;
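
The swap above follows the usual TTM rule: a BO must be reserved (its reservation lock held) before amdgpu_bo_pin() is called, and the unwind runs in reverse. A condensed sketch of the corrected sequence, not the full driver path:

static int doorbell_pin_sketch(struct amdgpu_bo *bo)
{
	int r;

	r = amdgpu_bo_reserve(bo, true);	/* lock first */
	if (r)
		return r;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_DOORBELL);
	if (r) {
		amdgpu_bo_unreserve(bo);	/* unwind the reserve */
		return r;
	}

	amdgpu_bo_unreserve(bo);	/* BO stays pinned until queue destroy */
	return 0;
}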
@@ -301,7 +302,7 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
struct amdgpu_usermode_queue *queue;
int r = 0;
cancel_delayed_work(&uq_mgr->resume_work);
cancel_delayed_work_sync(&uq_mgr->resume_work);
mutex_lock(&uq_mgr->userq_mutex);
queue = amdgpu_userq_find(uq_mgr, queue_id);
@@ -311,9 +312,13 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
return -EINVAL;
}
amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
amdgpu_bo_unpin(queue->db_obj.obj);
r = amdgpu_bo_reserve(queue->db_obj.obj, true);
if (!r) {
amdgpu_bo_unpin(queue->db_obj.obj);
amdgpu_bo_unreserve(queue->db_obj.obj);
}
amdgpu_bo_unref(&queue->db_obj.obj);
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
@@ -389,6 +394,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
*
* This will also make sure we have a valid eviction fence ready to be used.
*/
mutex_lock(&adev->userq_mutex);
amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
uq_funcs = adev->userq_funcs[args->in.ip_type];
@@ -451,7 +457,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
}
/* don't map the queue if scheduling is halted */
mutex_lock(&adev->userq_mutex);
if (adev->userq_halt_for_enforce_isolation &&
((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
@@ -461,7 +466,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (!skip_map_queue) {
r = amdgpu_userq_map_helper(uq_mgr, queue);
if (r) {
mutex_unlock(&adev->userq_mutex);
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
idr_remove(&uq_mgr->userq_idr, qid);
amdgpu_userq_fence_driver_free(queue);
@@ -470,13 +474,13 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto unlock;
}
}
mutex_unlock(&adev->userq_mutex);
args->out.queue_id = qid;
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
mutex_unlock(&adev->userq_mutex);
return r;
}
@@ -746,7 +750,7 @@ amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
if (evf_mgr->fd_closing) {
cancel_delayed_work(&uq_mgr->resume_work);
cancel_delayed_work_sync(&uq_mgr->resume_work);
return;
}
@@ -777,24 +781,25 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
struct amdgpu_userq_mgr *uqm, *tmp;
uint32_t queue_id;
cancel_delayed_work(&userq_mgr->resume_work);
cancel_delayed_work_sync(&userq_mgr->resume_work);
mutex_lock(&adev->userq_mutex);
mutex_lock(&userq_mgr->userq_mutex);
idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
amdgpu_userq_unmap_helper(userq_mgr, queue);
amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
}
mutex_lock(&adev->userq_mutex);
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
if (uqm == userq_mgr) {
list_del(&uqm->list);
break;
}
}
mutex_unlock(&adev->userq_mutex);
idr_destroy(&userq_mgr->userq_idr);
mutex_unlock(&userq_mgr->userq_mutex);
mutex_unlock(&adev->userq_mutex);
mutex_destroy(&userq_mgr->userq_mutex);
}
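
The reshuffled locking in create and fini establishes one consistent order: the device-wide adev->userq_mutex is taken before the per-file userq_mutex and released after it, which rules out ABBA deadlocks between the two. The convention, in miniature:

/* Lock-ordering sketch: device-wide lock outside, per-file lock inside;
 * release in the reverse order of acquisition, in every path.
 */
mutex_lock(&adev->userq_mutex);		/* outer */
mutex_lock(&userq_mgr->userq_mutex);	/* inner */

/* ... map/unmap/cleanup the queues ... */

mutex_unlock(&userq_mgr->userq_mutex);
mutex_unlock(&adev->userq_mutex);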

View File

@@ -181,7 +181,7 @@ void amdgpu_userq_fence_driver_destroy(struct kref *ref)
unsigned long index, flags;
struct dma_fence *f;
spin_lock(&fence_drv->fence_list_lock);
spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
f = &fence->base;
@@ -193,7 +193,7 @@ void amdgpu_userq_fence_driver_destroy(struct kref *ref)
list_del(&fence->link);
dma_fence_put(f);
}
spin_unlock(&fence_drv->fence_list_lock);
spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
xa_lock_irqsave(xa, flags);
xa_for_each(xa, index, xa_fence_drv)
@@ -859,8 +859,10 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
num_fences = dma_fence_dedup_array(fences, num_fences);
waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
if (!waitq)
if (!waitq) {
r = -EINVAL;
goto free_fences;
}
for (i = 0, cnt = 0; i < num_fences; i++) {
struct amdgpu_userq_fence_driver *fence_drv;

View File

@@ -709,10 +709,10 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
struct amdgpu_xcp_cfg *xcp_cfg;
int i;
if (!adev->xcp_mgr)
if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
return;
xcp_cfg = adev->xcp_mgr->xcp_cfg;
xcp_cfg = adev->xcp_mgr->xcp_cfg;
for (i = 0; i < xcp_cfg->num_res; i++) {
xcp_res = &xcp_cfg->xcp_res[i];
kobject_put(&xcp_res->kobj);

View File

@@ -448,6 +448,49 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
return 0;
}
static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
int px_mode, int *num_xcp,
uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
switch (px_mode) {
case AMDGPU_SPX_PARTITION_MODE:
*num_xcp = 1;
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
break;
case AMDGPU_DPX_PARTITION_MODE:
*num_xcp = 2;
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_TPX_PARTITION_MODE:
*num_xcp = 3;
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_QPX_PARTITION_MODE:
*num_xcp = 4;
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
if (amdgpu_sriov_vf(adev))
*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
return -EINVAL;
}
return 0;
}
static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
int mode,
struct amdgpu_xcp_cfg *xcp_cfg)
@@ -455,7 +498,7 @@ static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
struct amdgpu_device *adev = xcp_mgr->adev;
int max_res[AMDGPU_XCP_RES_MAX] = {};
bool res_lt_xcp;
int num_xcp, i;
int num_xcp, i, r;
u16 nps_modes;
if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
@@ -466,34 +509,9 @@ static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
num_xcp = 1;
nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
break;
case AMDGPU_DPX_PARTITION_MODE:
num_xcp = 2;
nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_TPX_PARTITION_MODE:
num_xcp = 3;
nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_QPX_PARTITION_MODE:
num_xcp = 4;
nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
num_xcp = NUM_XCC(adev->gfx.xcc_mask);
nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
default:
return -EINVAL;
}
r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
if (r)
return r;
xcp_cfg->compatible_nps_modes =
(adev->gmc.supported_nps_modes & nps_modes);
@@ -543,30 +561,31 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
enum amdgpu_gfx_partition mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
int num_xcc, num_xccs_per_xcp;
int num_xcc, num_xccs_per_xcp, r;
int num_xcp, nps_mode;
u16 supp_nps_modes;
bool comp_mode;
nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
&supp_nps_modes);
if (r)
return false;
comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
return comp_mode && num_xcc > 0;
case AMDGPU_DPX_PARTITION_MODE:
return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
return comp_mode && (num_xcc % 4) == 0;
case AMDGPU_TPX_PARTITION_MODE:
return (adev->gmc.num_mem_partitions == 1 ||
adev->gmc.num_mem_partitions == 3) &&
((num_xcc % 3) == 0);
return comp_mode && ((num_xcc % 3) == 0);
case AMDGPU_QPX_PARTITION_MODE:
num_xccs_per_xcp = num_xcc / 4;
return (adev->gmc.num_mem_partitions == 1 ||
adev->gmc.num_mem_partitions == 4) &&
(num_xccs_per_xcp >= 2);
return comp_mode && (num_xccs_per_xcp >= 2);
case AMDGPU_CPX_PARTITION_MODE:
/* (num_xcc > 1) because 1 XCC is considered SPX, not CPX.
* (num_xcc % adev->gmc.num_mem_partitions) == 0 because
* num_compute_partitions can't be less than num_mem_partitions
*/
return ((num_xcc > 1) &&
(num_xcc % adev->gmc.num_mem_partitions) == 0);
return comp_mode && (num_xcc > 1);
default:
return false;
}

View File

@@ -1153,6 +1153,12 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
case IP_VERSION(9, 5, 0):
if (adev->gfx.mec_fw_version >= 21) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
default:
break;
}
@@ -1267,6 +1273,22 @@ static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
}
}
/* For ASICs that need the xnack chain and whose MEC version supports it,
* set the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform
* KFD to set the xnack_chain bit in SET_RESOURCES.
*/
static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
return;
data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1, data);
}
static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
int xcc_id)
{
@@ -1311,6 +1333,7 @@ static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}
static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
@@ -1323,6 +1346,20 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
adev->gfx.config.db_debug2 =
RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
/* ToDo: GC 9.4.4 */
case IP_VERSION(9, 4, 3):
if (adev->gfx.mec_fw_version >= 184)
adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
break;
case IP_VERSION(9, 5, 0):
if (adev->gfx.mec_fw_version >= 23)
adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
break;
default:
break;
}
for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_xcc_constants_init(adev, i);
}
@@ -3452,9 +3489,7 @@ static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
{
/* TODO: Check whether gfx9.4.4 MEC FW supports pipe reset as well. */
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
adev->gfx.mec_fw_version >= 0x0000009b)
if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
return true;
else
dev_warn_once(adev->dev, "Please use the latest MEC version to see whether pipe reset is supported\n");

View File

@@ -748,6 +748,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
/* The mall_size is already calculated as mall_size_per_umc * num_umc.
* However, for gfx1151, which features a 2-to-1 UMC mapping,
* the result must be multiplied by 2 to determine the actual mall size.
*/
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 5, 1):
adev->gmc.mall_size *= 2;
break;
default:
break;
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):

View File

@@ -1502,7 +1502,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
break;
@@ -2072,6 +2071,9 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)

View File

@@ -28,11 +28,13 @@
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"
#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -163,14 +165,9 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
1 + j + 11 * jpeg_inst;
} else {
if (j < 4)
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
4 + j + 32 * jpeg_inst;
else
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
8 + j + 32 * jpeg_inst;
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
2 + j + 32 * jpeg_inst;
}
sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
@@ -237,7 +234,10 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
int i, j, r, jpeg_inst;
if (amdgpu_sriov_vf(adev)) {
/* jpeg_v5_0_1_start_sriov(adev); */
r = jpeg_v5_0_1_start_sriov(adev);
if (r)
return r;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
@@ -291,8 +291,10 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
return ret;
}
@@ -422,6 +424,119 @@ static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
reg_offset);
}
static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
uint64_t ctx_addr;
uint32_t param, resp, expected;
uint32_t tmp, timeout;
struct amdgpu_mm_table *table = &adev->virt.mm_table;
uint32_t *table_loc;
uint32_t table_size;
uint32_t size, size_dw, item_offset;
uint32_t init_status;
int i, j, jpeg_inst;
struct mmsch_v5_0_cmd_direct_write
direct_wt = { {0} };
struct mmsch_v5_0_cmd_end end = { {0} };
struct mmsch_v5_0_init_header header;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
end.cmd_header.command_type =
MMSCH_COMMAND__END;
for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
jpeg_inst = GET_INST(JPEG, i);
memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
header.version = MMSCH_VERSION;
header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
table_loc = (uint32_t *)table->cpu_addr;
table_loc += header.total_size;
item_offset = header.total_size;
for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
ring = &adev->jpeg.inst[i].ring_dec[j];
table_size = 0;
tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);
if (j < 5) {
header.mjpegdec0[j].table_offset = item_offset;
header.mjpegdec0[j].init_status = 0;
header.mjpegdec0[j].table_size = table_size;
} else {
header.mjpegdec1[j - 5].table_offset = item_offset;
header.mjpegdec1[j - 5].init_status = 0;
header.mjpegdec1[j - 5].table_size = table_size;
}
header.total_size += table_size;
item_offset += table_size;
}
MMSCH_V5_0_INSERT_END();
/* send init table to MMSCH */
size = sizeof(struct mmsch_v5_0_init_header);
table_loc = (uint32_t *)table->cpu_addr;
memcpy((void *)table_loc, &header, size);
ctx_addr = table->gpu_addr;
WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);
size = header.total_size;
WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);
WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);
param = 0x00000001;
WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
tmp = 0;
timeout = 1000;
resp = 0;
expected = MMSCH_VF_MAILBOX_RESP__OK;
init_status =
((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
while (resp != expected) {
resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);
if (resp != 0)
break;
udelay(10);
tmp = tmp + 10;
if (tmp >= timeout) {
DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
" waiting for regMMSCH_VF_MAILBOX_RESP "\
"(expected=0x%08x, readback=0x%08x)\n",
tmp, expected, resp);
return -EBUSY;
}
}
if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
init_status != MMSCH_VF_ENGINE_STATUS__PASS)
DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
resp, init_status);
}
return 0;
}
/**
* jpeg_v5_0_1_start - start JPEG block
*
@@ -581,6 +696,11 @@ static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
if (state == adev->jpeg.cur_state)
return 0;

View File

@@ -0,0 +1,144 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __MMSCH_V5_0_H__
#define __MMSCH_V5_0_H__
#include "amdgpu_vcn.h"
#define MMSCH_VERSION_MAJOR 5
#define MMSCH_VERSION_MINOR 0
#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
#define RB_ENABLED (1 << 0)
#define RB4_ENABLED (1 << 1)
#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
#define MMSCH_VF_MAILBOX_RESP__OK 0x1
#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3
#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4
#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5
enum mmsch_v5_0_command_type {
MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
MMSCH_COMMAND__END = 0xf
};
struct mmsch_v5_0_table_info {
uint32_t init_status;
uint32_t table_offset;
uint32_t table_size;
};
struct mmsch_v5_0_init_header {
uint32_t version;
uint32_t total_size;
struct mmsch_v5_0_table_info vcn0;
struct mmsch_v5_0_table_info mjpegdec0[5];
struct mmsch_v5_0_table_info mjpegdec1[5];
};
struct mmsch_v5_0_cmd_direct_reg_header {
uint32_t reg_offset : 28;
uint32_t command_type : 4;
};
struct mmsch_v5_0_cmd_indirect_reg_header {
uint32_t reg_offset : 20;
uint32_t reg_idx_space : 8;
uint32_t command_type : 4;
};
struct mmsch_v5_0_cmd_direct_write {
struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
uint32_t reg_value;
};
struct mmsch_v5_0_cmd_direct_read_modify_write {
struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
uint32_t write_data;
uint32_t mask_value;
};
struct mmsch_v5_0_cmd_direct_polling {
struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
uint32_t mask_value;
uint32_t wait_value;
};
struct mmsch_v5_0_cmd_end {
struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
};
struct mmsch_v5_0_cmd_indirect_write {
struct mmsch_v5_0_cmd_indirect_reg_header cmd_header;
uint32_t reg_value;
};
#define MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
size = sizeof(struct mmsch_v5_0_cmd_direct_read_modify_write); \
size_dw = size / 4; \
direct_rd_mod_wt.cmd_header.reg_offset = reg; \
direct_rd_mod_wt.mask_value = mask; \
direct_rd_mod_wt.write_data = data; \
memcpy((void *)table_loc, &direct_rd_mod_wt, size); \
table_loc += size_dw; \
table_size += size_dw; \
}
#define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value) { \
size = sizeof(struct mmsch_v5_0_cmd_direct_write); \
size_dw = size / 4; \
direct_wt.cmd_header.reg_offset = reg; \
direct_wt.reg_value = value; \
memcpy((void *)table_loc, &direct_wt, size); \
table_loc += size_dw; \
table_size += size_dw; \
}
#define MMSCH_V5_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
size = sizeof(struct mmsch_v5_0_cmd_direct_polling); \
size_dw = size / 4; \
direct_poll.cmd_header.reg_offset = reg; \
direct_poll.mask_value = mask; \
direct_poll.wait_value = wait; \
memcpy((void *)table_loc, &direct_poll, size); \
table_loc += size_dw; \
table_size += size_dw; \
}
#define MMSCH_V5_0_INSERT_END() { \
size = sizeof(struct mmsch_v5_0_cmd_end); \
size_dw = size / 4; \
memcpy((void *)table_loc, &end, size); \
table_loc += size_dw; \
table_size += size_dw; \
}
#endif
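
Note that the INSERT macros expand against caller-local variables rather than taking them as arguments; the SR-IOV start routines above declare exactly this scaffolding. A minimal sketch of the expected scope (table is the caller's amdgpu_mm_table; reg and value are placeholders):

uint32_t *table_loc = (uint32_t *)table->cpu_addr;
uint32_t table_size = 0, size, size_dw;
struct mmsch_v5_0_cmd_direct_write direct_wt = { {0} };
struct mmsch_v5_0_cmd_end end = { {0} };

direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
end.cmd_header.command_type = MMSCH_COMMAND__END;

/* Each macro appends one command, advancing table_loc and accumulating
 * table_size in dwords.
 */
MMSCH_V5_0_INSERT_DIRECT_WT(reg, value);
MMSCH_V5_0_INSERT_END();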

View File

@@ -71,15 +71,6 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin");
/* Retry times for vmbx ready wait */
#define PSP_VMBX_POLLING_LIMIT 3000
/* VBIOS gfl defines */
#define MBOX_READY_MASK 0x80000000
#define MBOX_STATUS_MASK 0x0000FFFF
#define MBOX_COMMAND_MASK 0x00FF0000
#define MBOX_READY_FLAG 0x80000000
#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
@@ -741,7 +732,8 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
/* Ring the doorbell */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1);
if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE ||
cmd == C2PMSG_CMD_SPI_GET_FLASH_IMAGE)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
@@ -797,6 +789,37 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
return 0;
}
static int psp_v13_0_dump_spirom(struct psp_context *psp,
uint64_t fw_pri_mc_addr)
{
struct amdgpu_device *adev = psp->adev;
int ret;
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, false);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
}
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr));
ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO);
if (ret)
return ret;
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr));
ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI);
if (ret)
return ret;
ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_FLASH_IMAGE);
return ret;
}
static int psp_v13_0_vbflash_status(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -929,6 +952,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw,
.update_spirom = psp_v13_0_update_spirom,
.dump_spirom = psp_v13_0_dump_spirom,
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,

View File

@@ -174,19 +174,76 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev)
{
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
uint32_t vram_type = adev->gmc.vram_type;
struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits);
if (adev->gmc.gmc_funcs->query_mem_partition_mode)
nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
/* default setting */
flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
flip_bits->flip_row_bit = 13;
flip_bits->bit_num = 4;
flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
if (nps == AMDGPU_NPS2_PARTITION_MODE) {
flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
} else if (nps == AMDGPU_NPS4_PARTITION_MODE) {
flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
}
switch (vram_type) {
case AMDGPU_VRAM_TYPE_HBM:
/* other nps modes are taken as nps1 */
if (nps == AMDGPU_NPS2_PARTITION_MODE)
flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
else if (nps == AMDGPU_NPS4_PARTITION_MODE)
flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
break;
case AMDGPU_VRAM_TYPE_HBM3E:
flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
flip_bits->flip_row_bit = 12;
if (nps == AMDGPU_NPS2_PARTITION_MODE)
flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
else if (nps == AMDGPU_NPS4_PARTITION_MODE)
flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
break;
default:
dev_warn(adev->dev,
"Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
break;
}
adev->umc.retire_unit = 0x1 << flip_bits->bit_num;
}
static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
struct ta_ras_query_address_input *addr_in,
struct ta_ras_query_address_output *addr_out,
bool dump_addr)
{
uint32_t col, col_lower, row, row_lower, bank;
uint32_t col, col_lower, row, row_lower, row_high, bank;
uint32_t channel_index = 0, umc_inst = 0;
uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
uint32_t i, bit_num, retire_unit, *flip_bits;
uint64_t soc_pa, column, err_addr;
struct ta_ras_query_address_output addr_out_tmp;
struct ta_ras_query_address_output *paddr_out;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
if (!addr_out)
@@ -211,53 +268,46 @@ static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
umc_inst = addr_in->ma.umc_inst;
}
loop_bits[0] = UMC_V12_0_PA_C2_BIT;
loop_bits[1] = UMC_V12_0_PA_C3_BIT;
loop_bits[2] = UMC_V12_0_PA_C4_BIT;
loop_bits[3] = UMC_V12_0_PA_R13_BIT;
if (adev->gmc.gmc_funcs->query_mem_partition_mode)
nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
/* other nps modes are taken as nps1 */
if (nps == AMDGPU_NPS2_PARTITION_MODE) {
loop_bits[0] = UMC_V12_0_PA_CH5_BIT;
loop_bits[1] = UMC_V12_0_PA_C2_BIT;
loop_bits[2] = UMC_V12_0_PA_B1_BIT;
loop_bits[3] = UMC_V12_0_PA_R12_BIT;
}
if (nps == AMDGPU_NPS4_PARTITION_MODE) {
loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
loop_bits[2] = UMC_V12_0_PA_B0_BIT;
loop_bits[3] = UMC_V12_0_PA_R11_BIT;
}
flip_bits = adev->umc.flip_bits.flip_bits_in_pa;
bit_num = adev->umc.flip_bits.bit_num;
retire_unit = adev->umc.retire_unit;
soc_pa = paddr_out->pa.pa;
channel_index = paddr_out->pa.channel_idx;
/* clear loop bits in soc physical address */
for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
soc_pa &= ~BIT_ULL(loop_bits[i]);
for (i = 0; i < bit_num; i++)
soc_pa &= ~BIT_ULL(flip_bits[i]);
paddr_out->pa.pa = soc_pa;
/* get column bit 0 and 1 in mca address */
col_lower = (err_addr >> 1) & 0x3ULL;
/* MA_R13_BIT will be handled later */
/* extra row bit will be handled later */
row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
row_lower &= ~BIT_ULL(adev->umc.flip_bits.flip_row_bit);
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 5, 0)) {
row_high = (soc_pa >> adev->umc.flip_bits.r13_in_pa) & 0x3ULL;
/* There is 2.25GB in each channel. In the MCA address to PA
* conversion, [R14 R13] is converted if the two bits' value
* is 0x3, so get them from the PA instead of the MCA address.
*/
row_lower |= (row_high << 13);
}
if (!err_data && !dump_addr)
goto out;
/* loop for all possibilities of retired bits */
for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
for (column = 0; column < retire_unit; column++) {
soc_pa = paddr_out->pa.pa;
for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
for (i = 0; i < bit_num; i++)
soc_pa |= (((column >> i) & 0x1ULL) << flip_bits[i]);
col = ((column & 0x7) << 2) | col_lower;
/* add row bit 13 */
row = ((column >> 3) << 13) | row_lower;
/* handle extra row bit */
if (bit_num == RETIRE_FLIP_BITS_NUM)
row = ((column >> 3) << adev->umc.flip_bits.flip_row_bit) |
row_lower;
if (dump_addr)
dev_info(adev->dev,
@@ -435,8 +485,12 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank
bank->regs[ACA_REG_IDX_ADDR]);
ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
count = ext_error_code == 0 ?
ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
if (umc_v12_0_is_deferred_error(adev, status))
count = ext_error_code == 0 ?
adev->umc.err_addr_cnt / adev->umc.retire_unit : 1ULL;
else
count = ext_error_code == 0 ?
ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
@@ -476,8 +530,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
struct ta_ras_query_address_output addr_out;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
uint32_t shift_bit = adev->umc.flip_bits.flip_bits_in_pa[2];
int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
@@ -522,14 +575,6 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
ecc_err->channel_idx = addr_out.pa.channel_idx;
if (adev->gmc.gmc_funcs->query_mem_partition_mode)
nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
if (nps == AMDGPU_NPS2_PARTITION_MODE)
shift_bit = UMC_V12_0_PA_B1_BIT;
if (nps == AMDGPU_NPS4_PARTITION_MODE)
shift_bit = UMC_V12_0_PA_B0_BIT;
/* If converted pa_pfn is 0, use pa C4 pfn. */
if (!ecc_err->pa_pfn)
ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
@@ -675,5 +720,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.update_ecc_status = umc_v12_0_update_ecc_status,
.convert_ras_err_addr = umc_v12_0_convert_error_address,
.get_die_id_from_pa = umc_v12_0_get_die_id,
.get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
};
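
With bit_num flip bits, one bad address expands into retire_unit = 1 << bit_num candidate pages, one per combination of the flip bits — 16 with the default four. A standalone illustration (bit positions follow the NPS1 defaults C2/C3/C4/R13 from umc_v12_0.h; the C3 value here is an assumption for the demo):

#include <stdint.h>
#include <stdio.h>

#define BITS_NUM 4

int main(void)
{
	/* C2 = 15, C4 = 21, R13 = 35 per umc_v12_0.h; C3 = 16 is assumed. */
	const uint32_t flip_bits[BITS_NUM] = { 15, 16, 21, 35 };
	const uint32_t retire_unit = 1u << BITS_NUM;	/* 16 pages */
	uint64_t base = 0x123456789ULL;			/* sample soc PA */
	uint32_t column, i;

	for (i = 0; i < BITS_NUM; i++)		/* clear the flip bits first */
		base &= ~(1ULL << flip_bits[i]);

	for (column = 0; column < retire_unit; column++) {
		uint64_t pa = base;

		for (i = 0; i < BITS_NUM; i++)	/* apply one combination */
			pa |= (uint64_t)((column >> i) & 0x1) << flip_bits[i];
		printf("retire candidate %2u: 0x%llx\n", column,
		       (unsigned long long)pa);
	}
	return 0;
}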

View File

@@ -55,8 +55,6 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
/* C2, C3, C4, R13, four bits in MCA address are looped in retirement */
#define UMC_V12_0_RETIRE_LOOP_BITS 4
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
@@ -64,6 +62,7 @@
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
#define UMC_V12_0_PA_R0_BIT 22
#define UMC_V12_0_PA_R10_BIT 32
#define UMC_V12_0_PA_R11_BIT 33
#define UMC_V12_0_PA_R12_BIT 34
#define UMC_V12_0_PA_R13_BIT 35

View File

@@ -1034,6 +1034,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
/* Keep one read-back to ensure all register writes are done;
* otherwise it may introduce race conditions. */
RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL);
return 0;
}
@@ -1216,6 +1220,10 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
/* Keep one read-back to ensure all register writes are done;
* otherwise it may introduce race conditions. */
RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
return 0;
}
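
The added read-backs are the standard MMIO posted-write flush: a read from the device cannot complete until earlier posted writes have landed, so execution past the read implies the writes took effect. The generic shape of the idiom (base, REG_A/REG_B and the values are placeholders):

writel(val_a, base + REG_A);
writel(val_b, base + REG_B);
(void)readl(base + REG_B);	/* flush: forces prior writes to complete */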

View File

@@ -30,6 +30,7 @@
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "vcn_v4_0_3.h"
#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
@@ -39,6 +40,7 @@
#include <drm/drm_drv.h>
static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
@@ -126,7 +128,14 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 11 * vcn_inst;
if (!amdgpu_sriov_vf(adev))
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
11 * vcn_inst;
else
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
32 * vcn_inst;
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
@@ -143,6 +152,12 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
vcn_v5_0_0_alloc_ip_dump(adev);
return amdgpu_vcn_sysfs_reset_mask_init(adev);
@@ -172,6 +187,9 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -204,24 +222,38 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
if (ring->use_doorbell)
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
11 * vcn_inst),
adev->vcn.inst[i].aid_id);
/* Re-init fw_shared, if required */
vcn_v5_0_1_fw_shared_init(adev, i);
r = amdgpu_ring_test_helper(ring);
if (amdgpu_sriov_vf(adev)) {
r = vcn_v5_0_1_start_sriov(adev);
if (r)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
ring = &adev->vcn.inst[i].ring_enc[0];
ring->wptr = 0;
ring->wptr_old = 0;
vcn_v5_0_1_unified_ring_set_wptr(ring);
ring->sched.ready = true;
}
} else {
if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
if (ring->use_doorbell)
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
11 * vcn_inst),
adev->vcn.inst[i].aid_id);
/* Re-init fw_shared, if required */
vcn_v5_0_1_fw_shared_init(adev, i);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
}
}
return 0;
@@ -663,6 +695,195 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
return 0;
}
static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
{
int i, vcn_inst;
struct amdgpu_ring *ring_enc;
uint64_t cache_addr;
uint64_t rb_enc_addr;
uint64_t ctx_addr;
uint32_t param, resp, expected;
uint32_t offset, cache_size;
uint32_t tmp, timeout;
struct amdgpu_mm_table *table = &adev->virt.mm_table;
uint32_t *table_loc;
uint32_t table_size;
uint32_t size, size_dw;
uint32_t init_status;
uint32_t enabled_vcn;
struct mmsch_v5_0_cmd_direct_write
direct_wt = { {0} };
struct mmsch_v5_0_cmd_direct_read_modify_write
direct_rd_mod_wt = { {0} };
struct mmsch_v5_0_cmd_end end = { {0} };
struct mmsch_v5_0_init_header header;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
end.cmd_header.command_type = MMSCH_COMMAND__END;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
vcn_inst = GET_INST(VCN, i);
vcn_v5_0_1_fw_shared_init(adev, vcn_inst);
memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
header.version = MMSCH_VERSION;
header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
table_loc = (uint32_t *)table->cpu_addr;
table_loc += header.total_size;
table_size = 0;
MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
offset = 0;
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr));
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = cache_size;
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_VCPU_CACHE_SIZE0),
cache_size);
cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_VCPU_CACHE_OFFSET1), 0);
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);
cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE;
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_VCPU_CACHE_OFFSET2), 0);
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);
fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
rb_setup = &fw_shared->rb_setup;
ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
ring_enc->wptr = 0;
rb_enc_addr = ring_enc->gpu_addr;
rb_setup->is_rb_enabled_flags |= RB_ENABLED;
rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
rb_setup->rb_size = ring_enc->ring_size / 4;
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
regUVD_VCPU_NONCACHE_SIZE0),
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
MMSCH_V5_0_INSERT_END();
header.vcn0.init_status = 0;
header.vcn0.table_offset = header.total_size;
header.vcn0.table_size = table_size;
header.total_size += table_size;
/* Send init table to mmsch */
size = sizeof(struct mmsch_v5_0_init_header);
table_loc = (uint32_t *)table->cpu_addr;
memcpy((void *)table_loc, &header, size);
ctx_addr = table->gpu_addr;
WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);
size = header.total_size;
WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);
WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
param = 0x00000001;
WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
tmp = 0;
timeout = 1000;
resp = 0;
expected = MMSCH_VF_MAILBOX_RESP__OK;
while (resp != expected) {
resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
if (resp != 0)
break;
udelay(10);
tmp = tmp + 10;
if (tmp >= timeout) {
DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
" waiting for regMMSCH_VF_MAILBOX_RESP "\
"(expected=0x%08x, readback=0x%08x)\n",
tmp, expected, resp);
return -EBUSY;
}
}
enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status;
if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
&& init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
}
}
return 0;
}
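For readers new to the MMSCH flow above: the INSERT macros serialize register commands into a shared table that the MMSCH firmware replays on the guest's behalf (a VF cannot program these registers directly), and the mailbox poll at the end waits for completion. A sketch of what a direct-write insertion amounts to, modeled on the v4.0 macros (the exact v5.0 expansion lives in mmsch_v5_0.h and is assumed here to match):

	/* Sketch only -- modeled on MMSCH_V4_0_INSERT_DIRECT_WT. */
	#define MMSCH_V5_0_INSERT_DIRECT_WT_SKETCH(reg, value)			\
	do {									\
		direct_wt.cmd_header.reg_offset = (reg);			\
		direct_wt.reg_value = (value);					\
		memcpy((void *)table_loc, &direct_wt, sizeof(direct_wt));	\
		table_loc += sizeof(direct_wt) / 4;	/* table is in dwords */ \
		table_size += sizeof(direct_wt) / 4;				\
	} while (0)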
/**
* vcn_v5_0_1_start - VCN start
*
@@ -1103,8 +1324,18 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = vinst->adev;
int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
if (state == vinst->cur_state)
return 0;

View File

@@ -175,8 +175,7 @@ static bool event_interrupt_isr_v10(struct kfd_node *dev,
data[0], data[1], data[2], data[3], data[4], data[5], data[6],
data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
if (pasid == 0)
return 0;
/* Interrupt types we care about: various signals and faults.

View File

@@ -287,8 +287,7 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev,
data[0], data[1], data[2], data[3], data[4], data[5], data[6],
data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
if (pasid == 0)
return false;
/* Interrupt types we care about: various signals and faults.

View File

@@ -31,6 +31,7 @@
#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)
#define OVER_SUBSCRIPTION_XNACK_CONFLICT (1 << 3)
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
@@ -44,7 +45,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
int *over_subscription)
int *over_subscription,
int xnack_conflict)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
@@ -73,6 +75,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
*over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
if (gws_queue_count > 1)
*over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;
if (xnack_conflict && (node->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
*over_subscription |= OVER_SUBSCRIPTION_XNACK_CONFLICT;
if (*over_subscription)
dev_dbg(dev, "Over subscribed runlist\n");
@@ -96,7 +100,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
int *is_over_subscription)
int *is_over_subscription,
int xnack_conflict)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
@@ -105,7 +110,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
if (WARN_ON(pm->allocated))
return -EINVAL;
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription,
xnack_conflict);
mutex_lock(&pm->lock);
@@ -142,11 +148,27 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct queue *q;
struct kernel_queue *kq;
int is_over_subscription;
int xnack_enabled = -1;
bool xnack_conflict = 0;
rl_wptr = retval = processes_mapped = 0;
/* Check if processes set different xnack modes */
list_for_each_entry(cur, queues, list) {
qpd = cur->qpd;
if (xnack_enabled < 0)
/* First process */
xnack_enabled = qpd->pqm->process->xnack_enabled;
else if (qpd->pqm->process->xnack_enabled != xnack_enabled) {
/* Found a process with a different xnack mode */
xnack_conflict = 1;
break;
}
}
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
&alloc_size_bytes, &is_over_subscription);
&alloc_size_bytes, &is_over_subscription,
xnack_conflict);
if (retval)
return retval;
@@ -156,9 +178,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->active_queue_count);
build_runlist_ib:
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
qpd = cur->qpd;
/* group processes with the same xnack mode together */
if (qpd->pqm->process->xnack_enabled != xnack_enabled)
continue;
/* build map process packet */
if (processes_mapped >= pm->dqm->processes_count) {
dev_dbg(dev, "Not enough space left in runlist IB\n");
@@ -215,18 +241,26 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
alloc_size_bytes);
}
}
if (xnack_conflict) {
/* pick up processes with the other xnack mode */
xnack_enabled = !xnack_enabled;
xnack_conflict = 0;
goto build_runlist_ib;
}
dev_dbg(dev, "Finished map process and queues to runlist\n");
if (is_over_subscription) {
if (!pm->is_over_subscription)
dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
" too many processes." : "",
is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
" too many queues." : "",
is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
" multiple processes using cooperative launch." : "");
dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s%s. Expect reduced ROCm performance.\n",
is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
" too many processes" : "",
is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
" too many queues" : "",
is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
" multiple processes using cooperative launch" : "",
is_over_subscription & OVER_SUBSCRIPTION_XNACK_CONFLICT ?
" xnack on/off processes mixed on gfx9" : "");
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
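The goto-based loop above is the heart of the XNACK-conflict handling: when processes disagree on xnack mode, the runlist is built in two passes, one mode per pass, so the scheduler never interleaves them. A standalone sketch of the same control flow (hypothetical types and output, not the kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct entry { int id; bool xnack_enabled; };

	static void emit_grouped(const struct entry *e, int n)
	{
		bool mode = e[0].xnack_enabled;	/* assumes n >= 1 */
		bool conflict = false;
		int i;

		for (i = 1; i < n; i++)
			if (e[i].xnack_enabled != mode)
				conflict = true;	/* mixed modes: two passes */
	build:
		for (i = 0; i < n; i++) {
			if (e[i].xnack_enabled != mode)
				continue;		/* deferred to the second pass */
			printf("map process %d (xnack=%d)\n", e[i].id, e[i].xnack_enabled);
		}
		if (conflict) {
			mode = !mode;			/* now emit the other group */
			conflict = false;
			goto build;
		}
	}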

View File

@@ -203,6 +203,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
if (pm->dqm->dev->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)
packet->bitfields2.enb_xnack_retry_disable_check = 1;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;

View File

@@ -63,7 +63,8 @@ struct pm4_mes_set_resources {
struct {
uint32_t vmid_mask:16;
uint32_t unmap_latency:8;
uint32_t reserved1:5;
uint32_t reserved1:4;
uint32_t enb_xnack_retry_disable_check:1;
enum mes_set_resources_queue_type_enum queue_type:3;
} bitfields2;
uint32_t ordinal2;
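Since enb_xnack_retry_disable_check is carved out of reserved1, bitfields2 still sums to exactly 32 bits (16 + 8 + 4 + 1 + 3). A compile-time check in this spirit (illustrative userspace mirror, not part of the patch) would catch an accidental overflow:

	#include <stdint.h>
	#include <assert.h>

	struct set_resources_bits2_mirror {
		uint32_t vmid_mask:16;
		uint32_t unmap_latency:8;
		uint32_t reserved1:4;
		uint32_t enb_xnack_retry_disable_check:1;
		uint32_t queue_type:3;	/* an enum bitfield in the real struct */
	};

	static_assert(sizeof(struct set_resources_bits2_mirror) == sizeof(uint32_t),
		      "bitfields2 must still pack into one dword");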

View File

@@ -38,6 +38,7 @@ AMDGPUDM = \
amdgpu_dm_pp_smu.o \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
amdgpu_dm_quirks.o \
amdgpu_dm_wb.o
ifdef CONFIG_DRM_AMD_DC_FP

View File

@@ -80,7 +80,6 @@
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>
#include <linux/sort.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -374,6 +373,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
if (new_state->stream->adjust.timing_adjust_pending)
return true;
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
@@ -866,7 +867,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
struct dmub_notification *notify)
{
DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}
/**
@@ -963,7 +964,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
entry.param0, entry.param1);
DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
entry.trace_code, entry.tick_count, entry.param0, entry.param1);
} else
break;
@@ -973,7 +974,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
} while (count <= DMUB_TRACE_MAX_READ);
if (count > DMUB_TRACE_MAX_READ)
DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");
if (dc_enable_dmub_notifications(adev->dm.dc) &&
irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
@@ -1677,153 +1678,6 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
struct amdgpu_dm_quirks {
bool aux_hpd_discon;
bool support_edp0_on_dp1;
};
static struct amdgpu_dm_quirks quirk_entries = {
.aux_hpd_discon = false,
.support_edp0_on_dp1 = false
};
static int edp0_on_dp1_callback(const struct dmi_system_id *id)
{
quirk_entries.support_edp0_on_dp1 = true;
return 0;
}
static int aux_hpd_discon_callback(const struct dmi_system_id *id)
{
quirk_entries.aux_hpd_discon = true;
return 0;
}
static const struct dmi_system_id dmi_quirk_table[] = {
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
},
},
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};
static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
{
int dmi_id;
struct drm_device *dev = dm->ddev;
dm->aux_hpd_discon_quirk = false;
init_data->flags.support_edp0_on_dp1 = false;
dmi_id = dmi_check_system(dmi_quirk_table);
if (!dmi_id)
return;
if (quirk_entries.aux_hpd_discon) {
dm->aux_hpd_discon_quirk = true;
drm_info(dev, "aux_hpd_discon_quirk attached\n");
}
if (quirk_entries.support_edp0_on_dp1) {
init_data->flags.support_edp0_on_dp1 = true;
drm_info(dev, "support_edp0_on_dp1 attached\n");
}
}
void*
dm_allocate_gpu_mem(
@@ -2110,7 +1964,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
retrieve_dmi_info(&adev->dm, &init_data);
retrieve_dmi_info(&adev->dm);
if (adev->dm.edp0_on_dp1_quirk)
init_data.flags.support_edp0_on_dp1 = true;
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -2200,7 +2056,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize freesync_module.\n");
} else
DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
amdgpu_dm_init_color_mod();
@@ -2222,7 +2078,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (!adev->dm.hdcp_workqueue)
drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n");
else
DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
dc_init_callbacks(adev->dm.dc, &init_params);
}
@@ -2299,7 +2155,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#endif
DRM_DEBUG_DRIVER("KMS initialized.\n");
drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");
return 0;
error:
@@ -5097,7 +4953,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
} else
DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -6749,7 +6605,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
m_pref = list_first_entry_or_null(
&aconnector->base.modes, struct drm_display_mode, head);
if (!m_pref) {
DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n");
return NULL;
}
}
@@ -6924,7 +6780,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n",
__func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
@@ -6944,7 +6800,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -7053,7 +6909,7 @@ create_stream_for_sink(struct drm_connector *connector,
* case, we call set mode ourselves to restore the previous mode
* and the modelist may not be filled in time.
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
drm_dbg_driver(dev, "No preferred mode found\n");
} else if (aconnector) {
recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
@@ -9201,7 +9057,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
@@ -9209,7 +9065,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
}
}
@@ -10836,6 +10692,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct dm_atomic_state *dm_state = NULL;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct dc_stream_state *new_stream;
struct amdgpu_device *adev = dm->adev;
int ret = 0;
/*
@@ -10889,7 +10746,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
*/
if (!new_stream) {
DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n",
__func__, acrtc->base.base.id);
ret = -ENOMEM;
goto fail;
@@ -10927,7 +10784,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d",
new_crtc_state->mode_changed);
}
}
@@ -10965,7 +10822,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
DRM_DEBUG_DRIVER(
drm_dbg_driver(adev_to_drm(adev),
"Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
@@ -10986,7 +10843,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (ret)
goto fail;
DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n",
crtc->base.id);
/* i.e. reset mode */
@@ -12844,7 +12701,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
payload->address, payload->length,
p_notify->result);
}
*operation_result = AUX_RET_ERROR_INVALID_REPLY;
*operation_result = p_notify->result;
goto out;
}
@@ -12853,7 +12710,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
/* The reply is stored in the top nibble of the command. */
payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
if (!payload->write && p_notify->aux_reply.length)
/* a write request may also receive a byte indicating the partially written count */
if (p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);

View File

@@ -618,6 +618,13 @@ struct amdgpu_display_manager {
*/
bool aux_hpd_discon_quirk;
/**
* @edp0_on_dp1_quirk:
*
* quirk for platforms that put edp0 on DP1.
*/
bool edp0_on_dp1_quirk;
/**
* @dpia_aux_lock:
*
@@ -1068,4 +1075,6 @@ void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
void retrieve_dmi_info(struct amdgpu_display_manager *dm);
#endif /* __AMDGPU_DM_H__ */

View File

@@ -62,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum aux_return_code_type operation_result;
struct amdgpu_device *adev;
struct ddc_service *ddc;
uint8_t copy[16];
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -77,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
if (payload.write) {
memcpy(copy, msg->buffer, msg->size);
payload.data = copy;
}
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
@@ -100,9 +106,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
*/
if (payload.write && result >= 0) {
if (result) {
/*one byte indicating partially written bytes. Force 0 to retry*/
drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
result = 0;
/*one byte indicating partially written bytes*/
drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
result = payload.data[0];
} else if (!payload.reply[0])
/*I2C_ACK|AUX_ACK*/
result = msg->size;
@@ -127,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
}
if (payload.reply[0])
drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
payload.reply[0]);
return result;
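Context for the return-value change: per the DP spec, a native AUX write that is not fully accepted comes back with one data byte M, the count of bytes the sink actually wrote, and the drm_dp_aux transfer contract treats a non-negative return as bytes transferred. Returning payload.data[0] instead of forcing 0 therefore tells the DRM DP helpers how far the write got. A hedged sketch of how a retrying caller could consume that (resume-style; whether the real helper advances or simply reissues the whole buffer is up to it, and a real caller would also cap retries):

	ssize_t done = 0;
	while (done < total) {
		ssize_t ret = aux->transfer(aux, &msg);	/* may be < msg.size */
		if (ret < 0)
			return ret;			/* hard failure */
		done += ret;
		msg.address += ret;			/* skip the ACKed bytes */
		msg.buffer = (uint8_t *)msg.buffer + ret;
		msg.size -= ret;
	}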

View File

@@ -0,0 +1,178 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <linux/dmi.h>
#include "amdgpu.h"
#include "amdgpu_dm.h"
struct amdgpu_dm_quirks {
bool aux_hpd_discon;
bool support_edp0_on_dp1;
};
static struct amdgpu_dm_quirks quirk_entries = {
.aux_hpd_discon = false,
.support_edp0_on_dp1 = false
};
static int edp0_on_dp1_callback(const struct dmi_system_id *id)
{
quirk_entries.support_edp0_on_dp1 = true;
return 0;
}
static int aux_hpd_discon_callback(const struct dmi_system_id *id)
{
quirk_entries.aux_hpd_discon = true;
return 0;
}
static const struct dmi_system_id dmi_quirk_table[] = {
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
},
},
{
.callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
},
},
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};
void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
struct drm_device *dev = dm->ddev;
int dmi_id;
dm->aux_hpd_discon_quirk = false;
dm->edp0_on_dp1_quirk = false;
dmi_id = dmi_check_system(dmi_quirk_table);
if (!dmi_id)
return;
if (quirk_entries.aux_hpd_discon) {
dm->aux_hpd_discon_quirk = true;
drm_info(dev, "aux_hpd_discon_quirk attached\n");
}
if (quirk_entries.support_edp0_on_dp1) {
dm->edp0_on_dp1_quirk = true;
drm_info(dev, "support_edp0_on_dp1 attached\n");
}
}
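Extending this table is mechanical: append one dmi_system_id entry before the terminating {}. The vendor and product below are hypothetical, purely to show the shape:

	{
		.callback = edp0_on_dp1_callback,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Book 14 G1"),
		},
	},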

View File

@@ -36,6 +36,7 @@
#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
#include "dc_plane.h"
#include "dc_plane_priv.h"
#include "dc_stream_priv.h"
@@ -440,9 +441,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* Don't adjust DRR while there's bandwidth optimizations pending to
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX)
if (dc->optimized_required || dc->wm_optimized_required)
if (dc->ctx->dce_version > DCE_VERSION_MAX) {
if (dc->optimized_required || dc->wm_optimized_required) {
stream->adjust.timing_adjust_pending = true;
return false;
}
}
dc_exit_ips_for_hw_access(dc);
@@ -2330,11 +2334,15 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
for (i = 0; i < params->stream_count; i++) {
struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
struct dc_sink *sink = stream->sink;
/* revalidate streams */
res = dc_validate_stream(dc, stream);
if (res != DC_OK)
return res;
if (!dc_is_virtual_signal(sink->sink_signal)) {
res = dc_validate_stream(dc, stream);
if (res != DC_OK)
return res;
}
dc_stream_log(dc, stream);
@@ -3240,7 +3248,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->crtc_timing_adjust) {
if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max)
stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
stream->adjust.timing_adjust_pending)
update->crtc_timing_adjust->timing_adjust_pending = true;
stream->adjust = *update->crtc_timing_adjust;
update->crtc_timing_adjust->timing_adjust_pending = false;
@@ -3320,7 +3329,7 @@ static void backup_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
scratch->plane_states[i] = *status->plane_states[i];
dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]);
}
scratch->stream_state = *stream;
}
@@ -3336,10 +3345,7 @@ static void restore_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
/* refcount will always be valid, restore everything else */
struct kref refcount = status->plane_states[i]->refcount;
*status->plane_states[i] = scratch->plane_states[i];
status->plane_states[i]->refcount = refcount;
dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
}
*stream = scratch->stream_state;
}
@@ -4244,12 +4250,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FAST)
continue;
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
stream_status =
stream_get_status(context, pipe_ctx->stream);
@@ -4258,6 +4258,25 @@ static void commit_planes_for_stream(struct dc *dc,
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (!pipe_ctx->plane_state)
continue;
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
}
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);

View File

@@ -3911,6 +3911,10 @@ enum dc_status resource_map_pool_resources(
if (!dc->link_srv->dp_decide_link_settings(stream,
&pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
dc->link_srv->dp_decide_tunnel_settings(stream,
&pipe_ctx->link_config.dp_tunnel_settings);
if (dc->link_srv->dp_get_encoding_format(
&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
@@ -5521,6 +5525,14 @@ struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
}
static bool resource_allocate_mcache(struct dc_state *context, const struct dc_mcache_params *mcache_params)
{
if (context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config)
context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config(context, mcache_params);
return true;
}
void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
{
dml2_options->callbacks.dc = dc;
@@ -5540,6 +5552,7 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
dml2_options->callbacks.get_max_flickerless_instant_vtotal_increase = &dc_stream_get_max_flickerless_instant_vtotal_increase;
dml2_options->callbacks.allocate_mcache = &resource_allocate_mcache;
dml2_options->svp_pstate.callbacks.dc = dc;
dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;

View File

@@ -109,7 +109,8 @@ struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
*****************************************************************************
*/
const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state)
const struct dc_plane_state *plane_state,
union dc_plane_status_update_flags flags)
{
const struct dc_plane_status *plane_status;
struct dc *dc;
@@ -136,7 +137,7 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
if (pipe_ctx->plane_state)
if (pipe_ctx->plane_state && flags.bits.address)
pipe_ctx->plane_state->status.is_flip_pending = false;
break;
@@ -151,7 +152,8 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
dc->hwss.update_pending_status(pipe_ctx);
if (flags.bits.address)
dc->hwss.update_pending_status(pipe_ctx);
}
return plane_status;
@@ -294,3 +296,17 @@ void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx, plane_state, clear_tiling);
}
}
void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src)
{
struct kref temp_refcount;
/* backup persistent info */
memcpy(&temp_refcount, &dst->refcount, sizeof(struct kref));
/* copy all configuration information */
memcpy(dst, src, sizeof(struct dc_plane_state));
/* restore persistent info */
memcpy(&dst->refcount, &temp_refcount, sizeof(struct kref));
}
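The helper exists so the backup/restore paths in dc.c (see the hunks above) stop open-coding "save the kref, struct-assign, put the kref back". Usage is symmetric, and the live plane's reference count is never disturbed:

	/* backup */
	dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]);
	/* ... programming ... */
	/* restore */
	dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);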

View File

@@ -53,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
#define DC_VER "3.2.331"
#define DC_VER "3.2.334"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC

View File

@@ -39,6 +39,7 @@
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
#define GPINT_RETRY_NUM 20
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)
@@ -207,7 +208,7 @@ static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_sr
return false;
do {
status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
@@ -247,6 +248,9 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
} else {
res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
}
if (res)
res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
}
return res;
@@ -1885,11 +1889,14 @@ void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struc
if (command_code == DMUB_GPINT__INVALID_COMMAND)
return;
// send gpint commands and wait for ack
if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
(uint16_t)(output->ips_mode),
&output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
output->residency_percent = 0;
for (i = 0; i < GPINT_RETRY_NUM; i++) {
// false could mean GPINT timeout, in which case we should retry
if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
(uint16_t)(output->ips_mode), &output->residency_percent,
DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
break;
udelay(100);
}
if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
(uint16_t)(output->ips_mode),

View File

@@ -159,6 +159,11 @@ struct dc_link_settings {
uint8_t link_rate_set;
};
struct dc_tunnel_settings {
bool should_enable_dp_tunneling;
bool should_use_dp_bw_allocation;
};
union dc_dp_ffe_preset {
struct {
uint8_t level : 4;
@@ -943,10 +948,20 @@ union dpia_info {
uint8_t raw;
};
/* DPCD[0xE0020] USB4_DRIVER_BW_CAPABILITY register. */
union usb4_driver_bw_cap {
struct {
uint8_t rsvd :7;
uint8_t driver_bw_alloc_support :1;
} bits;
uint8_t raw;
};
/* DP Tunneling over USB4 */
struct dpcd_usb4_dp_tunneling_info {
union dp_tun_cap_support dp_tun_cap;
union dpia_info dpia_info;
union usb4_driver_bw_cap driver_bw_cap;
uint8_t usb4_driver_id;
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
@@ -1486,5 +1501,11 @@ struct dp_trace {
# ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED
# define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3)
# endif
# ifndef DPTX_BW_ALLOC_UNMASK_IRQ
# define DPTX_BW_ALLOC_UNMASK_IRQ (1 << 6)
# endif
# ifndef DPTX_BW_ALLOC_MODE_ENABLE
# define DPTX_BW_ALLOC_MODE_ENABLE (1 << 7)
# endif
#endif /* DC_DP_TYPES_H */
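The new union gives the DPCD 0xE0020 decode a name. A minimal sketch of consuming it (the byte fetch and the consumer are assumptions; only the union layout comes from the patch):

	union usb4_driver_bw_cap cap;

	cap.raw = dpcd_e0020_byte;	/* hypothetical: byte read from DPCD 0xE0020 */
	if (cap.bits.driver_bw_alloc_support)
		tunnel_settings->should_use_dp_bw_allocation = true;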

View File

@@ -28,13 +28,24 @@
#include "dc_hw_types.h"
union dc_plane_status_update_flags {
struct {
uint32_t address : 1;
} bits;
uint32_t raw;
};
struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state);
const struct dc_plane_state *plane_state,
union dc_plane_status_update_flags flags);
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
bool clear_tiling);
void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src);
#endif /* _DC_PLANE_H_ */

View File

@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
.do_urgent_latency_adjustment = 1,
.do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)

View File

@@ -916,7 +916,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
}
//TODO : Could be possibly moved to a common helper layer.
static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id)
static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id)
{
int i, j;
@@ -924,10 +924,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str
return false;
for (i = 0; i < context->stream_count; i++) {
for (j = 0; j < context->stream_status[i].plane_count; j++) {
if (context->stream_status[i].plane_states[j] == plane) {
*plane_id = (i << 16) | j;
return true;
if (context->streams[i]->stream_id == stream_id) {
for (j = 0; j < context->stream_status[i].plane_count; j++) {
if (context->stream_status[i].plane_states[j] == plane) {
*plane_id = (i << 16) | j;
return true;
}
}
}
}
@@ -950,14 +952,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d
return location;
}
static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx,
unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
const struct dc_plane_state *plane, const struct dc_state *context)
{
unsigned int plane_id;
int i = 0;
int location = -1;
if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) {
if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) {
ASSERT(false);
return -1;
}
@@ -1043,7 +1045,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
} else {
for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) {
disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context);
disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context);
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
@@ -1054,7 +1056,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true;
/* apply forced pstate policy */
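dml21_wrapper_get_plane_id packs (stream index << 16) | plane index; with a pointer-identical plane state appearing under more than one stream, the old lookup could return a match from the wrong stream, which is what threading stream_id through fixes. Hypothetical decode helpers, just to make the encoding explicit:

	static inline unsigned int plane_id_stream_idx(unsigned int plane_id)
	{
		return plane_id >> 16;		/* i in (i << 16) | j */
	}

	static inline unsigned int plane_id_plane_idx(unsigned int plane_id)
	{
		return plane_id & 0xffff;	/* j */
	}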

View File

@@ -11,6 +11,7 @@ struct dc_state;
struct dcn_watermarks;
union dcn_watermark_set;
struct pipe_ctx;
struct dc_plane_state;
struct dml2_context;
struct dml2_configuration_options;
@@ -25,4 +26,5 @@ void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_se
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context);
#endif

View File

@@ -12,6 +12,8 @@
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
#define INVALID -1
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
*dml_ctx = vzalloc(sizeof(struct dml2_context));
@@ -208,10 +210,40 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
}
}
static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params)
{
int dc_plane_idx = 0;
int dml_prog_idx, stream_idx, plane_idx;
struct dml2_per_plane_programming *pln_prog = NULL;
for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) {
for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) {
dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context);
if (dml_prog_idx == INVALID) {
continue;
}
pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid;
mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0;
mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1;
mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache;
mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1;
memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0,
pln_prog->mcache_allocation.mcache_x_offsets_plane0,
sizeof(int) * (DML2_MAX_MCACHES + 1));
memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1,
pln_prog->mcache_allocation.mcache_x_offsets_plane1,
sizeof(int) * (DML2_MAX_MCACHES + 1));
dc_plane_idx++;
}
}
}
static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
bool result = false;
struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming;
struct dc_mcache_params mcache_params[MAX_PLANES] = {0};
memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
@@ -246,6 +278,14 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state);
/* if subvp phantoms are present, expand them into dc context */
dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx);
if (in_dc->res_pool->funcs->program_mcache_pipe_config) {
//Prepare mcache params for each plane based on mcache output from DML
dml21_prepare_mcache_params(dml_ctx, context, mcache_params);
//populate mcache regs to each pipe
dml_ctx->config.callbacks.allocate_mcache(context, mcache_params);
}
}
/* Copy DML CLK, WM and REG outputs to bandwidth context */

View File

@@ -8,6 +8,7 @@
#include "os_types.h"
#include "dml_top_soc_parameter_types.h"
#include "dml_top_display_cfg_types.h"
struct dc;
struct dc_state;
@@ -65,4 +66,67 @@ struct socbb_ip_params_external {
struct dml2_ip_capabilities ip_params;
struct dml2_soc_bb soc_bb;
};
/*mcache parameters decided by dml*/
struct dc_mcache_params {
bool valid;
/*
* For iMALL, dedicated mall mcaches are required (sharing of last
* slice possible); for legacy phantom or phantom without return,
* only the mall mcaches need to be valid.
*/
bool requires_dedicated_mall_mcache;
unsigned int num_mcaches_plane0;
unsigned int num_mcaches_plane1;
/*
* Generally, plane0/1 slices must use a disjoint set of caches
* but in some cases the final segment of the two planes can
* use the same cache. If plane0_plane1 is set, then this is
* allowed.
*
* Similarly, the caches allocated to MALL prefetcher are generally
* disjoint, but if mall_prefetch is set, then the final segment
* between the main and the mall pixel requestor can use the same
* cache.
*
* Note that both bits may be set at the same time.
*/
struct {
bool mall_comb_mcache_p0;
bool mall_comb_mcache_p1;
bool plane0_plane1;
} last_slice_sharing;
/*
* A plane is divided into vertical slices of mcaches,
* which wrap on the surface width.
*
* For example, if the surface width is 7680, and split into
* three slices of equal width, the boundary array would contain
* [2560, 5120, 7680]
*
* The assignments are
* 0 = [0 .. 2559]
* 1 = [2560 .. 5119]
* 2 = [5120 .. 7679]
* 0 = [7680 .. INF]
* The final element is implicitly the same as the first, and at
* first seems invalid since it is never referenced (it lies
* outside the surface). However, it's useful when shifting
* (see below).
*
* For any given valid mcache assignment, a shifted version, wrapped
* on the surface width boundary is also assumed to be valid.
*
* For example, shifting [2560, 5120, 7680] by -50 results in
* [2510, 5170, 7630].
*
* The assignments are now:
* 0 = [0 .. 2509]
* 1 = [2510 .. 5169]
* 2 = [5170 .. 7629]
* 0 = [7630 .. INF]
*/
int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1];
int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1];
};
#endif
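The boundary-array semantics described in the comment reduce to a wrap-around lookup. A hypothetical helper (not in the patch), using the comment's 7680-wide, three-slice example:

	/* offsets = {2560, 5120, 7680}: x=100 -> 0, x=3000 -> 1, x=7700 -> 0 */
	static int mcache_slice_for_x(const int *offsets, int num_slices, int x)
	{
		int i;

		for (i = 0; i < num_slices; i++)
			if (x < offsets[i])
				return i;
		return 0;	/* past the last boundary wraps back to slice 0 */
	}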

View File

@@ -40,6 +40,7 @@ struct dc_sink;
struct dc_stream_state;
struct resource_context;
struct display_stream_compressor;
struct dc_mcache_params;
// Configuration of the MALL on the SoC
struct dml2_soc_mall_info {
@@ -107,6 +108,7 @@ struct dml2_dc_callbacks {
unsigned int (*get_max_flickerless_instant_vtotal_increase)(
struct dc_stream_state *stream,
bool is_gaming);
bool (*allocate_mcache)(struct dc_state *context, const struct dc_mcache_params *mcache_params);
};
struct dml2_dc_svp_callbacks {

View File

@@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
// DCN4 should always do Cursor degamma for Cursor Color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
cur_rom_en = 1;
if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
cur_rom_en = 1;
}
}
REG_UPDATE_3(CURSOR0_CONTROL,

View File

@@ -1032,7 +1032,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp2_clear_tiling,
.hubp_clear_tiling = hubp401_clear_tiling,
};
bool hubp401_construct(

View File

@@ -2053,7 +2053,7 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
if (pipe->plane_state) {
ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(

View File

@@ -1550,7 +1550,7 @@ static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
const struct dc *dc = pipe_ctx->stream->link->dc;
if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false)
return false;
// Not necessary for MST configurations

View File

@@ -525,11 +525,11 @@ bool dcn401_program_rmcm_luts(
enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable;
enum hubp_3dlut_fl_mode mode;
enum hubp_3dlut_fl_addressing_mode addr_mode;
enum hubp_3dlut_fl_format format;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
enum hubp_3dlut_fl_width width;
enum hubp_3dlut_fl_format format = 0;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
enum hubp_3dlut_fl_width width = 0;
struct dc *dc = hubp->ctx->dc;
bool bypass_rmcm_3dlut = false;
@@ -654,9 +654,9 @@ void dcn401_populate_mcm_luts(struct dc *dc,
enum hubp_3dlut_fl_mode mode;
enum hubp_3dlut_fl_width width = 0;
enum hubp_3dlut_fl_addressing_mode addr_mode;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
@@ -2260,9 +2260,9 @@ void dcn401_program_pipe(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
if (pipe_ctx->update_flags.raw ||
(pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
pipe_ctx->stream->update_flags.raw)
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
pipe_ctx->plane_state->update_flags.raw ||
pipe_ctx->stream->update_flags.raw))
dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
@@ -2361,7 +2361,7 @@ void dcn401_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
if (pipe->plane_state) {
if (pipe->plane_state->triplebuffer_flips)
BREAK_TO_DEBUGGER();

View File

@@ -65,6 +65,7 @@ struct resource_pool;
struct dc_state;
struct resource_context;
struct clk_bw_params;
struct dc_mcache_params;
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
@@ -220,6 +221,8 @@ struct resource_funcs {
unsigned int (*get_max_hw_cursor_size)(const struct dc *dc,
struct dc_state *state,
const struct dc_stream_state *stream);
bool (*program_mcache_pipe_config)(struct dc_state *context,
const struct dc_mcache_params *mcache_params);
};
struct audio_support{
@@ -384,7 +387,9 @@ struct link_resource {
struct link_config {
struct dc_link_settings dp_link_settings;
struct dc_tunnel_settings dp_tunnel_settings;
};
union pipe_update_flags {
struct {
uint32_t enable : 1;

View File

@@ -207,6 +207,9 @@ struct link_service {
bool (*dp_decide_link_settings)(
struct dc_stream_state *stream,
struct dc_link_settings *link_setting);
void (*dp_decide_tunnel_settings)(
struct dc_stream_state *stream,
struct dc_tunnel_settings *dp_tunnel_setting);
enum dp_link_encoding (*mst_decide_link_encoding_format)(
const struct dc_link *link);
bool (*edp_decide_link_settings)(struct dc_link *link,

View File

@@ -32,6 +32,7 @@
#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define MEMORY_TYPE_HBM 2
#define MAX_MCACHES 8
#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
@@ -65,6 +66,13 @@ struct resource_straps {
uint32_t audio_stream_number;
};
struct dc_mcache_allocations {
int global_mcache_ids_plane0[MAX_MCACHES + 1];
int global_mcache_ids_plane1[MAX_MCACHES + 1];
int global_mcache_ids_mall_plane0[MAX_MCACHES + 1];
int global_mcache_ids_mall_plane1[MAX_MCACHES + 1];
};
struct resource_create_funcs {
void (*read_dce_straps)(
struct dc_context *ctx, struct resource_straps *straps);

View File

@@ -37,36 +37,9 @@
#include "ivsrcid/ivsrcid_vislands30.h"
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
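This hunk and the matching ones below drop byte-identical hpd_ack copies in favor of shared hpd0_ack/hpd1_ack helpers; across the copies only the register field prefixes differed (HPD0_DC_HPD_INT_* vs DC_HPD1_INT_*). The common logic, distilled from the removed bodies, is: ack the interrupt, then invert the sense polarity so the opposite edge also fires:

	/* Distilled from the removed bodies; the shared helper presumably
	 * looks like this, modulo the HPD0/HPD1 field names. */
	static bool hpd0_ack(struct irq_service *irq_service,
			     const struct irq_source_info *info)
	{
		uint32_t value = dm_read_reg(irq_service->ctx, info->status_reg);
		uint32_t sensed = get_reg_field_value(value,
						      HPD0_DC_HPD_INT_STATUS,
						      DC_HPD_SENSE_DELAYED);

		dal_irq_service_ack_generic(irq_service, info);

		value = dm_read_reg(irq_service->ctx, info->enable_reg);
		set_reg_field_value(value, sensed ? 0 : 1,
				    HPD0_DC_HPD_INT_CONTROL,
				    DC_HPD_INT_POLARITY);
		dm_write_reg(irq_service->ctx, info->enable_reg, value);
		return true;
	}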

View File

@@ -46,36 +46,9 @@
#include "dc_types.h"
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
DC_HPD1_INT_STATUS,
DC_HPD1_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
DC_HPD1_INT_CONTROL,
DC_HPD1_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -391,5 +364,3 @@ struct irq_service *dal_irq_service_dce60_create(
dce60_irq_construct(irq_service, init_data);
return irq_service;
}

View File

@@ -37,36 +37,9 @@
#include "dc_types.h"
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
DC_HPD1_INT_STATUS,
DC_HPD1_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
DC_HPD1_INT_CONTROL,
DC_HPD1_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -303,5 +276,3 @@ struct irq_service *dal_irq_service_dce80_create(
dce80_irq_construct(irq_service, init_data);
return irq_service;
}

View File

@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_servic
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn20(
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -80,36 +80,9 @@ static enum dc_irq_source to_dal_irq_source_dcn201(
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -132,36 +132,9 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic
return DC_IRQ_SOURCE_INVALID;
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -139,36 +139,9 @@ static enum dc_irq_source to_dal_irq_source_dcn30(
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -447,4 +420,3 @@ struct irq_service *dal_irq_service_dcn30_create(
dcn30_irq_construct(irq_service, init_data);
return irq_service;
}

View File

@@ -126,26 +126,9 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi
}
}
static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -77,26 +77,9 @@ static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_servi
}
}
static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -128,36 +128,9 @@ static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_servic
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_servi
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -135,36 +135,9 @@ static enum dc_irq_source to_dal_irq_source_dcn315(
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn32(
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -127,36 +127,9 @@ static enum dc_irq_source to_dal_irq_source_dcn35(
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -106,36 +106,9 @@ static enum dc_irq_source to_dal_irq_source_dcn351(
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -105,36 +105,9 @@ static enum dc_irq_source to_dal_irq_source_dcn36(
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -109,36 +109,9 @@ static enum dc_irq_source to_dal_irq_source_dcn401(
}
}
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
.ack = hpd_ack
.ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {

View File

@@ -41,6 +41,16 @@
#include "reg_helper.h"
#include "irq_service.h"
//HPD0_DC_HPD_INT_STATUS
#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
//HPD1_DC_HPD_INT_STATUS
#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x10
#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x4
#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x100
#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x8
#define CTX \
@@ -177,3 +187,57 @@ enum dc_irq_source dal_irq_service_to_irq_source(
src_id,
ext_id);
}
bool hpd0_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
HPD0_DC_HPD_INT_STATUS,
DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
HPD0_DC_HPD_INT_CONTROL,
DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
bool hpd1_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
uint32_t current_status =
get_reg_field_value(
value,
DC_HPD1_INT_STATUS,
DC_HPD1_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
set_reg_field_value(
value,
current_status ? 0 : 1,
DC_HPD1_INT_CONTROL,
DC_HPD1_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
return true;
}
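
The hpd0_ack()/hpd1_ack() helpers introduced here replace the dozen-plus identical per-DCN hpd_ack() copies removed above; the two variants differ only in which register field names they use. The sequence itself is: sample the delayed sense bit, ack the interrupt, then invert the interrupt polarity so the opposite edge fires next. A minimal standalone sketch of that logic, with register access mocked and illustrative mask values (not the real register layout):

/* Illustrative sketch only: mask values and register indices are
 * placeholders, the polarity-toggle logic mirrors hpd0_ack()/hpd1_ack().
 */
#include <stdint.h>
#include <stdbool.h>

#define SENSE_DELAYED_MASK 0x10u  /* stand-in for DC_HPD_SENSE_DELAYED */
#define INT_POLARITY_MASK  0x100u /* stand-in for DC_HPD_INT_POLARITY */

static uint32_t mock_regs[2]; /* [0] = status register, [1] = int control register */

static uint32_t mock_read_reg(int idx)            { return mock_regs[idx]; }
static void mock_write_reg(int idx, uint32_t val) { mock_regs[idx] = val; }
static void mock_ack_generic(void)                { /* would clear the pending bit in HW */ }

static bool hpd_ack_sketch(void)
{
	/* 1. Sample the debounced HPD pin level. */
	bool sense = mock_read_reg(0) & SENSE_DELAYED_MASK;

	/* 2. Acknowledge the pending interrupt. */
	mock_ack_generic();

	/* 3. Re-arm for the opposite edge: if the pin reads high, trigger on
	 * low next (polarity 0) and vice versa, so the next plug/unplug
	 * transition raises a fresh interrupt.
	 */
	uint32_t ctrl = mock_read_reg(1);
	if (sense)
		ctrl &= ~INT_POLARITY_MASK;
	else
		ctrl |= INT_POLARITY_MASK;
	mock_write_reg(1, ctrl);
	return true;
}

int main(void)
{
	mock_regs[0] = SENSE_DELAYED_MASK; /* pretend a connector was just plugged in */
	return hpd_ack_sketch() ? 0 : 1;
}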

View File

@@ -82,4 +82,12 @@ void dal_irq_service_set_generic(
const struct irq_source_info *info,
bool enable);
bool hpd0_ack(
struct irq_service *irq_service,
const struct irq_source_info *info);
bool hpd1_ack(
struct irq_service *irq_service,
const struct irq_source_info *info);
#endif

View File

@@ -611,6 +611,7 @@ static bool detect_dp(struct dc_link *link,
link->dpcd_caps.dongle_type = sink_caps->dongle_type;
link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
link->dpcd_caps.dpcd_rev.raw = 0;
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = 0;
}
return true;
@@ -1007,21 +1008,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
/*
* If this is DP over USB4 link then we need to:
* - Enable BW ALLOC support on DPtx if applicable
*/
if (dc->config.usb4_bw_alloc_support) {
if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
/* update with non reduced link cap if bw allocation mode is supported */
if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
link->dpia_bw_alloc_config.nrd_max_lane_count) {
link->reported_link_cap.link_rate =
link->dpia_bw_alloc_config.nrd_max_link_rate;
link->reported_link_cap.lane_count =
link->dpia_bw_alloc_config.nrd_max_lane_count;
}
}
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
&& link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
if (link_dpia_enable_usb4_dp_bw_alloc_mode(link) == false)
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc = false;
}
break;
}

View File

@@ -148,6 +148,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_stream_state *streams[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
uint8_t count;
int i;
@@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
/* The subsequent call to dc_commit_updates_for_stream for a full update
* will release the current state and swap to a new state. Releasing the
* current state results in the stream pointers in the pipe_ctx structs
* to be zero'd. Hence, cache all streams prior to dc_commit_updates_for_stream.
*/
for (i = 0; i < count; i++)
streams[i] = pipes[i]->stream;
for (i = 0; i < count; i++) {
stream_update.stream = pipes[i]->stream;
stream_update.stream = streams[i];
dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
pipes[i]->stream, &stream_update,
streams[i], &stream_update,
state);
}
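
The two-pass loop above exists because dc_commit_updates_for_stream() swaps in a new dc_state and zeroes the stream pointers held in the old state's pipe_ctx entries, so reading pipes[i]->stream inside the commit loop would dereference already-invalidated pointers from the second iteration on. The hazard and the cache-first fix, reduced to a standalone sketch with mocked types:

#include <stdio.h>
#include <stddef.h>

struct stream { int id; };
struct pipe_ctx { struct stream *stream; };

/* Stand-in for dc_commit_updates_for_stream(): a full update releases the
 * current state, which zeroes the stream pointers in every pipe_ctx.
 */
static void mock_commit(struct pipe_ctx *pipes, int count, struct stream *s)
{
	printf("committing stream %d\n", s->id);
	for (int i = 0; i < count; i++)
		pipes[i].stream = NULL;
}

int main(void)
{
	struct stream s0 = { 0 }, s1 = { 1 };
	struct pipe_ctx pipes[2] = { { &s0 }, { &s1 } };
	struct stream *streams[2];
	int count = 2, i;

	/* Cache the pointers first: after the first mock_commit() call,
	 * pipes[1].stream is already NULL.
	 */
	for (i = 0; i < count; i++)
		streams[i] = pipes[i].stream;
	for (i = 0; i < count; i++)
		mock_commit(pipes, count, streams[i]);
	return 0;
}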
@@ -2365,7 +2374,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
deallocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -2433,7 +2442,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (link->connector_signal == SIGNAL_TYPE_EDP && dc->debug.psp_disabled_wa) {
/* reset internal save state to default since eDP is off */
enum dp_panel_mode panel_mode = dp_get_panel_mode(pipe_ctx->stream->link);
/* since current psp not loaded, we need to reset it to default*/
/* since current psp not loaded, we need to reset it to default */
link->panel_mode = panel_mode;
}
}
@@ -2611,7 +2620,7 @@ void link_set_dpms_on(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_hblank_reduction_on_rx(pipe_ctx);
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
allocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)

View File

@@ -156,6 +156,7 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_get_encoding_format = link_dp_get_encoding_format;
link_srv->dp_should_enable_fec = dp_should_enable_fec;
link_srv->dp_decide_link_settings = link_decide_link_settings;
link_srv->dp_decide_tunnel_settings = link_decide_dp_tunnel_settings;
link_srv->mst_decide_link_encoding_format =
mst_decide_link_encoding_format;
link_srv->edp_decide_link_settings = edp_decide_link_settings;

View File

@@ -158,6 +158,14 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count)
{
/* Calculate the offset for the LTTPR closest to the DPTX, which is the highest in the chain.
 * The offset is 0 for single-LTTPR cases, as the base LTTPR DPCD addresses target LTTPR 1.
 */
return DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (lttpr_count - 1);
}
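
A quick worked example of the offset in use, assuming the standard DPCD values from drm_dp.h (DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE is 0x50, and the PHY_REPEATER1 training-pattern register sits at 0xf0010). This is the same computation the early-TPS2 retimer sequence later in this series applies to DP_TRAINING_PATTERN_SET_PHY_REPEATER1:

#include <stdint.h>
#include <stdio.h>

/* Standard DPCD values from drm_dp.h, quoted here to keep the example
 * self-contained.
 */
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1     0xf0010

/* Mirrors dp_get_closest_lttpr_offset() above. */
static uint32_t closest_lttpr_offset(uint8_t lttpr_count)
{
	return DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (lttpr_count - 1);
}

int main(void)
{
	for (uint8_t n = 1; n <= 4; n++)
		printf("%u LTTPRs -> write TPS at DPCD 0x%05x\n", (unsigned)n,
		       DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
		       closest_lttpr_offset(n));
	/* 1 LTTPR  -> 0xf0010 (offset 0, base block targets LTTPR 1)
	 * 2 LTTPRs -> 0xf0060
	 * 3 LTTPRs -> 0xf00b0
	 * 4 LTTPRs -> 0xf0100
	 */
	return 0;
}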
uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
{
switch (bw) {
@@ -2013,11 +2021,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
/* Read DP tunneling information. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
status = dpcd_get_tunneling_device_data(link);
if (status != DC_OK)
dm_error("%s: Read DP tunneling device data failed.\n", __func__);
}
status = dpcd_get_tunneling_device_data(link);
if (status != DC_OK)
dm_error("%s: Read DP tunneling device data failed.\n", __func__);
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);

View File

@@ -48,6 +48,9 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
/* Convert PHY repeater count read from DPCD uint8_t. */
uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
/* Calculate embedded LTTPR address offset for vendor-specific behaviour */
uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count);
bool dp_is_sink_present(struct dc_link *link);
bool dp_is_lttpr_present(struct dc_link *link);

View File

@@ -62,6 +62,36 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling == false)
goto err;
link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) {
status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY,
dpcd_dp_tun_data, 1);
if (status != DC_OK)
goto err;
link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = dpcd_dp_tun_data[0];
}
DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) "
"DPIA_BW_Alloc_support=%d "
"CM_BW_Alloc_support=%d ",
__func__, link->link_index,
link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id,
link->dpcd_caps.usb4_dp_tun_info.dpia_info.bits.dpia_num,
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc,
link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
status = core_link_read_dpcd(
link,
DP_USB4_ROUTER_TOPOLOGY_ID,
@@ -71,13 +101,6 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
@@ -120,3 +143,20 @@ bool dpia_query_hpd_status(struct dc_link *link)
return link->hpd_status;
}
void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
struct dc_tunnel_settings *dp_tunnel_setting)
{
struct dc_link *link = stream->link;
memset(dp_tunnel_setting, 0, sizeof(*dp_tunnel_setting));
if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT) || (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
dp_tunnel_setting->should_enable_dp_tunneling =
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling;
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
&& link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support)
dp_tunnel_setting->should_use_dp_bw_allocation = true;
}
}
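
link_decide_dp_tunnel_settings() centralizes the checks that the DPMS hunks above now key off, with should_use_dp_bw_allocation replacing the direct DISPLAY_ENDPOINT_USB4_DPIA endpoint test. A reduced standalone sketch of the decision plus a caller-side gate, with the dc structs mocked:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Reduced stand-ins for the dc structs involved. */
struct tunnel_caps {
	bool dp_tunneling;
	bool dpia_bw_alloc;
	bool driver_bw_alloc_support;
};

struct tunnel_settings {
	bool should_enable_dp_tunneling;
	bool should_use_dp_bw_allocation;
};

/* Mirrors link_decide_dp_tunnel_settings(): tunneling is reported for DP
 * signals when DPCD advertises it; BW allocation additionally needs both
 * the DPIA capability bit and the CM/driver support bit.
 */
static void decide_tunnel_settings(const struct tunnel_caps *caps,
				   bool is_dp_signal,
				   struct tunnel_settings *out)
{
	memset(out, 0, sizeof(*out));
	if (!is_dp_signal)
		return;
	out->should_enable_dp_tunneling = caps->dp_tunneling;
	if (caps->dpia_bw_alloc && caps->driver_bw_alloc_support)
		out->should_use_dp_bw_allocation = true;
}

int main(void)
{
	struct tunnel_caps caps = { true, true, false };
	struct tunnel_settings ts;

	decide_tunnel_settings(&caps, true, &ts);
	/* Caller-side gate, as in link_set_dpms_on(): */
	if (ts.should_use_dp_bw_allocation)
		printf("allocate USB4 bandwidth for stream\n");
	else
		printf("skip BW allocation (CM/driver support missing)\n");
	return 0;
}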

View File

@@ -38,4 +38,10 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
* Returns true if HPD high.
*/
bool dpia_query_hpd_status(struct dc_link *link);
/* Decide the DP tunneling settings based on the DPCD capabilities
*/
void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
struct dc_tunnel_settings *dp_tunnel_setting);
#endif /* __DC_LINK_DPIA_H__ */

View File

@@ -46,9 +46,10 @@
*/
static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
return (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA
&& link->hpd_status
&& link->dpia_bw_alloc_config.bw_alloc_enabled);
return (link && link->hpd_status
&& link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
&& link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
static void reset_bw_alloc_struct(struct dc_link *link)
@@ -141,7 +142,7 @@ static int get_non_reduced_max_lane_count(struct dc_link *link)
* granularity, Driver_ID, CM_Group, & populate the BW allocation structs
* for host router and dpia
*/
static void init_usb4_bw_struct(struct dc_link *link)
static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link)
{
reset_bw_alloc_struct(link);
@@ -282,49 +283,26 @@ static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
{
bool ret = false;
uint8_t response = 0,
bw_support_dpia = 0,
bw_support_cm = 0;
uint8_t val;
if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
goto out;
if (link->hpd_status) {
val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
if (core_link_read_dpcd(
link,
DP_TUNNELING_CAPABILITIES,
&response,
sizeof(uint8_t)) == DC_OK)
bw_support_dpia = (response >> 7) & 1;
if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
if (core_link_read_dpcd(
link,
USB4_DRIVER_BW_CAPABILITY,
&response,
sizeof(uint8_t)) == DC_OK)
bw_support_cm = (response >> 7) & 1;
retrieve_usb4_dp_bw_allocation_info(link);
/* Send request acknowledgment to Turn ON DPTX support */
if (bw_support_cm && bw_support_dpia) {
if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) {
link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
}
response = 0x80;
if (core_link_write_dpcd(
link,
DPTX_BW_ALLOCATION_MODE_CONTROL,
&response,
sizeof(uint8_t)) != DC_OK) {
DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
__func__, link->link_index);
} else {
// SUCCESS Enabled DPtx BW Allocation Mode Support
DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
__func__, link->link_index);
ret = true;
init_usb4_bw_struct(link);
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
ret = true;
/*
* During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
@@ -332,11 +310,12 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
* to make the CM to release preallocation and update estimated BW correctly for
* all DPIAs per host router
*/
// TODO: Zero allocation can be removed once the MSFT CM fix has been released
link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
}
} else
DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
}
out:
return ret;
}
@@ -378,7 +357,8 @@ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
*/
void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
{
if (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dpia_bw_alloc_config.bw_alloc_enabled) {
if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpia_bw_alloc_config.bw_alloc_enabled) {
//1. Hot Plug
if (link->hpd_status && peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
@@ -401,7 +381,7 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
if (link_dp_is_bw_alloc_available(link))
link_dpia_send_bw_alloc_request(link, req_bw);
else
DC_LOG_DEBUG("%s: Not able to send the BW Allocation request", __func__);
DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__);
}
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)

View File

@@ -43,13 +43,13 @@ enum bw_type {
};
/*
* Enable BW Allocation Mode Support from the DP-Tx side
* Enable USB4 DP BW allocation mode
*
* @link: pointer to the dc_link struct instance
*
* return: true if USB4 DP BW allocation mode was enabled, false otherwise
*/
bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link);
/*
* Allocates only what the stream needs for bw, so if:

View File

@@ -352,7 +352,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->raw,
DP_SINK_STATUS - DP_SINK_COUNT + 1);
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
retval = core_link_read_dpcd(
link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
&irq_data->bytes.link_service_irq_esi0.raw, 1);
@@ -521,7 +521,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
dp_trace_link_loss_increment(link);
}
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ)
dp_handle_tunneling_irq(link);
}

View File

@@ -785,7 +785,6 @@ void override_training_settings(
lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
}
enum dc_dp_training_pattern decide_cr_training_pattern(

View File

@@ -142,6 +142,14 @@ void decide_8b_10b_training_settings(
lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
/* Some embedded LTTPRs rely on receiving TPS2 before LT to interop reliably with sensitive VGA dongles
* This allows these LTTPRs to minimize freq/phase and skew variation during lock and deskew sequences
*/
if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) ==
AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2) {
lt_settings->lttpr_early_tps2 = true;
}
}
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -173,6 +181,42 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
return LTTPR_MODE_NON_LTTPR;
}
static void set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(struct dc_link *link,
const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t lttpr_count)
{
/* Vendor-specific LTTPR early TPS2 sequence:
* 1. Output TPS2
* 2. Wait 400us
* 3. Set link settings as usual
* 4. Write TPS1 to DP_TRAINING_PATTERN_SET_PHY_REPEATERx targeting LTTPR closest to host
* 5. Wait 1ms
* 6. Begin link training as usual
*/
uint32_t closest_lttpr_address_offset = dp_get_closest_lttpr_offset(lttpr_count);
union dpcd_training_pattern dpcd_pattern = {0};
dpcd_pattern.v1_4.TRAINING_PATTERN_SET = 1;
dpcd_pattern.v1_4.SCRAMBLING_DISABLE = 1;
DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS2. Wait 400us.\n", __func__);
dp_set_hw_training_pattern(link, link_res, DP_TRAINING_PATTERN_SEQUENCE_2, DPRX);
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
udelay(400);
dpcd_set_link_settings(link, lt_settings);
core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + closest_lttpr_address_offset, &dpcd_pattern.raw, 1);
udelay(1000);
}
enum link_training_result perform_8b_10b_clock_recovery_sequence(
struct dc_link *link,
const struct link_resource *link_res,
@@ -383,7 +427,7 @@ enum link_training_result dp_perform_8b_10b_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t repeater_cnt;
uint8_t repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
uint8_t repeater_id;
uint8_t lane = 0;
@@ -391,14 +435,16 @@ enum link_training_result dp_perform_8b_10b_link_training(
start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, lt_settings);
if (lt_settings->lttpr_early_tps2)
set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(link, link_res, lt_settings, repeater_cnt);
else
dpcd_set_link_settings(link, lt_settings);
if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
*/
repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
repeater_id--) {

View File

@@ -1299,7 +1299,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_v) {
dscl_prog_data->easf_v_en = true;
dscl_prog_data->easf_v_ring = 0;
dscl_prog_data->easf_v_sharp_factor = 0;
dscl_prog_data->easf_v_sharp_factor = 1;
dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode
/* 2-bit, BF3 chroma mode correction calculation mode */
@@ -1463,7 +1463,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
dscl_prog_data->easf_h_sharp_factor = 0;
dscl_prog_data->easf_h_sharp_factor = 1;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =

View File

@@ -480,6 +480,10 @@ enum sharpness_setting {
SHARPNESS_ZERO,
SHARPNESS_CUSTOM
};
enum sharpness_range_source {
SHARPNESS_RANGE_DCN = 0,
SHARPNESS_RANGE_DCN_OVERRIDE
};
struct spl_sharpness_range {
int sdr_rgb_min;
int sdr_rgb_max;

View File

@@ -445,6 +445,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub);
uint32_t (*emul_get_inbox1_wptr)(struct dmub_srv *dmub);
void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
bool (*is_supported)(struct dmub_srv *dmub);
@@ -1053,4 +1055,16 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
uint32_t timeout_us,
uint32_t num_free_required);
/**
* dmub_srv_update_inbox_status() - Updates pending status for inbox & reg inbox0
* @dmub: the dmub service
*
* Return:
* DMUB_STATUS_OK - success
* DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
* DMUB_STATUS_HW_FAILURE - issue with HW programming
* DMUB_STATUS_INVALID - unspecified error
*/
enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub);
#endif /* _DMUB_SRV_H_ */

View File

@@ -550,6 +550,11 @@ union replay_hw_flags {
* @is_alpm_initialized: Indicates whether ALPM is initialized
*/
uint32_t is_alpm_initialized : 1;
/**
* @alpm_mode: Indicates ALPM mode selected
*/
uint32_t alpm_mode : 2;
} bitfields;
uint32_t u32All;
@@ -742,6 +747,14 @@ enum dmub_ips_disable_type {
DMUB_IPS_DISABLE_IPS2_Z10 = 4,
DMUB_IPS_DISABLE_DYNAMIC = 5,
DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6,
DMUB_IPS_DISABLE_Z8_RETENTION = 7,
};
enum dmub_ips_rcg_disable_type {
DMUB_IPS_RCG_ENABLE = 0,
DMUB_IPS0_RCG_DISABLE = 1,
DMUB_IPS1_RCG_DISABLE = 2,
DMUB_IPS_RCG_DISABLE = 3
};
#define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -820,11 +833,12 @@ enum dmub_shared_state_feature_id {
*/
union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */
uint32_t detection_required : 1; /**< 1 if detection is required */
uint32_t reserved_bits : 28; /**< Reserved */
uint32_t ips1z8_commit: 1; /**< 1 if in IPS1 Z8 Retention */
uint32_t reserved_bits : 27; /**< Reserved */
} bits;
uint32_t all;
};
@@ -839,7 +853,10 @@ union dmub_shared_state_ips_driver_signals {
uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */
uint32_t reserved_bits : 27; /**< Reserved bits */
uint32_t allow_ips0_rcg : 1; /**< 1 if IPS0 RCG is allowed */
uint32_t allow_ips1_rcg : 1; /**< 1 if IPS1 RCG is allowed */
uint32_t allow_ips1z8 : 1; /**< 1 if IPS1 Z8 Retention is allowed */
uint32_t reserved_bits : 24; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -868,7 +885,9 @@ struct dmub_shared_state_ips_fw {
uint32_t ips1_exit_count; /**< Exit counter for IPS1 */
uint32_t ips2_entry_count; /**< Entry counter for IPS2 */
uint32_t ips2_exit_count; /**< Exit counter for IPS2 */
uint32_t reserved[55]; /**< Reserved, to be updated when adding new fields. */
uint32_t ips1_z8ret_entry_count; /**< Entry counter for IPS1 Z8 Retention */
uint32_t ips1_z8ret_exit_count; /**< Exit counter for IPS1 Z8 Retention */
uint32_t reserved[53]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
@@ -1256,6 +1275,10 @@ enum dmub_gpint_command {
* DESC: Setup debug configs.
*/
DMUB_GPINT__SETUP_DEBUG_MODE = 136,
/**
* DESC: Initiates IPS wake sequence.
*/
DMUB_GPINT__IPS_DEBUG_WAKE = 137,
};
/**
@@ -2116,6 +2139,11 @@ union dmub_cmd_fams2_config {
} stream_v1; //v1
};
struct dmub_fams2_config_v2 {
struct dmub_cmd_fams2_global_config global;
struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1
};
/**
* DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy)
*/
@@ -2124,6 +2152,22 @@ struct dmub_rb_cmd_fams2 {
union dmub_cmd_fams2_config config;
};
/**
* Indirect buffer descriptor
*/
struct dmub_ib_data {
union dmub_addr src; // location of indirect buffer in memory
uint16_t size; // indirect buffer size in bytes
};
/**
* DMUB rb command definition for commands passed over indirect buffer
*/
struct dmub_rb_cmd_ib {
struct dmub_cmd_header header;
struct dmub_ib_data ib_data;
};
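
The indirect-buffer path exists because a FAMS2 v2 config (a global config plus per-stream state for up to DMUB_MAX_STREAMS) no longer fits in a single ring-buffer slot: only this small descriptor travels through the ring, and the firmware fetches the payload from the address in src. A sketch of how a driver might populate the descriptor, using local mirrors of the structs above; the header layout and the command type value are assumptions for illustration, not the real DMUB ABI:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Local mirrors of the structures above, for a self-contained sketch. */
union mock_dmub_addr {
	struct { uint32_t low_part; uint32_t high_part; } u;
	uint64_t quad_part;
};

struct mock_ib_data {
	union mock_dmub_addr src; /* location of indirect buffer in memory */
	uint16_t size;            /* indirect buffer size in bytes */
};

struct mock_cmd_header { /* assumed shape of dmub_cmd_header */
	uint8_t type;
	uint8_t sub_type;
	uint16_t payload_bytes;
};

struct mock_rb_cmd_ib {
	struct mock_cmd_header header;
	struct mock_ib_data ib_data;
};

static void fill_fams2_ib_cmd(struct mock_rb_cmd_ib *cmd,
			      uint64_t config_gpu_addr, uint16_t config_size)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->header.type = 0x00;  /* hypothetical FAMS command type id */
	cmd->header.sub_type = 5; /* DMUB_CMD__FAMS2_IB_CONFIG, per the enum above */
	cmd->header.payload_bytes = sizeof(cmd->ib_data);
	/* Only the descriptor goes through the ring; the firmware reads
	 * config_size bytes of FAMS2 v2 config from config_gpu_addr.
	 */
	cmd->ib_data.src.quad_part = config_gpu_addr;
	cmd->ib_data.size = config_size;
}

int main(void)
{
	struct mock_rb_cmd_ib cmd;

	fill_fams2_ib_cmd(&cmd, 0x100000000ull, 1536);
	printf("IB at 0x%llx, %u bytes\n",
	       (unsigned long long)cmd.ib_data.src.quad_part, cmd.ib_data.size);
	return 0;
}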
/**
* enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/
@@ -2147,6 +2191,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware notify power state.
*/
DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
/**
* DCN notify to release HW.
*/
DMUB_CMD__IDLE_OPT_RELEASE_HW = 4,
};
/**
@@ -2908,8 +2957,9 @@ enum dmub_cmd_fams_type {
*/
DMUB_CMD__FAMS_SET_MANUAL_TRIGGER = 3,
DMUB_CMD__FAMS2_CONFIG = 4,
DMUB_CMD__FAMS2_DRR_UPDATE = 5,
DMUB_CMD__FAMS2_FLIP = 6,
DMUB_CMD__FAMS2_IB_CONFIG = 5,
DMUB_CMD__FAMS2_DRR_UPDATE = 6,
DMUB_CMD__FAMS2_FLIP = 7,
};
/**
@@ -3616,6 +3666,12 @@ struct dmub_rb_cmd_psr_set_power_opt {
struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
};
enum dmub_alpm_mode {
ALPM_AUXWAKE = 0,
ALPM_AUXLESS = 1,
ALPM_UNSUPPORTED = 2,
};
/**
* Definition of Replay Residency GPINT command.
* Bit[0] - Residency mode for Revision 0
@@ -3749,6 +3805,15 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
};
struct dmub_alpm_auxless_data {
uint16_t lfps_setup_ns;
uint16_t lfps_period_ns;
uint16_t lfps_silence_ns;
uint16_t lfps_t1_t2_override_us;
short lfps_t1_t2_offset_us;
uint8_t lttpr_count;
};
/**
* Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -3819,6 +3884,10 @@ struct dmub_cmd_replay_copy_settings_data {
* Use FSM state for Replay power up/down
*/
uint8_t use_phy_fsm;
/**
* Use for AUX-less ALPM LFPS wake operation
*/
struct dmub_alpm_auxless_data auxless_alpm_data;
};
/**
@@ -5884,8 +5953,11 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/
struct dmub_rb_cmd_assr_enable assr_enable;
struct dmub_rb_cmd_fams2 fams2_config;
struct dmub_rb_cmd_ib ib_fams2_config;
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;

View File

@@ -66,24 +66,20 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
const uint32_t timeout_us = 1 * 1000 * 1000; //1s
const uint32_t poll_delay_us = 1; //1us
uint32_t i = 0;
uint32_t in_reset, scratch, pwait_mode;
uint32_t enabled, in_reset, scratch, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
REG_GET(DMCUB_CNTL,
DMCUB_ENABLE, &enabled);
REG_GET(DMCUB_CNTL2,
DMCUB_SOFT_RESET, &in_reset);
if (in_reset == 0) {
if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
for (i = 0; i < timeout_us; i++) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
udelay(poll_delay_us);
}
for (; i < timeout_us; i++) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)

View File

@@ -952,10 +952,8 @@ enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
!dmub->hw_funcs.get_inbox1_wptr)
return DMUB_STATUS_INVALID;
/* take a snapshot of the required mailbox state */
scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
for (i = 0; i <= timeout_us; i += polling_interval_us) {
scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending &&
@@ -978,30 +976,6 @@ enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
static enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
{
uint32_t rptr;
/* update inbox1 state */
rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
if (rptr > dmub->inbox1.rb.capacity)
return DMUB_STATUS_HW_FAILURE;
if (dmub->inbox1.rb.rptr > rptr) {
/* rb wrapped */
dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
} else {
dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
}
dmub->inbox1.rb.rptr = rptr;
/* update reg_inbox0 */
dmub_srv_update_reg_inbox0_status(dmub);
return DMUB_STATUS_OK;
}
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
@@ -1353,3 +1327,33 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
{
uint32_t rptr;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
if (dmub->power_state != DMUB_POWER_STATE_D0)
return DMUB_STATUS_POWER_STATE_D3;
/* update inbox1 state */
rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
if (rptr > dmub->inbox1.rb.capacity)
return DMUB_STATUS_HW_FAILURE;
if (dmub->inbox1.rb.rptr > rptr) {
/* rb wrapped */
dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
} else {
dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
}
dmub->inbox1.rb.rptr = rptr;
/* update reg_inbox0 */
dmub_srv_update_reg_inbox0_status(dmub);
return DMUB_STATUS_OK;
}
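
The wrap branch is the interesting part: rptr and the cached inbox1.rb.rptr are byte offsets into a circular buffer, so when the firmware's read pointer passes the end of the ring the delta has to be taken modulo the capacity before converting to a command count. A standalone sketch of that bookkeeping, assuming the 64-byte DMUB_RB_CMD_SIZE slot size:

#include <stdint.h>
#include <assert.h>

#define RB_CMD_SIZE 64 /* assumed DMUB_RB_CMD_SIZE: one ring slot per command */

/* Mirrors the rptr bookkeeping in dmub_srv_update_inbox_status(): count how
 * many commands the firmware consumed since the last poll, handling the read
 * pointer wrapping past the end of the ring.
 */
static uint32_t consumed_cmds(uint32_t old_rptr, uint32_t new_rptr,
			      uint32_t capacity)
{
	if (old_rptr > new_rptr) /* rb wrapped */
		return (new_rptr + capacity - old_rptr) / RB_CMD_SIZE;
	return (new_rptr - old_rptr) / RB_CMD_SIZE;
}

int main(void)
{
	/* 4 KiB ring: rptr moved from byte 3968 to byte 128, so it wrapped,
	 * consuming (128 + 4096 - 3968) / 64 = 4 commands.
	 */
	assert(consumed_cmds(3968, 128, 4096) == 4);
	/* No wrap: 256 -> 512 is also 4 commands. */
	assert(consumed_cmds(256, 512, 4096) == 4);
	return 0;
}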

Some files were not shown because too many files have changed in this diff.