mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-08 23:49:14 +08:00
drm/amd/pm: support return vpe clock table
PM supports returning the VPE clock table and the SOC clock table.

Signed-off-by: Peyton Lee <peytolee@amd.com>
Reviewed-by: Li Ma <li.ma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
@@ -247,6 +247,7 @@ struct pp_smu_funcs_nv {
|
||||
#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
|
||||
#define PP_SMU_NUM_DCLK_DPM_LEVELS 8
|
||||
#define PP_SMU_NUM_VCLK_DPM_LEVELS 8
|
||||
#define PP_SMU_NUM_VPECLK_DPM_LEVELS 8
|
||||
|
||||
struct dpm_clock {
|
||||
uint32_t Freq; // In MHz
|
||||
@@ -262,6 +263,7 @@ struct dpm_clocks {
|
||||
struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS];
|
||||
struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS];
|
||||
struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS];
|
||||
struct dpm_clock VPEClocks[PP_SMU_NUM_VPECLK_DPM_LEVELS];
|
||||
};
|
||||
|
||||
|
||||
|
||||
@@ -616,6 +616,16 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
|
||||
enable ? "enable" : "disable", ret);
|
||||
}
|
||||
|
||||
/*
 * amdgpu_dpm_enable_vpe - power the VPE IP block up or down through the SMU.
 * @adev: amdgpu device handle
 * @enable: true to power up VPE, false to power it down
 *
 * Powergating is the inverse of enabling, hence the !enable passed to the
 * SMU powergating call.  Failures are logged but not propagated, matching
 * the other amdgpu_dpm_enable_* helpers.
 */
void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE,
						    !enable);

	if (ret)
		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}
|
||||
|
||||
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
|
||||
{
|
||||
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
|
||||
|
||||
@@ -445,6 +445,7 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
|
||||
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
|
||||
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
|
||||
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
|
||||
void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable);
|
||||
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
|
||||
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
|
||||
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
|
||||
|
||||
@@ -1085,6 +1085,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
|
||||
0, NULL);
|
||||
}
|
||||
|
||||
static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
|
||||
{
|
||||
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
|
||||
uint8_t idx;
|
||||
|
||||
/* Only the Clock information of SOC and VPE is copied to provide VPE DPM settings for use. */
|
||||
for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
|
||||
clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx]:0;
|
||||
clock_table->SocClocks[idx].Vol = 0;
|
||||
}
|
||||
|
||||
for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
|
||||
clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx]:0;
|
||||
clock_table->VPEClocks[idx].Vol = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
|
||||
.check_fw_status = smu_v14_0_check_fw_status,
|
||||
.check_fw_version = smu_v14_0_check_fw_version,
|
||||
@@ -1115,6 +1134,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
|
||||
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
|
||||
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
|
||||
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
|
||||
.get_dpm_clock_table = smu_14_0_0_get_dpm_table,
|
||||
};
|
||||
|
||||
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
|
||||
|
||||
Reference in New Issue
Block a user