/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu11_driver_if.h"
#include "soc15_common.h"
#include "atom.h"
#include "vega20_ppt.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_9_0_offset.h"
#include "asic_reg/mp/mp_9_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"
MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
#define SMU11_TOOL_SIZE 0x19000
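
/*
 * Driver <-> SMU message interface (MP1 mailbox):
 *   MP1_SMN_C2PMSG_66 - message register (ASIC-specific message index)
 *   MP1_SMN_C2PMSG_82 - 32-bit argument/response payload
 *   MP1_SMN_C2PMSG_90 - response/status, 0x1 means the message succeeded
 */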
static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
return 0;
}
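
/* Read back the SMU's 32-bit argument/response value from C2PMSG_82. */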
static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
return 0;
}
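
/*
 * Poll C2PMSG_90 until the SMU posts a response or adev->usec_timeout
 * microseconds elapse; only a response value of 0x1 counts as success.
 */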
static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t cur_value, i;
for (i = 0; i < adev->usec_timeout; i++) {
cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
break;
udelay(1);
}
/* timing out here means the SMU never posted a response */
if (i == adev->usec_timeout)
return -ETIME;
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
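
/*
 * Send a message with no payload: translate the generic message ID to the
 * ASIC-specific index, wait out any previous message, clear the response
 * register, write the message and wait for the SMU to acknowledge it.
 */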
static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
index = smu_msg_get_index(smu, msg);
if (index < 0)
return index;
smu_v11_0_wait_for_response(smu);
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v11_0_wait_for_response(smu);
if (ret)
pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
ret);
return ret;
}
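
/*
 * Same handshake as smu_v11_0_send_msg(), but a 32-bit parameter is written
 * to C2PMSG_82 before the message is issued.
 */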
static int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
uint32_t param)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
index = smu_msg_get_index(smu, msg);
if (index < 0)
return index;
ret = smu_v11_0_wait_for_response(smu);
if (ret)
pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
ret);
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v11_0_wait_for_response(smu);
if (ret)
pr_err("Failed to send message 0x%x, response 0x%x\n", msg,
ret);
return ret;
}
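
/*
 * Request and validate the SMC firmware image; when firmware is loaded by
 * the PSP, also register the image in adev->firmware.ucode[].
 */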
static int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const char *chip_name;
char fw_name[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
struct amdgpu_firmware_info *ucode = NULL;
switch (adev->asic_type) {
case CHIP_VEGA20:
chip_name = "vega20";
break;
default:
BUG();
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->pm.fw);
if (err)
goto out;
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
ucode->fw = adev->pm.fw;
header = (const struct common_firmware_header *)ucode->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
out:
if (err) {
DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
fw_name);
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
}
return err;
}
static int smu_v11_0_load_microcode(struct smu_context *smu)
{
return 0;
}
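
/*
 * Read the MP1 firmware flags through the NBIO PCIE index/data pair and
 * report the firmware as ready once its interrupts-enabled flag is set.
 */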
static int smu_v11_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
return 0;
return -EIO;
}
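
/*
 * Ask the SMU for its driver interface version and compare it with the
 * SMU11_DRIVER_IF_VERSION this driver was built against.
 */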
static int smu_v11_0_check_fw_version(struct smu_context *smu)
{
uint32_t smu_version = 0xff;
int ret = 0;
ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
if (ret)
goto err;
ret = smu_read_smc_arg(smu, &smu_version);
if (ret)
goto err;
if (smu_version != SMU11_DRIVER_IF_VERSION)
ret = -EINVAL;
err:
return ret;
}
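
/*
 * Locate the powerplayinfo data table in the VBIOS and record its address
 * and size as the raw power play table.
 */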
static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
{
int ret, index;
uint16_t size;
uint8_t frev, crev;
void *table;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
(uint8_t **)&table);
if (ret)
return ret;
smu->smu_table.power_play_table = table;
smu->smu_table.power_play_table_size = size;
return 0;
}
static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
return -EINVAL;
smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), GFP_KERNEL);
if (!smu_dpm->dpm_context)
return -ENOMEM;
smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
return 0;
}
static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
return -EINVAL;
kfree(smu_dpm->dpm_context);
smu_dpm->dpm_context = NULL;
smu_dpm->dpm_context_size = 0;
return 0;
}
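
/*
 * Allocate the driver-side descriptors for the SMU tables (pptable,
 * watermarks, metrics, overdrive, pmstatuslog) and the smu11 DPM context.
 */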
static int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
int ret = 0;
if (smu_table->tables || smu_table->table_count != 0)
return -EINVAL;
tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
if (!tables)
return -ENOMEM;
smu_table->tables = tables;
smu_table->table_count = TABLE_COUNT;
SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
ret = smu_v11_0_init_dpm_context(smu);
if (ret)
return ret;
return 0;
}
static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
int ret = 0;
if (!smu_table->tables || smu_table->table_count == 0)
return -EINVAL;
kfree(smu_table->tables);
smu_table->tables = NULL;
smu_table->table_count = 0;
ret = smu_v11_0_fini_dpm_context(smu);
if (ret)
return ret;
return 0;
}
static int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
GFP_KERNEL);
if (!smu_power->power_context)
return -ENOMEM;
smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
return 0;
}
static int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
if (!smu_power->power_context || smu_power->power_context_size == 0)
return -EINVAL;
kfree(smu_power->power_context);
smu_power->power_context = NULL;
smu_power->power_context_size = 0;
return 0;
}
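
/*
 * Pull the boot-up clocks, voltages and cooling/pptable IDs from the atom
 * firmwareinfo table; socclk and dcefclk are left at zero here and filled
 * in later from the getsmuclockinfo command table.
 */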
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
int ret, index;
uint16_t size;
uint8_t frev, crev;
struct atom_common_table_header *header;
struct atom_firmware_info_v3_3 *v_3_3;
struct atom_firmware_info_v3_1 *v_3_1;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
firmwareinfo);
ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
(uint8_t **)&header);
if (ret)
return ret;
if (header->format_revision != 3) {
pr_err("unknown atom_firmware_info version! for smu11\n");
return -EINVAL;
}
switch (header->content_revision) {
case 0:
case 1:
case 2:
v_3_1 = (struct atom_firmware_info_v3_1 *)header;
smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
smu->smu_table.boot_values.socclk = 0;
smu->smu_table.boot_values.dcefclk = 0;
smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = 0;
break;
case 3:
default:
v_3_3 = (struct atom_firmware_info_v3_3 *)header;
smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
smu->smu_table.boot_values.socclk = 0;
smu->smu_table.boot_values.dcefclk = 0;
smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
}
return 0;
}
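
/*
 * Query bootup SOCCLK and DCEFCLK through the getsmuclockinfo atom command
 * table; the output is returned in place of the input parameter block and
 * converted from Hz to 10 kHz units.
 */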
static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
int ret, index;
struct amdgpu_device *adev = smu->adev;
struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
getsmuclockinfo);
ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
(uint32_t *)&input);
if (ret)
return -EINVAL;
output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
memset(&input, 0, sizeof(input));
input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
getsmuclockinfo);
ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
(uint32_t *)&input);
if (ret)
return -EINVAL;
output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
return 0;
}
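
/*
 * Tell the SMU where the driver-allocated memory pool used for DRAM logging
 * lives: its system (CPU) virtual address, its MC address and its size.
 * A missing pool is silently skipped.
 */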
static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
int ret = 0;
uint64_t address;
uint32_t address_low, address_high;
if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
return ret;
address = (uintptr_t)memory_pool->cpu_addr;
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrHigh,
address_high);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrLow,
address_low);
if (ret)
return ret;
address = memory_pool->mc_address;
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
address_high);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
address_low);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
(uint32_t)memory_pool->size);
if (ret)
return ret;
return ret;
}
static int smu_v11_0_check_pptable(struct smu_context *smu)
{
int ret;
ret = smu_check_powerplay_table(smu);
return ret;
}
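
/*
 * Allocate the driver copy of PPTable_t and have the ASIC-specific code
 * store (and optionally append to) the power play table in it.
 */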
static int smu_v11_0_parse_pptable(struct smu_context *smu)
{
int ret;
struct smu_table_context *table_context = &smu->smu_table;
if (table_context->driver_pptable)
return -EINVAL;
table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
if (!table_context->driver_pptable)
return -ENOMEM;
ret = smu_store_powerplay_table(smu);
if (ret)
return -EINVAL;
ret = smu_append_powerplay_table(smu);
return ret;
}
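
/*
 * Seed the smu11 DPM context min/max ranges from the frequency tables in
 * the driver's PPTable_t copy.
 */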
static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
PPTable_t *driver_ppt = (PPTable_t *)&(smu->smu_table.tables[TABLE_PPTABLE]);
struct smu_11_0_dpm_context *dpm_context = (struct smu_11_0_dpm_context *)smu_dpm->dpm_context;
if (dpm_context && driver_ppt) {
dpm_context->dpm_tables.soc_table.min = driver_ppt->FreqTableSocclk[0];
dpm_context->dpm_tables.soc_table.max = driver_ppt->FreqTableSocclk[NUM_SOCCLK_DPM_LEVELS - 1];
dpm_context->dpm_tables.gfx_table.min = driver_ppt->FreqTableGfx[0];
dpm_context->dpm_tables.gfx_table.max = driver_ppt->FreqTableGfx[NUM_GFXCLK_DPM_LEVELS - 1];
dpm_context->dpm_tables.uclk_table.min = driver_ppt->FreqTableUclk[0];
dpm_context->dpm_tables.uclk_table.max = driver_ppt->FreqTableUclk[NUM_UCLK_DPM_LEVELS - 1];
dpm_context->dpm_tables.vclk_table.min = driver_ppt->FreqTableVclk[0];
dpm_context->dpm_tables.vclk_table.max = driver_ppt->FreqTableVclk[NUM_VCLK_DPM_LEVELS - 1];
dpm_context->dpm_tables.dclk_table.min = driver_ppt->FreqTableDclk[0];
dpm_context->dpm_tables.dclk_table.max = driver_ppt->FreqTableDclk[NUM_DCLK_DPM_LEVELS - 1];
dpm_context->dpm_tables.dcef_table.min = driver_ppt->FreqTableDcefclk[0];
dpm_context->dpm_tables.dcef_table.max = driver_ppt->FreqTableDcefclk[NUM_DCEFCLK_DPM_LEVELS - 1];
dpm_context->dpm_tables.pixel_table.min = driver_ppt->FreqTablePixclk[0];
dpm_context->dpm_tables.pixel_table.max = driver_ppt->FreqTablePixclk[NUM_PIXCLK_DPM_LEVELS - 1];
dpm_context->dpm_tables.display_table.min = driver_ppt->FreqTableDispclk[0];
dpm_context->dpm_tables.display_table.max = driver_ppt->FreqTableDispclk[NUM_DISPCLK_DPM_LEVELS - 1];
dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];
return 0;
}
return -EINVAL;
}
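
/*
 * Copy a driver table into its VRAM staging buffer and ask the SMU to pull
 * it in (DRAM -> SMU transfer keyed by table_id).
 */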
static int smu_v11_0_copy_table_to_smc(struct smu_context *smu,
uint32_t table_id)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_table *driver_pptable;
int ret = 0;
if (table_id >= TABLE_COUNT) {
pr_err("Invalid SMU Table ID for smu11!");
return -EINVAL;
}
driver_pptable = &smu->smu_table.tables[table_id];
if (!driver_pptable->cpu_addr) {
pr_err("Invalid virtual address for smu11!");
return -EINVAL;
}
if (!driver_pptable->mc_address) {
pr_err("Invalid MC address for smu11!");
return -EINVAL;
}
if (!driver_pptable->size) {
pr_err("Invalid SMU Table size for smu11!");
return -EINVAL;
}
memcpy(driver_pptable->cpu_addr, table_context->driver_pptable,
driver_pptable->size);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(driver_pptable->mc_address));
if (ret) {
pr_err("[CopyTableToSMC] Attempt to Set Dram Addr High Failed!");
return ret;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(driver_pptable->mc_address));
if (ret) {
pr_err("[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!");
return ret;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableDram2Smu,
table_id);
if (ret) {
pr_err("[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!");
return ret;
}
return 0;
}
static int smu_v11_0_write_pptable(struct smu_context *smu)
{
int ret = 0;
ret = smu_v11_0_copy_table_to_smc(smu, TABLE_PPTABLE);
return ret;
}
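
/*
 * Program the minimum deep-sleep DCEFCLK from the VBIOS bootup value
 * (passed to the SMU in MHz).
 */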
static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
int ret = 0;
struct smu_table_context *table_context = &smu->smu_table;
if (!table_context)
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
table_context->boot_values.dcefclk / 100);
if (ret)
pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
return ret;
}
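
/*
 * If the PMSTATUSLOG (tools) table has been placed in VRAM, pass its MC
 * address to the SMU.
 */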
static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
if (tool_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
upper_32_bits(tool_table->mc_address));
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
lower_32_bits(tool_table->mc_address));
}
return ret;
}
static const struct smu_funcs smu_v11_0_funcs = {
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
.check_fw_status = smu_v11_0_check_fw_status,
.check_fw_version = smu_v11_0_check_fw_version,
.send_smc_msg = smu_v11_0_send_msg,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
.read_smc_arg = smu_v11_0_read_arg,
.read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
.init_smc_tables = smu_v11_0_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.check_pptable = smu_v11_0_check_pptable,
.parse_pptable = smu_v11_0_parse_pptable,
.populate_smc_pptable = smu_v11_0_populate_smc_pptable,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
};
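
/* Install the common smu11 callbacks and the ASIC-specific ppt functions. */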
void smu_v11_0_set_smu_funcs(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
smu->funcs = &smu_v11_0_funcs;
switch (adev->asic_type) {
case CHIP_VEGA20:
vega20_set_ppt_funcs(smu);
break;
default:
pr_warn("Unknow asic for smu11\n");
}
}