#define SWSMU_CODE_LAYER_L2
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0_6_pmfw.h"
#include "smu13_driver_if_v13_0_6.h"
#include "smu_v13_0_6_ppsmc.h"
#include "soc15_common.h"
#include "atom.h"
#include "power_state.h"
#include "smu_v13_0.h"
#include "smu_v13_0_6_ppt.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "thm/thm_11_0_2_offset.h"
#include "thm/thm_11_0_2_sh_mask.h"
#include "amdgpu_xgmi.h"
#include <linux/pci.h>
#include "amdgpu_ras.h"
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"
#include "umc_v12_0.h"
#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS
#define MP1_Public 0x03b00000
#define smnMP1_FIRMWARE_FLAGS 0x3010028
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \
[smu_feature] = { 1, (smu_13_0_6_feature) }
#define FEATURE_MASK(feature) (1ULL << feature)
static const struct smu_feature_bits smu_v13_0_6_dpm_features = {
.bits = {
SMU_FEATURE_BIT_INIT(FEATURE_DATA_CALCULATION),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_LCLK),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI),
SMU_FEATURE_BIT_INIT(FEATURE_DPM_VCN)
}
};
#define smnPCIE_ESM_CTRL 0x93D0
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define MAX_LINK_WIDTH 6
#define smnPCIE_LC_SPEED_CNTL 0x1a340290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
#define LINK_SPEED_MAX 4
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
uint16_t mcatype;
};
struct mca_ras_info {
enum amdgpu_ras_block blkid;
enum amdgpu_mca_ip ip;
int *err_code_array;
int err_code_count;
int (*get_err_count)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count);
bool (*bank_is_valid)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry);
};
#define P2S_TABLE_ID_A 0x50325341
#define P2S_TABLE_ID_X 0x50325358
#define P2S_TABLE_ID_3 0x50325303
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI),
MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI),
MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI),
MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
};
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
CLK_MAP(FCLK, PPCLK_FCLK),
CLK_MAP(UCLK, PPCLK_UCLK),
CLK_MAP(MCLK, PPCLK_UCLK),
CLK_MAP(DCLK, PPCLK_DCLK),
CLK_MAP(VCLK, PPCLK_VCLK),
CLK_MAP(LCLK, PPCLK_LCLK),
};
static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = {
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT, FEATURE_DPM_VCN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT, FEATURE_DPM_VCN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, FEATURE_GFXOFF),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK),
};
#define TABLE_PMSTATUSLOG 0
#define TABLE_SMU_METRICS 1
#define TABLE_I2C_COMMANDS 2
#define TABLE_COUNT 3
static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PMSTATUSLOG),
TAB_MAP(SMU_METRICS),
TAB_MAP(I2C_COMMANDS),
};
static const uint8_t smu_v13_0_6_throttler_map[] = {
[THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT),
[THROTTLER_THERMAL_SOCKET_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
[THROTTLER_THERMAL_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
[THROTTLER_THERMAL_VR_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
[THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
};
#define GET_GPU_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V0) ?\
(metrics_v0->field) : (metrics_v2->field))
#define GET_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V1) ?\
(metrics_v1->field) : GET_GPU_METRIC_FIELD(field, version))
#define METRICS_TABLE_SIZE (max3(sizeof(MetricsTableV0_t),\
sizeof(MetricsTableV1_t),\
sizeof(MetricsTableV2_t)))
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
uint32_t feature_num;
struct smu_dpm_table *dpm_table;
uint32_t *freq_table;
};
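/*
 * Pick the metrics table layout published by the PMFW: APUs on firmware
 * <= 0x4556900 use the V1 layout, MP1 13.0.12 uses V2, and everything
 * else uses the original V0 layout.
 */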
static inline int smu_v13_0_6_get_metrics_version(struct smu_context *smu)
{
if ((smu->adev->flags & AMD_IS_APU) &&
smu->smc_fw_version <= 0x4556900)
return METRICS_VERSION_V1;
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 12))
return METRICS_VERSION_V2;
return METRICS_VERSION_V0;
}
static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
dpm_context->caps |= BIT_ULL(cap);
}
static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
dpm_context->caps &= ~BIT_ULL(cap);
}
bool smu_v13_0_6_cap_supported(struct smu_context *smu,
enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
return !!(dpm_context->caps & BIT_ULL(cap));
}
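/*
 * Capability initialization for SMU v13.0.14: the default list is applied
 * unconditionally and each optional capability is enabled once the PMFW
 * version crosses the threshold that introduced it.
 */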
static void smu_v13_0_14_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
SMU_CAP(SET_UCLK_MAX),
SMU_CAP(DPM_POLICY),
SMU_CAP(PCIE_METRICS),
SMU_CAP(CTF_LIMIT),
SMU_CAP(MCA_DEBUG_MODE),
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND) };
uint32_t fw_ver = smu->smc_fw_version;
for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
smu_v13_0_6_cap_set(smu, default_cap_list[i]);
if (fw_ver >= 0x05550E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
if (fw_ver >= 0x05550B00)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
if (fw_ver >= 0x5551800)
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
if (fw_ver >= 0x5551600) {
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
}
static void smu_v13_0_12_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
SMU_CAP(PCIE_METRICS),
SMU_CAP(CTF_LIMIT),
SMU_CAP(MCA_DEBUG_MODE),
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND),
SMU_CAP(OTHER_END_METRICS),
SMU_CAP(PER_INST_METRICS) };
uint32_t fw_ver = smu->smc_fw_version;
for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
smu_v13_0_6_cap_set(smu, default_cap_list[i]);
if (fw_ver < 0x00561900)
smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
if (fw_ver >= 0x00561700)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
if (fw_ver >= 0x00561E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
if (fw_ver >= 0x00562500)
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
if (fw_ver >= 0x04560100) {
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (fw_ver > 0x04560900)
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
if (fw_ver >= 0x04560D00) {
smu_v13_0_6_cap_set(smu, SMU_CAP(FAST_PPT));
if (smu->adev->gmc.xgmi.physical_node_id == 0)
smu_v13_0_6_cap_set(smu, SMU_CAP(SYSTEM_POWER_METRICS));
}
if (fw_ver >= 0x04560700) {
if (fw_ver >= 0x04560900) {
smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
if (smu->adev->gmc.xgmi.physical_node_id == 0)
smu_v13_0_6_cap_set(smu, SMU_CAP(NPM_METRICS));
} else if (!amdgpu_sriov_vf(smu->adev))
smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
} else {
smu_v13_0_12_tables_fini(smu);
}
}
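/*
 * Capability initialization for SMU v13.0.6. 'pgm' is the program id held
 * in the top byte of the firmware version; APU, SR-IOV and bare-metal
 * parts each gate optional capabilities on different per-program firmware
 * version thresholds.
 */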
static void smu_v13_0_6_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
SMU_CAP(SET_UCLK_MAX),
SMU_CAP(DPM_POLICY),
SMU_CAP(PCIE_METRICS),
SMU_CAP(CTF_LIMIT),
SMU_CAP(MCA_DEBUG_MODE),
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND) };
struct amdgpu_device *adev = smu->adev;
uint32_t fw_ver = smu->smc_fw_version;
uint32_t pgm = (fw_ver >> 24) & 0xFF;
for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
smu_v13_0_6_cap_set(smu, default_cap_list[i]);
if (fw_ver < 0x552F00)
smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
if (fw_ver < 0x554500)
smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT));
if (adev->flags & AMD_IS_APU) {
smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
if (fw_ver >= 0x04556A00)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
} else {
if (fw_ver >= 0x557600)
smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
if (fw_ver < 0x00556000)
smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
smu_v13_0_6_cap_clear(smu, SMU_CAP(SET_UCLK_MAX));
if (fw_ver < 0x556300)
smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
if (fw_ver < 0x554800)
smu_v13_0_6_cap_clear(smu, SMU_CAP(MCA_DEBUG_MODE));
if (fw_ver >= 0x556F00)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver < 0x00555a00)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
if (fw_ver < 0x00555600)
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
if ((pgm == 7 && fw_ver >= 0x7550E00) ||
(pgm == 0 && fw_ver >= 0x00557E00))
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
if (amdgpu_sriov_vf(adev)) {
if (fw_ver >= 0x00558200)
amdgpu_virt_attr_set(&adev->virt.virt_caps,
AMDGPU_VIRT_CAP_POWER_LIMIT,
AMDGPU_CAP_ATTR_RW);
if ((pgm == 0 && fw_ver >= 0x00558000) ||
(pgm == 7 && fw_ver >= 0x7551000)) {
smu_v13_0_6_cap_set(smu,
SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu,
SMU_CAP(BOARD_VOLTAGE));
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
} else {
if ((pgm == 0 && fw_ver >= 0x00557F01) ||
(pgm == 7 && fw_ver >= 0x7551000)) {
smu_v13_0_6_cap_set(smu,
SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu,
SMU_CAP(BOARD_VOLTAGE));
}
if ((pgm == 0 && fw_ver >= 0x00558000) ||
(pgm == 7 && fw_ver >= 0x7551000))
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
((pgm == 4) && (fw_ver >= 0x4557000)))
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
if ((pgm == 0 && fw_ver >= 0x00558200) ||
(pgm == 7 && fw_ver >= 0x07551400))
smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
static void smu_v13_0_x_init_caps(struct smu_context *smu)
{
switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 12):
return smu_v13_0_12_init_caps(smu);
case IP_VERSION(13, 0, 14):
return smu_v13_0_14_init_caps(smu);
default:
return smu_v13_0_6_init_caps(smu);
}
}
static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
{
int r;
r = smu_v13_0_check_fw_version(smu);
if (!r)
smu_v13_0_x_init_caps(smu);
return r;
}
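/*
 * Request the SMU firmware image and locate the embedded P2S table for
 * this SKU: APUs use the default table id, while other parts select the
 * 'X' or '3' variant based on the low nibble of the PCI device id.
 */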
static int smu_v13_0_6_init_microcode(struct smu_context *smu)
{
const struct smc_firmware_header_v2_1 *v2_1;
const struct common_firmware_header *hdr;
struct amdgpu_firmware_info *ucode = NULL;
struct smc_soft_pptable_entry *entries;
struct amdgpu_device *adev = smu->adev;
uint32_t p2s_table_id = P2S_TABLE_ID_A;
int ret = 0, i, p2stable_count;
int var = (adev->pdev->device & 0xF);
char ucode_prefix[15];
if (amdgpu_sriov_vf(adev) ||
(amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)))
return 0;
if (!(adev->flags & AMD_IS_APU)) {
p2s_table_id = P2S_TABLE_ID_X;
if (var == 0x5)
p2s_table_id = P2S_TABLE_ID_3;
}
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
sizeof(ucode_prefix));
ret = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
"amdgpu/%s.bin", ucode_prefix);
if (ret)
goto out;
hdr = (const struct common_firmware_header *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(hdr);
v2_1 = (const struct smc_firmware_header_v2_1 *)adev->pm.fw->data;
entries = (struct smc_soft_pptable_entry *)
((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
p2stable_count = le32_to_cpu(v2_1->pptable_count);
for (i = 0; i < p2stable_count; i++) {
if (le32_to_cpu(entries[i].id) == p2s_table_id) {
smu->pptable_firmware.data =
((uint8_t *)v2_1 +
le32_to_cpu(entries[i].ppt_offset_bytes));
smu->pptable_firmware.size =
le32_to_cpu(entries[i].ppt_size_bytes);
break;
}
}
if (smu->pptable_firmware.data && smu->pptable_firmware.size) {
ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
ucode->ucode_id = AMDGPU_UCODE_ID_P2S_TABLE;
ucode->fw = &smu->pptable_firmware;
adev->firmware.fw_size += ALIGN(ucode->fw->size, PAGE_SIZE);
}
return 0;
out:
amdgpu_ucode_release(&adev->pm.fw);
return ret;
}
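/*
 * Allocate and register the driver-side SMU tables: the PMFW tool log
 * (bare metal only), the metrics table sized for the largest supported
 * layout, the I2C request buffer and the system metrics table, plus the
 * CPU-side metrics and PPTable caches.
 */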
static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
struct smu_v13_0_6_gpu_metrics *gpu_metrics;
void *driver_pptable __free(kfree) = NULL;
void *metrics_table __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int gpu_metrics_size = METRICS_TABLE_SIZE;
int ret;
if (!(adev->flags & AMD_IS_APU))
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
max(gpu_metrics_size,
smu_v13_0_12_get_max_metrics_size()),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
SMU_TABLE_INIT(tables, SMU_TABLE_PMFW_SYSTEM_METRICS,
smu_v13_0_12_get_system_metrics_size(), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
if (!metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
driver_pptable = kzalloc_obj(struct PPTable_t);
if (!driver_pptable)
return -ENOMEM;
ret = smu_driver_table_init(smu, SMU_DRIVER_TABLE_GPU_METRICS,
sizeof(struct smu_v13_0_6_gpu_metrics),
SMU_GPU_METRICS_CACHE_INTERVAL);
if (ret)
return ret;
gpu_metrics = (struct smu_v13_0_6_gpu_metrics *)smu_driver_table_ptr(
smu, SMU_DRIVER_TABLE_GPU_METRICS);
smu_v13_0_6_gpu_metrics_init(gpu_metrics, 1, 9);
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 12)) {
ret = smu_v13_0_12_tables_init(smu);
if (ret) {
smu_driver_table_fini(smu,
SMU_DRIVER_TABLE_GPU_METRICS);
return ret;
}
}
smu_table->metrics_table = no_free_ptr(metrics_table);
smu_table->driver_pptable = no_free_ptr(driver_pptable);
return 0;
}
static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu,
int policy)
{
struct amdgpu_device *adev = smu->adev;
int ret, param;
switch (policy) {
case SOC_PSTATE_DEFAULT:
param = 0;
break;
case SOC_PSTATE_0:
param = 1;
break;
case SOC_PSTATE_1:
param = 2;
break;
case SOC_PSTATE_2:
param = 3;
break;
default:
return -EINVAL;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetThrottlingPolicy,
param, NULL);
if (ret)
dev_err(adev->dev, "select soc pstate policy %d failed",
policy);
return ret;
}
static int smu_v13_0_6_select_plpd_policy(struct smu_context *smu, int level)
{
struct amdgpu_device *adev = smu->adev;
int ret, param;
switch (level) {
case XGMI_PLPD_DEFAULT:
param = PPSMC_PLPD_MODE_DEFAULT;
break;
case XGMI_PLPD_OPTIMIZED:
param = PPSMC_PLPD_MODE_OPTIMIZED;
break;
case XGMI_PLPD_DISALLOW:
param = 0;
break;
default:
return -EINVAL;
}
if (level == XGMI_PLPD_DISALLOW)
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_GmiPwrDnControl, param, NULL);
else
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_SelectPLPDMode, param, NULL);
if (ret)
dev_err(adev->dev,
"select xgmi per-link power down policy %d failed\n",
level);
return ret;
}
static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_dpm_policy *policy;
smu_dpm->dpm_context =
kzalloc_obj(struct smu_13_0_dpm_context);
if (!smu_dpm->dpm_context)
return -ENOMEM;
smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
smu_dpm->dpm_policies =
kzalloc_obj(struct smu_dpm_policy_ctxt);
if (!smu_dpm->dpm_policies) {
kfree(smu_dpm->dpm_context);
return -ENOMEM;
}
if (!(smu->adev->flags & AMD_IS_APU)) {
policy = &(smu_dpm->dpm_policies->policies[0]);
policy->policy_type = PP_PM_POLICY_SOC_PSTATE;
policy->level_mask = BIT(SOC_PSTATE_DEFAULT) |
BIT(SOC_PSTATE_0) | BIT(SOC_PSTATE_1) |
BIT(SOC_PSTATE_2);
policy->current_level = SOC_PSTATE_DEFAULT;
policy->set_policy = smu_v13_0_6_select_policy_soc_pstate;
smu_cmn_generic_soc_policy_desc(policy);
smu_dpm->dpm_policies->policy_mask |=
BIT(PP_PM_POLICY_SOC_PSTATE);
}
policy = &(smu_dpm->dpm_policies->policies[1]);
policy->policy_type = PP_PM_POLICY_XGMI_PLPD;
policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT) |
BIT(XGMI_PLPD_OPTIMIZED);
policy->current_level = XGMI_PLPD_DEFAULT;
policy->set_policy = smu_v13_0_6_select_plpd_policy;
smu_cmn_generic_plpd_policy_desc(policy);
smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD);
return 0;
}
static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
{
int ret = 0;
ret = smu_v13_0_6_tables_init(smu);
if (ret)
return ret;
ret = smu_v13_0_6_allocate_dpm_context(smu);
return ret;
}
static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu)
{
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
smu_v13_0_12_tables_fini(smu);
return smu_v13_0_fini_smc_tables(smu);
}
static int smu_v13_0_6_init_allowed_features(struct smu_context *smu)
{
smu_feature_list_set_all(smu, SMU_FEATURE_LIST_ALLOWED);
return 0;
}
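/*
 * Fetch the metrics table from the PMFW. Results are cached for 1 ms
 * unless bypass_cache forces a fresh read; the HDP cache is invalidated
 * before copying out of the driver table.
 */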
int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
bool bypass_cache)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
struct smu_table *table = &smu_table->driver_table;
int ret;
if (bypass_cache || !smu_table->metrics_time ||
time_after(jiffies,
smu_table->metrics_time + msecs_to_jiffies(1))) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
if (ret) {
dev_info(smu->adev->dev,
"Failed to export SMU metrics table!\n");
return ret;
}
amdgpu_hdp_invalidate(smu->adev, NULL);
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
smu_table->metrics_time = jiffies;
}
if (metrics_table)
memcpy(metrics_table, smu_table->metrics_table, table_size);
return 0;
}
static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
void *metrics, size_t max_size)
{
struct smu_table_context *smu_tbl_ctxt = &smu->smu_table;
uint32_t table_version = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].version;
uint32_t table_size = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].size;
struct amdgpu_pm_metrics *pm_metrics = metrics;
uint32_t pmfw_version;
int ret;
if (!pm_metrics || !max_size)
return -EINVAL;
if (max_size < (table_size + sizeof(pm_metrics->common_header)))
return -EOVERFLOW;
ret = smu_v13_0_6_get_metrics_table(smu, pm_metrics->data, true);
if (ret)
return ret;
smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
memset(&pm_metrics->common_header, 0,
sizeof(pm_metrics->common_header));
pm_metrics->common_header.mp1_ip_discovery_version =
amdgpu_ip_version(smu->adev, MP1_HWIP, 0);
pm_metrics->common_header.pmfw_version = pmfw_version;
pm_metrics->common_header.pmmetrics_version = table_version;
pm_metrics->common_header.structure_size =
sizeof(pm_metrics->common_header) + table_size;
return pm_metrics->common_header.structure_size;
}
static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu,
StaticMetricsTable_t *static_metrics)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
if (!static_metrics->InputTelemetryVoltageInmV) {
dev_warn(smu->adev->dev, "Invalid board voltage %d\n",
static_metrics->InputTelemetryVoltageInmV);
}
dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
static_metrics->pldmVersion[0] != 0xFFFFFFFF)
smu->adev->firmware.pldm_version =
static_metrics->pldmVersion[0];
}
int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
struct smu_table *table = &smu_table->driver_table;
int ret;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
if (ret) {
dev_info(smu->adev->dev,
"Failed to export static metrics table!\n");
return ret;
}
amdgpu_hdp_invalidate(smu->adev, NULL);
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
return 0;
}
static void smu_v13_0_6_update_caps(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) &&
!pptable->PPT1Max)
smu_v13_0_6_cap_clear(smu, SMU_CAP(FAST_PPT));
}
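/*
 * One-time population of the driver PPTable from the metrics table: poll
 * until the PMFW reports a non-zero accumulation counter, then snapshot
 * power limits, clock ranges, frequency tables and serial numbers. The
 * 13.0.12 static-metrics path is handled separately.
 */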
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
int version = smu_v13_0_6_get_metrics_version(smu);
int ret, i, retry = 100, n;
uint32_t table_version;
uint16_t max_speed;
uint8_t max_width;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
ret = smu_v13_0_12_setup_driver_pptable(smu);
if (ret)
return ret;
goto out;
}
if (!pptable->Init) {
while (--retry) {
ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
if (ret)
return ret;
if (GET_METRIC_FIELD(AccumulationCounter, version))
break;
usleep_range(1000, 1100);
}
if (!retry)
return -ETIME;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
&table_version);
if (ret)
return ret;
smu_table->tables[SMU_TABLE_SMU_METRICS].version =
table_version;
pptable->MaxSocketPowerLimit =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, version));
pptable->MaxGfxclkFrequency =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version));
pptable->MinGfxclkFrequency =
SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version));
max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version);
max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version);
amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, version)[i]);
pptable->UclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, version)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
GET_METRIC_FIELD(SocclkFrequencyTable, version)[i]);
pptable->VclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, version)[i]);
pptable->DclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, version)[i]);
pptable->LclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, version)[i]);
}
pptable->PublicSerialNumber_AID =
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
0, pptable->PublicSerialNumber_AID);
n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_AID);
for (i = 0; i < n; i++) {
amdgpu_device_set_uid(
smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
GET_METRIC_FIELD(PublicSerialNumber_AID,
version)[i]);
}
n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_XCD);
for (i = 0; i < n; i++) {
amdgpu_device_set_uid(
smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
GET_METRIC_FIELD(PublicSerialNumber_XCD,
version)[i]);
}
pptable->Init = true;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
if (ret)
return ret;
smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
}
}
out:
smu_v13_0_6_update_caps(smu);
return 0;
}
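/*
 * Return the ultimate (absolute) min/max frequency of a clock domain.
 * Cached DPM tables are preferred once the PPTable is initialized; empty
 * entries fall back to PMFW min/max queries.
 */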
static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
struct smu_dpm_table *dpm_table;
uint32_t min_clk, max_clk, param;
int ret = 0, clk_id = 0;
if (pptable->Init) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
dpm_table = &dpm_context->dpm_tables.uclk_table;
break;
case SMU_GFXCLK:
case SMU_SCLK:
dpm_table = &dpm_context->dpm_tables.gfx_table;
break;
case SMU_SOCCLK:
dpm_table = &dpm_context->dpm_tables.soc_table;
break;
case SMU_FCLK:
dpm_table = &dpm_context->dpm_tables.fclk_table;
break;
case SMU_VCLK:
dpm_table = &dpm_context->dpm_tables.vclk_table;
break;
case SMU_DCLK:
dpm_table = &dpm_context->dpm_tables.dclk_table;
break;
default:
return -EINVAL;
}
min_clk = SMU_DPM_TABLE_MIN(dpm_table);
max_clk = SMU_DPM_TABLE_MAX(dpm_table);
if (min)
*min = min_clk;
if (max)
*max = max_clk;
if (min_clk && max_clk)
return 0;
}
if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
clk_id = smu_cmn_to_asic_specific_index(
smu, CMN2ASIC_MAPPING_CLK, clk_type);
if (clk_id < 0) {
ret = -EINVAL;
goto failed;
}
param = (clk_id & 0xffff) << 16;
}
if (max) {
if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
ret = smu_cmn_send_smc_msg(
smu, SMU_MSG_GetMaxGfxclkFrequency, max);
else
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
ret = smu_cmn_send_smc_msg(
smu, SMU_MSG_GetMinGfxclkFrequency, min);
else
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_GetMinDpmFreq, param, min);
}
failed:
return ret;
}
static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *levels)
{
int ret;
ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
if (!ret)
++(*levels);
return ret;
}
static void smu_v13_0_6_pm_policy_init(struct smu_context *smu)
{
struct smu_dpm_policy *policy;
policy = smu_get_pm_policy(smu, PP_PM_POLICY_SOC_PSTATE);
if (policy)
policy->current_level = SOC_PSTATE_DEFAULT;
}
static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_dpm_table *dpm_table = NULL;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t gfxclkmin, gfxclkmax, levels;
int ret = 0, i, j;
struct smu_v13_0_6_dpm_map dpm_map[] = {
{ SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT,
&dpm_context->dpm_tables.soc_table,
pptable->SocclkFrequencyTable },
{ SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT,
&dpm_context->dpm_tables.uclk_table,
pptable->UclkFrequencyTable },
{ SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT,
&dpm_context->dpm_tables.fclk_table,
pptable->FclkFrequencyTable },
{ SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT,
&dpm_context->dpm_tables.vclk_table,
pptable->VclkFrequencyTable },
{ SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT,
&dpm_context->dpm_tables.dclk_table,
pptable->DclkFrequencyTable },
};
smu_v13_0_6_setup_driver_pptable(smu);
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM_POLICY))) {
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
smu_dpm->dpm_policies->policy_mask &=
~BIT(PP_PM_POLICY_SOC_PSTATE);
}
smu_v13_0_6_pm_policy_init(smu);
dpm_table = &dpm_context->dpm_tables.gfx_table;
dpm_table->clk_type = SMU_GFXCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
&gfxclkmin, &gfxclkmax);
if (ret)
return ret;
dpm_table->count = 2;
dpm_table->dpm_levels[0].value = gfxclkmin;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->dpm_levels[1].value = gfxclkmax;
dpm_table->dpm_levels[1].enabled = true;
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
dpm_table->dpm_levels[0].enabled = true;
}
for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
dpm_table = dpm_map[j].dpm_table;
levels = 1;
if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
ret = smu_v13_0_6_get_dpm_level_count(
smu, dpm_map[j].clk_type, &levels);
if (ret)
return ret;
}
dpm_table->count = levels;
dpm_table->clk_type = dpm_map[j].clk_type;
for (i = 0; i < dpm_table->count; ++i) {
dpm_table->dpm_levels[i].value =
dpm_map[j].freq_table[i];
dpm_table->dpm_levels[i].enabled = true;
}
}
return 0;
}
static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
table_context->thermal_controller_type = 0;
return 0;
}
static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
mp1_fw_flags =
RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
return 0;
return -EIO;
}
static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
pstate_table->gfxclk_pstate.peak = SMU_DPM_TABLE_MAX(gfx_table);
pstate_table->gfxclk_pstate.curr.min = SMU_DPM_TABLE_MIN(gfx_table);
pstate_table->gfxclk_pstate.curr.max = SMU_DPM_TABLE_MAX(gfx_table);
pstate_table->uclk_pstate.min = SMU_DPM_TABLE_MIN(mem_table);
pstate_table->uclk_pstate.peak = SMU_DPM_TABLE_MAX(mem_table);
pstate_table->uclk_pstate.curr.min = SMU_DPM_TABLE_MIN(mem_table);
pstate_table->uclk_pstate.curr.max = SMU_DPM_TABLE_MAX(mem_table);
pstate_table->socclk_pstate.min = SMU_DPM_TABLE_MIN(soc_table);
pstate_table->socclk_pstate.peak = SMU_DPM_TABLE_MAX(soc_table);
pstate_table->socclk_pstate.curr.min = SMU_DPM_TABLE_MIN(soc_table);
pstate_table->socclk_pstate.curr.max = SMU_DPM_TABLE_MAX(soc_table);
if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
pstate_table->gfxclk_pstate.standard =
gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value;
pstate_table->uclk_pstate.standard =
mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value;
pstate_table->socclk_pstate.standard =
soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value;
} else {
pstate_table->gfxclk_pstate.standard =
pstate_table->gfxclk_pstate.min;
pstate_table->uclk_pstate.standard =
pstate_table->uclk_pstate.min;
pstate_table->socclk_pstate.standard =
pstate_table->socclk_pstate.min;
}
return 0;
}
static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_13_0_power_context *power_context = smu_power->power_context;
uint32_t throttler_status = 0;
throttler_status = atomic_read(&power_context->throttle_status);
dev_dbg(smu->adev->dev, "SMU Throttler status: %u", throttler_status);
return throttler_status;
}
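/*
 * Translate a generic metrics member into the versioned metrics table
 * field, applying Q10 fixed-point rounding and unit conversions
 * (temperature and socket power included).
 */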
static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
if (ret)
return ret;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
xcc_id = GET_INST(GC, 0);
*value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
break;
case METRICS_CURR_VCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, version)[0]);
break;
case METRICS_CURR_DCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, version)[0]);
break;
case METRICS_CURR_FCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, version));
break;
case METRICS_AVERAGE_GFXACTIVITY:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version));
break;
case METRICS_AVERAGE_MEMACTIVITY:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version));
break;
case METRICS_CURR_SOCKETPOWER:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_VRSOC:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
*value = UINT_MAX;
break;
}
return ret;
}
static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
{
MetricsMember_t member_type;
if (!value)
return -EINVAL;
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
member_type = METRICS_CURR_GFXCLK;
break;
case SMU_UCLK:
case SMU_MCLK:
member_type = METRICS_CURR_UCLK;
break;
case SMU_SOCCLK:
member_type = METRICS_CURR_SOCCLK;
break;
case SMU_VCLK:
member_type = METRICS_CURR_VCLK;
break;
case SMU_DCLK:
member_type = METRICS_CURR_DCLK;
break;
case SMU_FCLK:
member_type = METRICS_CURR_FCLK;
break;
default:
return -EINVAL;
}
return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
}
static int smu_v13_0_6_emit_clk_levels(struct smu_context *smu,
enum smu_clk_type type, char *buf,
int *offset)
{
int now, size = *offset, start_offset = *offset;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct smu_dpm_table *single_dpm_table = NULL;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
if (amdgpu_ras_intr_triggered()) {
sysfs_emit_at(buf, size, "unavailable\n");
return -EBUSY;
}
dpm_context = smu_dpm->dpm_context;
switch (type) {
case SMU_OD_SCLK:
size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
pstate_table->gfxclk_pstate.curr.min,
pstate_table->gfxclk_pstate.curr.max);
break;
case SMU_OD_MCLK:
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX)))
return -EOPNOTSUPP;
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
pstate_table->uclk_pstate.curr.min,
pstate_table->uclk_pstate.curr.max);
break;
case SMU_SCLK:
case SMU_GFXCLK:
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
break;
case SMU_MCLK:
case SMU_UCLK:
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
break;
case SMU_SOCCLK:
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
break;
case SMU_FCLK:
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
break;
case SMU_VCLK:
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
break;
case SMU_DCLK:
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
break;
default:
break;
}
if (single_dpm_table) {
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, type,
&now);
if (ret) {
dev_err(smu->adev->dev,
"Attempt to get current clk Failed!");
return ret;
}
return smu_cmn_print_dpm_clk_levels(smu, single_dpm_table, now,
buf, offset);
}
*offset += size - start_offset;
return 0;
}
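/*
 * Program the soft min or max level (selected by 'max') for each clock
 * domain that is both enabled and present in feature_mask, using the
 * frequency stored at the given DPM level index.
 */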
static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
uint32_t feature_mask, uint32_t level)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
uint32_t freq;
int ret = 0;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) {
freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
ret = smu_cmn_send_smc_msg_with_param(
smu,
(max ? SMU_MSG_SetSoftMaxGfxClk :
SMU_MSG_SetSoftMinGfxclk),
freq & 0xffff, NULL);
if (ret) {
dev_err(smu->adev->dev,
"Failed to set soft %s gfxclk !\n",
max ? "max" : "min");
return ret;
}
}
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
(feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) {
freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level]
.value;
ret = smu_cmn_send_smc_msg_with_param(
smu,
(max ? SMU_MSG_SetSoftMaxByFreq :
SMU_MSG_SetSoftMinByFreq),
(PPCLK_UCLK << 16) | (freq & 0xffff), NULL);
if (ret) {
dev_err(smu->adev->dev,
"Failed to set soft %s memclk !\n",
max ? "max" : "min");
return ret;
}
}
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
(feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) {
freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
ret = smu_cmn_send_smc_msg_with_param(
smu,
(max ? SMU_MSG_SetSoftMaxByFreq :
SMU_MSG_SetSoftMinByFreq),
(PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL);
if (ret) {
dev_err(smu->adev->dev,
"Failed to set soft %s socclk !\n",
max ? "max" : "min");
return ret;
}
}
return ret;
}
static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
enum smu_clk_type type, uint32_t mask)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_dpm_table *single_dpm_table = NULL;
uint32_t soft_min_level, soft_max_level;
int ret = 0;
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
switch (type) {
case SMU_SCLK:
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
if (soft_max_level >= single_dpm_table->count) {
dev_err(smu->adev->dev,
"Clock level specified %d is over max allowed %d\n",
soft_max_level, single_dpm_table->count - 1);
ret = -EINVAL;
break;
}
ret = smu_v13_0_6_upload_dpm_level(
smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK),
soft_min_level);
if (ret) {
dev_err(smu->adev->dev,
"Failed to upload boot level to lowest!\n");
break;
}
ret = smu_v13_0_6_upload_dpm_level(
smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK),
soft_max_level);
if (ret)
dev_err(smu->adev->dev,
"Failed to upload dpm max level to highest!\n");
break;
case SMU_MCLK:
case SMU_SOCCLK:
case SMU_FCLK:
ret = -EINVAL;
break;
default:
break;
}
return ret;
}
static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
enum amd_pp_sensors sensor,
uint32_t *value)
{
int ret = 0;
if (!value)
return -EINVAL;
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = smu_v13_0_6_get_smu_metrics_data(
smu, METRICS_AVERAGE_GFXACTIVITY, value);
break;
case AMDGPU_PP_SENSOR_MEM_LOAD:
ret = smu_v13_0_6_get_smu_metrics_data(
smu, METRICS_AVERAGE_MEMACTIVITY, value);
break;
default:
dev_err(smu->adev->dev,
"Invalid sensor for retrieving clock activity\n");
return -EINVAL;
}
return ret;
}
static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
enum amd_pp_sensors sensor,
uint32_t *value)
{
int ret = 0;
if (!value)
return -EINVAL;
switch (sensor) {
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
ret = smu_v13_0_6_get_smu_metrics_data(
smu, METRICS_TEMPERATURE_HOTSPOT, value);
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
ret = smu_v13_0_6_get_smu_metrics_data(
smu, METRICS_TEMPERATURE_MEM, value);
break;
default:
dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
return -EINVAL;
}
return ret;
}
static int smu_v13_0_6_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor, void *data,
uint32_t *size)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
int ret = 0;
if (amdgpu_ras_intr_triggered())
return 0;
if (!data || !size)
return -EINVAL;
switch (sensor) {
case AMDGPU_PP_SENSOR_MEM_LOAD:
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = smu_v13_0_6_get_smu_metrics_data(smu,
METRICS_CURR_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
case AMDGPU_PP_SENSOR_MEM_TEMP:
ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(
smu, SMU_UCLK, (uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(
smu, SMU_GFXCLK, (uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDGFX:
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDBOARD:
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
*(uint32_t *)data = dpm_context->board_volt;
*size = 4;
break;
} else {
ret = -EOPNOTSUPP;
break;
}
case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
case AMDGPU_PP_SENSOR_NODEPOWER:
case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT:
ret = smu_v13_0_12_get_npm_data(smu, sensor, (uint32_t *)data);
if (ret)
return ret;
*size = 4;
break;
case AMDGPU_PP_SENSOR_UBB_POWER:
case AMDGPU_PP_SENSOR_UBB_POWER_LIMIT:
ret = smu_v13_0_12_get_system_power(smu, sensor, (uint32_t *)data);
if (ret)
return ret;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t power_limit = 0;
int ret;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
if (ret) {
dev_err(smu->adev->dev, "Couldn't get PPT limit");
return -EINVAL;
}
if (current_power_limit)
*current_power_limit = power_limit;
if (default_power_limit)
*default_power_limit = pptable->MaxSocketPowerLimit;
if (max_power_limit) {
*max_power_limit = pptable->MaxSocketPowerLimit;
}
if (min_power_limit)
*min_power_limit = 0;
return 0;
}
static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
enum smu_ppt_limit_type limit_type,
uint32_t limit)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
int ret;
if (limit_type == SMU_FAST_PPT_LIMIT) {
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)))
return -EOPNOTSUPP;
if (limit > pptable->PPT1Max || limit < pptable->PPT1Min) {
dev_err(smu->adev->dev,
"New power limit (%d) should be between min %d max %d\n",
limit, pptable->PPT1Min, pptable->PPT1Max);
return -EINVAL;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetFastPptLimit,
limit, NULL);
if (ret)
dev_err(smu->adev->dev, "Set fast PPT limit failed!\n");
return ret;
}
return smu_v13_0_set_power_limit(smu, limit_type, limit);
}
static int smu_v13_0_6_get_ppt_limit(struct smu_context *smu,
uint32_t *ppt_limit,
enum smu_ppt_limit_type type,
enum smu_ppt_limit_level level)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
int ret = 0;
if (type == SMU_FAST_PPT_LIMIT) {
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)))
return -EOPNOTSUPP;
switch (level) {
case SMU_PPT_LIMIT_MAX:
*ppt_limit = pptable->PPT1Max;
break;
case SMU_PPT_LIMIT_CURRENT:
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPptLimit, ppt_limit);
if (ret)
dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
break;
case SMU_PPT_LIMIT_DEFAULT:
*ppt_limit = pptable->PPT1Default;
break;
case SMU_PPT_LIMIT_MIN:
*ppt_limit = pptable->PPT1Min;
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
return -EOPNOTSUPP;
}
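/*
 * Process MP1 interrupts: ACK the software interrupt and, for thermal
 * throttling context ids, capture the throttle status and schedule the
 * rate-limited throttling log work.
 */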
static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct smu_context *smu = adev->powerplay.pp_handle;
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_13_0_power_context *power_context = smu_power->power_context;
uint32_t client_id = entry->client_id;
uint32_t ctxid = entry->src_data[0];
uint32_t src_id = entry->src_id;
uint32_t data;
if (client_id == SOC15_IH_CLIENTID_MP1) {
if (src_id == IH_INTERRUPT_ID_TO_DRIVER) {
data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
switch (ctxid) {
case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
atomic64_inc(&smu->throttle_int_counter);
if (!atomic_read(&adev->throttling_logging_enabled))
return 0;
if (__ratelimit(&adev->throttling_logging_rs)) {
atomic_set(
&power_context->throttle_status,
entry->src_data[1]);
schedule_work(&smu->throttling_logging_work);
}
break;
default:
dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
ctxid, client_id);
break;
}
}
}
return 0;
}
static int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
uint32_t val = 0;
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
break;
case AMDGPU_IRQ_STATE_ENABLE:
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
break;
default:
break;
}
return 0;
}
static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = {
.set = smu_v13_0_6_set_irq_state,
.process = smu_v13_0_6_irq_process,
};
static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_irq_src *irq_src = &smu->irq_source;
int ret = 0;
if (amdgpu_sriov_vf(adev))
return 0;
irq_src->num_types = 1;
irq_src->funcs = &smu_v13_0_6_irq_funcs;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
IH_INTERRUPT_ID_TO_DRIVER,
irq_src);
return ret;
}
static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{
if (amdgpu_in_reset(smu->adev))
return 0;
dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
return 0;
}
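/*
 * Toggle MCA debug mode. With debug mode enabled the PMFW is told not to
 * clear MCA banks on read (param 0); disabling it restores clear-on-read
 * for both UE and polled CE banks.
 */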
static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
{
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(MCA_DEBUG_MODE)))
return 0;
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK,
NULL);
}
static int smu_v13_0_6_system_features_control(struct smu_context *smu,
bool enable)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (amdgpu_sriov_vf(adev))
return 0;
if (enable) {
if (!(adev->flags & AMD_IS_APU))
ret = smu_v13_0_system_features_control(smu, enable);
} else {
smu_v13_0_6_notify_unload(smu);
}
return ret;
}
static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
uint32_t min,
uint32_t max)
{
int ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
max & 0xffff, NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
min & 0xffff, NULL);
return ret;
}
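/*
 * Apply a forced performance level. Leaving PERF_DETERMINISM disables
 * determinism and restores the stock max gfxclk; AUTO re-applies the
 * default gfx and uclk soft limits and clears any custom levels.
 */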
static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
struct smu_dpm_table *uclk_table = &dpm_context->dpm_tables.uclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
int ret;
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
(level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
pstate_table->gfxclk_pstate.curr.max =
SMU_DPM_TABLE_MAX(gfx_table);
}
switch (level) {
case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
return 0;
case AMD_DPM_FORCED_LEVEL_AUTO:
if ((SMU_DPM_TABLE_MIN(gfx_table) !=
pstate_table->gfxclk_pstate.curr.min) ||
(SMU_DPM_TABLE_MAX(gfx_table) !=
pstate_table->gfxclk_pstate.curr.max)) {
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
smu, SMU_DPM_TABLE_MIN(gfx_table),
SMU_DPM_TABLE_MAX(gfx_table));
if (ret)
return ret;
pstate_table->gfxclk_pstate.curr.min =
SMU_DPM_TABLE_MIN(gfx_table);
pstate_table->gfxclk_pstate.curr.max =
SMU_DPM_TABLE_MAX(gfx_table);
}
if (SMU_DPM_TABLE_MAX(uclk_table) !=
pstate_table->uclk_pstate.curr.max) {
ret = smu_v13_0_set_soft_freq_limited_range(
smu, SMU_UCLK, 0, SMU_DPM_TABLE_MAX(uclk_table),
false);
if (ret)
return ret;
pstate_table->uclk_pstate.curr.max =
SMU_DPM_TABLE_MAX(uclk_table);
}
smu_v13_0_reset_custom_level(smu);
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
return 0;
default:
break;
}
return -EOPNOTSUPP;
}
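/*
 * Set soft frequency limits for gfxclk/uclk. In MANUAL mode the requested
 * range is programmed directly; in PERF_DETERMINISM mode 'max' is
 * validated against the gfx table and passed to SMU_MSG_EnableDeterminism
 * as the target clock.
 */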
static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min, uint32_t max,
bool automatic)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct amdgpu_device *adev = smu->adev;
uint32_t min_clk;
uint32_t max_clk;
int ret = 0;
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
clk_type != SMU_UCLK)
return -EINVAL;
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
(smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
return -EINVAL;
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
if (min >= max) {
dev_err(smu->adev->dev,
"Minimum clk should be less than the maximum allowed clock\n");
return -EINVAL;
}
if (clk_type == SMU_GFXCLK) {
if ((min == pstate_table->gfxclk_pstate.curr.min) &&
(max == pstate_table->gfxclk_pstate.curr.max))
return 0;
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
smu, min, max);
if (!ret) {
pstate_table->gfxclk_pstate.curr.min = min;
pstate_table->gfxclk_pstate.curr.max = max;
}
}
if (clk_type == SMU_UCLK) {
if (max == pstate_table->uclk_pstate.curr.max)
return 0;
if (!smu_v13_0_6_cap_supported(smu,
SMU_CAP(SET_UCLK_MAX)))
return -EOPNOTSUPP;
ret = smu_v13_0_set_soft_freq_limited_range(
smu, SMU_UCLK, 0, max, false);
if (!ret)
pstate_table->uclk_pstate.curr.max = max;
}
return ret;
}
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
if (!max || (max < min_clk) || (max > max_clk)) {
dev_warn(
adev->dev,
"Invalid max frequency %d MHz specified for determinism\n",
max);
return -EINVAL;
}
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
max_clk);
if (!ret) {
usleep_range(500, 1000);
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_EnableDeterminism, max, NULL);
if (ret) {
dev_err(adev->dev,
"Failed to enable determinism at GFX clock %d MHz\n",
max);
} else {
pstate_table->gfxclk_pstate.curr.min = min_clk;
pstate_table->gfxclk_pstate.curr.max = max;
}
}
}
return ret;
}
static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_dpm_table *uclk_table = &dpm_context->dpm_tables.uclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
uint32_t min_clk;
uint32_t max_clk;
int ret = 0;
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
(smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
return -EINVAL;
switch (type) {
case PP_OD_EDIT_SCLK_VDDC_TABLE:
if (size != 2) {
dev_err(smu->adev->dev,
"Input parameter number not correct\n");
return -EINVAL;
}
min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
if (input[0] == 0) {
if (input[1] < min_clk) {
dev_warn(
smu->adev->dev,
"Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
input[1], min_clk);
pstate_table->gfxclk_pstate.custom.min =
pstate_table->gfxclk_pstate.curr.min;
return -EINVAL;
}
pstate_table->gfxclk_pstate.custom.min = input[1];
} else if (input[0] == 1) {
if (input[1] > max_clk) {
dev_warn(
smu->adev->dev,
"Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
input[1], max_clk);
pstate_table->gfxclk_pstate.custom.max =
pstate_table->gfxclk_pstate.curr.max;
return -EINVAL;
}
pstate_table->gfxclk_pstate.custom.max = input[1];
} else {
return -EINVAL;
}
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
if (size != 2) {
dev_err(smu->adev->dev,
"Input parameter number not correct\n");
return -EINVAL;
}
if (!smu_cmn_feature_is_enabled(smu,
SMU_FEATURE_DPM_UCLK_BIT)) {
dev_warn(smu->adev->dev,
"UCLK_LIMITS setting not supported!\n");
return -EOPNOTSUPP;
}
max_clk =
SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
if (input[0] == 0) {
dev_info(smu->adev->dev,
"Setting min UCLK level is not supported");
return -EINVAL;
} else if (input[0] == 1) {
if (input[1] > max_clk) {
dev_warn(
smu->adev->dev,
"Maximum UCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
input[1], max_clk);
pstate_table->uclk_pstate.custom.max =
pstate_table->uclk_pstate.curr.max;
return -EINVAL;
}
pstate_table->uclk_pstate.custom.max = input[1];
}
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size != 0) {
dev_err(smu->adev->dev,
"Input parameter number not correct\n");
return -EINVAL;
} else {
min_clk = SMU_DPM_TABLE_MIN(
&dpm_context->dpm_tables.gfx_table);
max_clk = SMU_DPM_TABLE_MAX(
&dpm_context->dpm_tables.gfx_table);
ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk, false);
if (ret)
return ret;
if (SMU_DPM_TABLE_MAX(uclk_table) !=
pstate_table->uclk_pstate.curr.max) {
min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.uclk_table);
max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
ret = smu_v13_0_6_set_soft_freq_limited_range(smu,
SMU_UCLK, min_clk,
max_clk, false);
if (ret)
return ret;
}
smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
if (size != 0) {
dev_err(smu->adev->dev,
"Input parameter number not correct\n");
return -EINVAL;
} else {
if (!pstate_table->gfxclk_pstate.custom.min)
pstate_table->gfxclk_pstate.custom.min =
pstate_table->gfxclk_pstate.curr.min;
if (!pstate_table->gfxclk_pstate.custom.max)
pstate_table->gfxclk_pstate.custom.max =
pstate_table->gfxclk_pstate.curr.max;
min_clk = pstate_table->gfxclk_pstate.custom.min;
max_clk = pstate_table->gfxclk_pstate.custom.max;
ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk, false);
if (ret)
return ret;
if (!pstate_table->uclk_pstate.custom.max)
return 0;
min_clk = pstate_table->uclk_pstate.curr.min;
max_clk = pstate_table->uclk_pstate.custom.max;
return smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_UCLK, min_clk, max_clk, false);
}
break;
default:
return -ENOSYS;
}
return ret;
}
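/*
 * Read the enabled feature mask. If the query fails with -EIO on parts
 * without the DPM capability, report an empty mask instead of an error.
 */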
static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
struct smu_feature_bits *feature_mask)
{
int ret;
ret = smu_cmn_get_enabled_mask(smu, feature_mask);
if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
smu_feature_bits_clearall(feature_mask);
ret = 0;
}
return ret;
}
static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
{
int ret;
struct smu_feature_bits feature_enabled;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
return smu_v13_0_12_is_dpm_running(smu);
ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
return smu_feature_bits_test_mask(&feature_enabled,
smu_v13_0_6_dpm_features.bits);
}
static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
void *table_data)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
struct amdgpu_device *adev = smu->adev;
uint32_t table_size;
int ret = 0;
if (!table_data)
return -EINVAL;
table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size;
memcpy(table->cpu_addr, table_data, table_size);
amdgpu_hdp_flush(adev, NULL);
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
NULL);
return ret;
}
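/*
 * Build a SwI2cRequest from the i2c_msg array: one SwI2cCmd per byte,
 * with a RESTART flagged whenever the transfer direction changes and a
 * STOP on the final byte of the last (or explicitly stopped) message.
 * The request is retried once on failure; read data is copied back from
 * the response image in the driver table.
 */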
static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
struct amdgpu_device *adev = smu_i2c->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
u16 dir;
if (!adev->pm.dpm_enabled)
return -EBUSY;
req = kzalloc_obj(*req);
if (!req)
return -ENOMEM;
req->I2CcontrollerPort = smu_i2c->port;
req->I2CSpeed = I2C_SPEED_FAST_400K;
req->SlaveAddress = msg[0].addr << 1;
dir = msg[0].flags & I2C_M_RD;
for (c = i = 0; i < num_msgs; i++) {
for (j = 0; j < msg[i].len; j++, c++) {
SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
if (!(msg[i].flags & I2C_M_RD)) {
cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
cmd->ReadWriteData = msg[i].buf[j];
}
if ((dir ^ msg[i].flags) & I2C_M_RD) {
dir = msg[i].flags & I2C_M_RD;
cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
}
req->NumCmds++;
if ((j == msg[i].len - 1) &&
((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
}
}
}
mutex_lock(&adev->pm.mutex);
r = smu_v13_0_6_request_i2c_xfer(smu, req);
if (r) {
r = smu_v13_0_6_request_i2c_xfer(smu, req);
if (r)
goto fail;
}
for (c = i = 0; i < num_msgs; i++) {
if (!(msg[i].flags & I2C_M_RD)) {
c += msg[i].len;
continue;
}
for (j = 0; j < msg[i].len; j++, c++) {
SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
msg[i].buf[j] = cmd->ReadWriteData;
}
}
r = num_msgs;
fail:
mutex_unlock(&adev->pm.mutex);
kfree(req);
return r;
}
static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm smu_v13_0_6_i2c_algo = {
.master_xfer = smu_v13_0_6_i2c_xfer,
.functionality = smu_v13_0_6_i2c_func,
};
static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks = {
.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
.max_read_len = MAX_SW_I2C_COMMANDS,
.max_write_len = MAX_SW_I2C_COMMANDS,
.max_comb_1st_msg_len = 2,
.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int res, i;
for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
struct i2c_adapter *control = &smu_i2c->adapter;
smu_i2c->adev = adev;
smu_i2c->port = i;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v13_0_6_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
control->quirks = &smu_v13_0_6_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
return res;
}
}
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
}
static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
adev->unique_id = pptable->PublicSerialNumber_AID;
}
static int smu_v13_0_6_get_bamaco_support(struct smu_context *smu)
{
return 0;
}
static const char *const throttling_logging_label[] = {
[THROTTLER_PROCHOT_BIT] = "Prochot",
[THROTTLER_PPT_BIT] = "PPT",
[THROTTLER_THERMAL_SOCKET_BIT] = "SOC",
[THROTTLER_THERMAL_VR_BIT] = "VR",
[THROTTLER_THERMAL_HBM_BIT] = "HBM"
};
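/*
 * Decode the PMFW throttler status into a human-readable warning and
 * forward the ASIC-independent status bits to the KFD SMI interface.
 */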
static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
{
int throttler_idx, throttling_events = 0, buf_idx = 0;
struct amdgpu_device *adev = smu->adev;
uint32_t throttler_status;
char log_buf[256];
throttler_status = smu_v13_0_6_get_throttler_status(smu);
if (!throttler_status)
return;
memset(log_buf, 0, sizeof(log_buf));
for (throttler_idx = 0;
throttler_idx < ARRAY_SIZE(throttling_logging_label);
throttler_idx++) {
if (throttler_status & (1U << throttler_idx)) {
throttling_events++;
buf_idx += snprintf(
log_buf + buf_idx, sizeof(log_buf) - buf_idx,
"%s%s", throttling_events > 1 ? " and " : "",
throttling_logging_label[throttler_idx]);
if (buf_idx >= sizeof(log_buf)) {
dev_err(adev->dev, "buffer overflow!\n");
log_buf[sizeof(log_buf) - 1] = '\0';
break;
}
}
}
dev_warn(adev->dev,
"WARN: GPU is throttled, expect performance decrease. %s.\n",
log_buf);
kgd2kfd_smi_event_throttle(
smu->adev->kfd.dev,
smu_cmn_get_indep_throttler_status(throttler_status,
smu_v13_0_6_throttler_map));
}
static int
smu_v13_0_6_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
return REG_GET_FIELD(RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL),
PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
}
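/*
 * Current link speed: if ESM (extended speed mode) is active (bit 15 of
 * PCIE_ESM_CTRL), the rate is read from bits [14:8] with a +128 offset;
 * otherwise the standard LC_CURRENT_DATA_RATE field is translated via
 * pcie_gen_to_speed().
 */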
static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t speed_level;
uint32_t esm_ctrl;
esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
if ((esm_ctrl >> 15) & 0x1)
return (((esm_ctrl >> 8) & 0x7F) + 128);
speed_level = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (speed_level > LINK_SPEED_MAX)
speed_level = 0;
return pcie_gen_to_speed(speed_level + 1);
}
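/*
 * Fill per-partition (XCP) metrics. A NULL table pointer is a size query.
 * VCN/JPEG activity and clocks are reported per instance; per-instance
 * GFX data additionally requires the PER_INST_METRICS capability.
 */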
static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
void *table)
{
const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
int version = smu_v13_0_6_get_metrics_version(smu);
struct smu_v13_0_6_partition_metrics *xcp_metrics;
struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
int ret, inst, i, j, k, idx;
MetricsTableV0_t *metrics_v0;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u32 inst_mask;
bool per_inst;
if (!table)
return sizeof(*xcp_metrics);
for_each_xcp(adev->xcp_mgr, xcp, i) {
if (xcp->id == xcp_id)
break;
}
if (i == adev->xcp_mgr->num_xcps)
return -EINVAL;
xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table;
smu_v13_0_6_partition_metrics_init(xcp_metrics, 1, 1);
ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
if (ret)
return ret;
metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 12) &&
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_xcp_metrics(smu, xcp, table,
metrics_v0);
metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
idx = 0;
for_each_inst(k, inst_mask) {
inst = GET_INST(VCN, k);
for (j = 0; j < num_jpeg_rings; ++j) {
xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
SMUQ10_ROUND(GET_METRIC_FIELD(
JpegBusy,
version)[(inst * num_jpeg_rings) + j]);
}
xcp_metrics->vcn_busy[idx] =
SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
GET_METRIC_FIELD(VclkFrequency, version)[inst]);
xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
GET_METRIC_FIELD(DclkFrequency, version)[inst]);
xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
GET_METRIC_FIELD(SocclkFrequency, version)[inst]);
idx++;
}
xcp_metrics->current_uclk =
SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
if (per_inst) {
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
idx = 0;
for_each_inst(k, inst_mask) {
inst = GET_INST(GC, k);
xcp_metrics->current_gfxclk[idx] =
SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency,
version)[inst]);
xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(
GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(
GET_GPU_METRIC_FIELD(GfxBusyAcc,
version)[inst]);
if (smu_v13_0_6_cap_supported(
smu, SMU_CAP(HST_LIMIT_METRICS))) {
xcp_metrics->gfx_below_host_limit_ppt_acc
[idx] = SMUQ10_ROUND(
metrics_v0->GfxclkBelowHostLimitPptAcc
[inst]);
xcp_metrics->gfx_below_host_limit_thm_acc
[idx] = SMUQ10_ROUND(
metrics_v0->GfxclkBelowHostLimitThmAcc
[inst]);
xcp_metrics->gfx_low_utilization_acc
[idx] = SMUQ10_ROUND(
metrics_v0
->GfxclkLowUtilizationAcc[inst]);
xcp_metrics->gfx_below_host_limit_total_acc
[idx] = SMUQ10_ROUND(
metrics_v0->GfxclkBelowHostLimitTotalAcc
[inst]);
}
idx++;
}
}
xcp_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, version);
xcp_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
return sizeof(*xcp_metrics);
}
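/*
 * Populate the full-device gpu_metrics table. On MP1 13.0.12 with static
 * metrics a version-specific helper fills the table; otherwise fields are
 * pulled from whichever metrics table revision the PMFW provides, with
 * PCIe data skipped on APUs and host-limit accumulators gated on
 * HST_LIMIT_METRICS.
 */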
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_v13_0_6_gpu_metrics *gpu_metrics;
int version = smu_v13_0_6_get_metrics_version(smu);
struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
int ret = 0, xcc_id, inst, i, j;
MetricsTableV0_t *metrics_v0;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
u16 link_width_level;
u8 num_jpeg_rings;
bool per_inst;
ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
if (ret)
return ret;
metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
gpu_metrics = (struct smu_v13_0_6_gpu_metrics *)smu_driver_table_ptr(
smu, SMU_DRIVER_TABLE_GPU_METRICS);
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0,
gpu_metrics);
goto fill;
}
metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
gpu_metrics->temperature_mem =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version));
gpu_metrics->temperature_vrsoc =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version));
gpu_metrics->average_gfx_activity =
SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version));
gpu_metrics->average_umc_activity =
SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version));
gpu_metrics->mem_max_bandwidth =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, version));
gpu_metrics->curr_socket_power =
SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version));
gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, version);
for (i = 0; i < MAX_GFX_CLKS; i++) {
xcc_id = GET_INST(GC, i);
if (xcc_id >= 0)
gpu_metrics->current_gfxclk[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]);
if (i < MAX_CLKS) {
gpu_metrics->current_socclk[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[i]);
inst = GET_INST(VCN, i);
if (inst >= 0) {
gpu_metrics->current_vclk0[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency,
version)[inst]);
gpu_metrics->current_dclk0[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency,
version)[inst]);
}
}
}
gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, version);
gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, version);
gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, version);
gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, version);
gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, version);
gpu_metrics->hbm_thm_residency_acc =
GET_METRIC_FIELD(HbmThmResidencyAcc, version);
gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak,
version) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) {
gpu_metrics->pcie_link_width = GET_GPU_METRIC_FIELD(PCIeLinkWidth, version);
gpu_metrics->pcie_link_speed =
pcie_gen_to_speed(GET_GPU_METRIC_FIELD(PCIeLinkSpeed, version));
} else if (!amdgpu_sriov_vf(adev)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
if (link_width_level > MAX_LINK_WIDTH)
link_width_level = 0;
gpu_metrics->pcie_link_width =
DECODE_LANE_WIDTH(link_width_level);
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
}
gpu_metrics->pcie_bandwidth_acc =
SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidthAcc, version)[0]);
gpu_metrics->pcie_bandwidth_inst =
SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidth, version)[0]);
gpu_metrics->pcie_l0_to_recov_count_acc =
GET_GPU_METRIC_FIELD(PCIeL0ToRecoveryCountAcc, version);
gpu_metrics->pcie_replay_count_acc =
GET_GPU_METRIC_FIELD(PCIenReplayAAcc, version);
gpu_metrics->pcie_replay_rover_count_acc =
GET_GPU_METRIC_FIELD(PCIenReplayARolloverCountAcc, version);
gpu_metrics->pcie_nak_sent_count_acc =
GET_GPU_METRIC_FIELD(PCIeNAKSentCountAcc, version);
gpu_metrics->pcie_nak_rcvd_count_acc =
GET_GPU_METRIC_FIELD(PCIeNAKReceivedCountAcc, version);
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS)))
gpu_metrics->pcie_lc_perf_other_end_recovery =
GET_GPU_METRIC_FIELD(PCIeOtherEndRecoveryAcc, version);
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, version));
gpu_metrics->mem_activity_acc =
SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
j = amdgpu_xgmi_get_ext_link(adev, i);
if (j < 0 || j >= NUM_XGMI_LINKS)
continue;
gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND(
GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND(
GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
gpu_metrics->xgmi_link_status[j] = ret;
}
per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
inst = GET_INST(JPEG, i);
for (j = 0; j < num_jpeg_rings; ++j)
gpu_metrics->jpeg_busy[(i * num_jpeg_rings) + j] =
SMUQ10_ROUND(GET_METRIC_FIELD(
JpegBusy,
version)[(inst * num_jpeg_rings) + j]);
}
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
inst = GET_INST(VCN, i);
gpu_metrics->vcn_busy[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
}
if (per_inst) {
for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
inst = GET_INST(GC, i);
gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND(
GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND(
GET_GPU_METRIC_FIELD(GfxBusyAcc,
version)[inst]);
if (smu_v13_0_6_cap_supported(
smu, SMU_CAP(HST_LIMIT_METRICS))) {
gpu_metrics->gfx_below_host_limit_ppt_acc
[i] = SMUQ10_ROUND(
metrics_v0->GfxclkBelowHostLimitPptAcc
[inst]);
gpu_metrics->gfx_below_host_limit_thm_acc
[i] = SMUQ10_ROUND(
metrics_v0->GfxclkBelowHostLimitThmAcc
[inst]);
gpu_metrics->gfx_low_utilization_acc
[i] = SMUQ10_ROUND(
metrics_v0
->GfxclkLowUtilizationAcc[inst]);
gpu_metrics->gfx_below_host_limit_total_acc
[i] = SMUQ10_ROUND(
metrics_v0->GfxclkBelowHostLimitTotalAcc
[inst]);
}
}
}
gpu_metrics->xgmi_link_width = GET_METRIC_FIELD(XgmiWidth, version);
gpu_metrics->xgmi_link_speed = GET_METRIC_FIELD(XgmiBitrate, version);
gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
fill:
*table = gpu_metrics;
smu_driver_table_update_cache_time(smu, SMU_DRIVER_TABLE_GPU_METRICS);
return sizeof(*gpu_metrics);
}
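/*
 * Rewrite the first 64 bytes of config space (the standard header) from
 * the saved copy and restore MSI state after a mode-2 reset.
 */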
static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int i;
for (i = 0; i < 16; i++)
pci_write_config_dword(adev->pdev, i * 4,
adev->pdev->saved_config_space[i]);
pci_restore_msi_state(adev->pdev);
}
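/*
 * Mode-2 reset: the reset message is sent without waiting for a response
 * (the response is only collected after the ASIC settles and config space
 * has been restored), then the ack is polled with up to 10 retries.
 */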
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
struct smu_msg_ctl *ctl = &smu->msg_ctl;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int timeout = 10;
mutex_lock(&ctl->lock);
ret = smu_msg_send_async_locked(ctl, SMU_MSG_GfxDeviceDriverReset,
SMU_RESET_MODE_2);
if (ret)
goto out;
msleep(200);
dev_dbg(adev->dev, "restore config space...\n");
amdgpu_device_load_pci_state(adev->pdev);
if (!(adev->flags & AMD_IS_APU))
smu_v13_0_6_restore_pci_config(smu);
dev_dbg(adev->dev, "wait for reset ack\n");
do {
ret = smu_msg_wait_response(ctl, 0);
if (ret == -ETIME) {
--timeout;
usleep_range(500, 1000);
continue;
}
if (ret)
goto out;
} while (ret == -ETIME && timeout);
out:
mutex_unlock(&ctl->lock);
if (ret)
dev_err(adev->dev, "failed to send mode2 reset, error code %d",
ret);
return ret;
}
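/*
 * Query CTF limits per thermal domain: the hotspot emergency trip is the
 * highest of the AID, XCD and (on APUs) CCD limits, HBM has its own
 * emergency limit, and the socket/HBM throttling limits supply the
 * critical trips. Values are converted to millidegrees Celsius.
 */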
static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
struct amdgpu_device *adev = smu->adev;
u32 aid_temp, xcd_temp, max_temp;
u32 ccd_temp = 0;
int ret;
if (amdgpu_sriov_vf(smu->adev))
return 0;
if (!range)
return -EINVAL;
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(CTF_LIMIT)))
return 0;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_AID_THM_TYPE, &aid_temp);
if (ret)
goto failed;
if (adev->flags & AMD_IS_APU) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_CCD_THM_TYPE, &ccd_temp);
if (ret)
goto failed;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_XCD_THM_TYPE, &xcd_temp);
if (ret)
goto failed;
range->hotspot_emergency_max = max3(aid_temp, xcd_temp, ccd_temp) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_HBM_THM_TYPE, &max_temp);
if (ret)
goto failed;
range->mem_emergency_max =
max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
PPSMC_THROTTLING_LIMIT_TYPE_SOCKET,
&max_temp);
if (ret)
goto failed;
range->hotspot_crit_max =
max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
PPSMC_THROTTLING_LIMIT_TYPE_HBM,
&max_temp);
if (ret)
goto failed;
range->mem_crit_max = max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
failed:
return ret;
}
static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
u32 fatal_err, param;
int ret = 0;
fatal_err = 0;
param = SMU_RESET_MODE_1;
if (amdgpu_ras_get_fed_status(adev))
fatal_err = 1;
param |= (fatal_err << 16);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
param, NULL);
if (!ret)
msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
return ret;
}
static int smu_v13_0_6_link_reset(struct smu_context *smu)
{
int ret = 0;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
SMU_RESET_MODE_4, NULL);
return ret;
}
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
return true;
}
static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int var = adev->pdev->device & 0xF;
/* link reset is supported only on specific low-nibble device-ID variants */
return (var == 0x0 || var == 0x1 || var == 0x3);
}
static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
uint32_t size)
{
int ret = 0;
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
if (ret)
dev_err(smu->adev->dev,
"[%s] failed to notify SMU of the HBM bad page count\n",
__func__);
return ret;
}
static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
{
int ret;
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(RMA_MSG)))
return 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
if (ret)
dev_err(smu->adev->dev,
"[%s] failed to send BadPageThreshold event to SMU\n",
__func__);
return ret;
}
static bool smu_v13_0_6_reset_sdma_is_supported(struct smu_context *smu)
{
bool ret = true;
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET))) {
dev_info(smu->adev->dev,
"SDMA reset capability is not supported\n");
ret = false;
}
return ret;
}
static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
if (!smu_v13_0_6_reset_sdma_is_supported(smu))
return -EOPNOTSUPP;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_ResetSDMA, inst_mask, NULL);
if (ret)
dev_err(smu->adev->dev,
"failed to send ResetSDMA event with mask 0x%x\n",
inst_mask);
return ret;
}
static bool smu_v13_0_6_reset_vcn_is_supported(struct smu_context *smu)
{
return smu_v13_0_6_cap_supported(smu, SMU_CAP(VCN_RESET));
}
static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL);
if (ret)
dev_err(smu->adev->dev,
"failed to send ResetVCN event with mask 0x%x\n",
inst_mask);
return ret;
}
static int smu_v13_0_6_ras_send_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t param, uint32_t *read_arg)
{
int ret;
switch (msg) {
case SMU_MSG_QueryValidMcaCount:
case SMU_MSG_QueryValidMcaCeCount:
case SMU_MSG_McaBankDumpDW:
case SMU_MSG_McaBankCeDumpDW:
case SMU_MSG_ClearMcaOnRead:
ret = smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg);
break;
default:
ret = -EPERM;
}
return ret;
}
static int smu_v13_0_6_post_init(struct smu_context *smu)
{
if (smu_v13_0_6_is_link_reset_supported(smu))
smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
if (smu_v13_0_6_reset_sdma_is_supported(smu))
smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
if (smu_v13_0_6_reset_vcn_is_supported(smu))
smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
return 0;
}
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
return smu_v13_0_6_mca_set_debug_mode(smu, enable);
}
static int smu_v13_0_6_get_valid_mca_count(struct smu_context *smu, enum amdgpu_mca_error_type type, uint32_t *count)
{
uint32_t msg;
int ret;
if (!count)
return -EINVAL;
switch (type) {
case AMDGPU_MCA_ERROR_TYPE_UE:
msg = SMU_MSG_QueryValidMcaCount;
break;
case AMDGPU_MCA_ERROR_TYPE_CE:
msg = SMU_MSG_QueryValidMcaCeCount;
break;
default:
return -EINVAL;
}
ret = smu_cmn_send_smc_msg(smu, msg, count);
if (ret) {
*count = 0;
return ret;
}
return 0;
}
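/*
 * Dump one dword of an MCA bank: the message parameter packs the bank
 * index into the upper 16 bits and the dword-aligned byte offset (masked
 * with 0xfffc) into the lower 16 bits.
 */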
static int __smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
int idx, int offset, uint32_t *val)
{
uint32_t msg, param;
switch (type) {
case AMDGPU_MCA_ERROR_TYPE_UE:
msg = SMU_MSG_McaBankDumpDW;
break;
case AMDGPU_MCA_ERROR_TYPE_CE:
msg = SMU_MSG_McaBankCeDumpDW;
break;
default:
return -EINVAL;
}
param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
return smu_cmn_send_smc_msg_with_param(smu, msg, param, val);
}
static int smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
int idx, int offset, uint32_t *val, int count)
{
int ret, i;
if (!val)
return -EINVAL;
for (i = 0; i < count; i++) {
ret = __smu_v13_0_6_mca_dump_bank(smu, type, idx, offset + (i << 2), &val[i]);
if (ret)
return ret;
}
return 0;
}
static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT] = {
MCA_BANK_IPID(UMC, 0x96, 0x0),
MCA_BANK_IPID(SMU, 0x01, 0x1),
MCA_BANK_IPID(MP5, 0x01, 0x2),
MCA_BANK_IPID(PCS_XGMI, 0x50, 0x0),
};
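/*
 * Decode bank topology from the IPID register: hardware ID and MCA type
 * come straight from the register fields, while the AID and socket ID are
 * reassembled from the split InstanceIdHi/InstanceIdLo encoding.
 */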
static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info)
{
u64 ipid = entry->regs[MCA_REG_IDX_IPID];
u32 instidhi, instid;
info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
instidhi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi);
instid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo);
info->aid = ((instidhi >> 2) & 0x03);
info->socket_id = ((instid & 0x1) << 2) | (instidhi & 0x03);
}
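/* Each 64-bit MCA register is fetched as two dwords at offset reg_idx * 8. */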
static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
int idx, int reg_idx, uint64_t *val)
{
struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t data[2] = {0, 0};
int ret;
if (!val || reg_idx >= MCA_REG_IDX_COUNT)
return -EINVAL;
ret = smu_v13_0_6_mca_dump_bank(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
if (ret)
return ret;
*val = (uint64_t)data[1] << 32 | data[0];
dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
return 0;
}
static int mca_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
int idx, struct mca_bank_entry *entry)
{
int i, ret;
for (i = 0; i < ARRAY_SIZE(entry->regs); i++) {
ret = mca_bank_read_reg(adev, type, idx, i, &entry->regs[i]);
if (ret)
return ret;
}
entry->idx = idx;
entry->type = type;
mca_bank_entry_info_decode(entry, &entry->info);
return 0;
}
static int mca_decode_ipid_to_hwip(uint64_t val)
{
const struct mca_bank_ipid *ipid;
uint16_t hwid, mcatype;
int i;
hwid = REG_GET_FIELD(val, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(val, MCMP1_IPIDT0, McaType);
for (i = 0; i < ARRAY_SIZE(smu_v13_0_6_mca_ipid_table); i++) {
ipid = &smu_v13_0_6_mca_ipid_table[i];
if (!ipid->hwid)
continue;
if (ipid->hwid == hwid && ipid->mcatype == mcatype)
return i;
}
return AMDGPU_MCA_IP_UNKNOW;
}
static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
uint64_t status0;
uint32_t ext_error_code;
uint32_t odecc_err_cnt;
status0 = entry->regs[MCA_REG_IDX_STATUS];
ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status0);
odecc_err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
*count = 0;
return 0;
}
if (umc_v12_0_is_deferred_error(adev, status0) ||
umc_v12_0_is_uncorrectable_error(adev, status0) ||
umc_v12_0_is_correctable_error(adev, status0))
*count = (ext_error_code == 0) ? odecc_err_cnt : 1;
amdgpu_umc_update_ecc_status(adev,
entry->regs[MCA_REG_IDX_STATUS],
entry->regs[MCA_REG_IDX_IPID],
entry->regs[MCA_REG_IDX_ADDR]);
return 0;
}
static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry,
uint32_t *count)
{
u32 ext_error_code;
u32 err_cnt;
ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
(ext_error_code == 0 || ext_error_code == 9))
*count = err_cnt;
else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
*count = err_cnt;
return 0;
}
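/* Banks without an error-code filter list accept any error code. */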
static bool mca_smu_check_error_code(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
uint32_t errcode)
{
int i;
if (!mca_ras->err_code_count || !mca_ras->err_code_array)
return true;
for (i = 0; i < mca_ras->err_code_count; i++) {
if (errcode == mca_ras->err_code_array[i])
return true;
}
return false;
}
static int mca_gfx_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
uint64_t status0, misc0;
status0 = entry->regs[MCA_REG_IDX_STATUS];
if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
*count = 0;
return 0;
}
if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
*count = 1;
return 0;
} else {
misc0 = entry->regs[MCA_REG_IDX_MISC0];
*count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
}
return 0;
}
static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
uint64_t status0, misc0;
status0 = entry->regs[MCA_REG_IDX_STATUS];
if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
*count = 0;
return 0;
}
if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
if (count)
*count = 1;
return 0;
}
misc0 = entry->regs[MCA_REG_IDX_MISC0];
*count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
return 0;
}
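/*
 * A GFX bank is recognized by its InstanceIdLo value (bit 0 masked off);
 * the values below correspond to the XCD instances reporting via the SMU.
 */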
static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{
uint32_t instlo;
instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
instlo &= GENMASK(31, 1);
switch (instlo) {
case 0x36430400:
case 0x38430400:
case 0x40430400:
return true;
default:
return false;
}
}
static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{
struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t errcode, instlo;
instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
instlo &= GENMASK(31, 1);
if (instlo != 0x03b30400)
return false;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) {
errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
errcode &= 0xff;
} else {
errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
}
return mca_smu_check_error_code(adev, mca_ras, errcode);
}
static int sdma_err_codes[] = { CODE_SDMA0, CODE_SDMA1, CODE_SDMA2, CODE_SDMA3 };
static int mmhub_err_codes[] = {
CODE_DAGB0, CODE_DAGB0 + 1, CODE_DAGB0 + 2, CODE_DAGB0 + 3, CODE_DAGB0 + 4,
CODE_EA0, CODE_EA0 + 1, CODE_EA0 + 2, CODE_EA0 + 3, CODE_EA0 + 4,
CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE,
};
static int vcn_err_codes[] = {
CODE_VIDD, CODE_VIDV,
};
static int jpeg_err_codes[] = {
CODE_JPEG0S, CODE_JPEG0D, CODE_JPEG1S, CODE_JPEG1D,
CODE_JPEG2S, CODE_JPEG2D, CODE_JPEG3S, CODE_JPEG3D,
CODE_JPEG4S, CODE_JPEG4D, CODE_JPEG5S, CODE_JPEG5D,
CODE_JPEG6S, CODE_JPEG6D, CODE_JPEG7S, CODE_JPEG7D,
};
static const struct mca_ras_info mca_ras_table[] = {
{
.blkid = AMDGPU_RAS_BLOCK__UMC,
.ip = AMDGPU_MCA_IP_UMC,
.get_err_count = mca_umc_mca_get_err_count,
}, {
.blkid = AMDGPU_RAS_BLOCK__GFX,
.ip = AMDGPU_MCA_IP_SMU,
.get_err_count = mca_gfx_mca_get_err_count,
.bank_is_valid = mca_gfx_smu_bank_is_valid,
}, {
.blkid = AMDGPU_RAS_BLOCK__SDMA,
.ip = AMDGPU_MCA_IP_SMU,
.err_code_array = sdma_err_codes,
.err_code_count = ARRAY_SIZE(sdma_err_codes),
.get_err_count = mca_smu_mca_get_err_count,
.bank_is_valid = mca_smu_bank_is_valid,
}, {
.blkid = AMDGPU_RAS_BLOCK__MMHUB,
.ip = AMDGPU_MCA_IP_SMU,
.err_code_array = mmhub_err_codes,
.err_code_count = ARRAY_SIZE(mmhub_err_codes),
.get_err_count = mca_smu_mca_get_err_count,
.bank_is_valid = mca_smu_bank_is_valid,
}, {
.blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL,
.ip = AMDGPU_MCA_IP_PCS_XGMI,
.get_err_count = mca_pcs_xgmi_mca_get_err_count,
}, {
.blkid = AMDGPU_RAS_BLOCK__VCN,
.ip = AMDGPU_MCA_IP_SMU,
.err_code_array = vcn_err_codes,
.err_code_count = ARRAY_SIZE(vcn_err_codes),
.get_err_count = mca_smu_mca_get_err_count,
.bank_is_valid = mca_smu_bank_is_valid,
}, {
.blkid = AMDGPU_RAS_BLOCK__JPEG,
.ip = AMDGPU_MCA_IP_SMU,
.err_code_array = jpeg_err_codes,
.err_code_count = ARRAY_SIZE(jpeg_err_codes),
.get_err_count = mca_smu_mca_get_err_count,
.bank_is_valid = mca_smu_bank_is_valid,
},
};
static const struct mca_ras_info *mca_get_mca_ras_info(struct amdgpu_device *adev, enum amdgpu_ras_block blkid)
{
int i;
for (i = 0; i < ARRAY_SIZE(mca_ras_table); i++) {
if (mca_ras_table[i].blkid == blkid)
return &mca_ras_table[i];
}
return NULL;
}
static int mca_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
switch (type) {
case AMDGPU_MCA_ERROR_TYPE_UE:
case AMDGPU_MCA_ERROR_TYPE_CE:
ret = smu_v13_0_6_get_valid_mca_count(smu, type, count);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{
if (mca_decode_ipid_to_hwip(entry->regs[MCA_REG_IDX_IPID]) != mca_ras->ip)
return false;
if (mca_ras->bank_is_valid)
return mca_ras->bank_is_valid(mca_ras, adev, type, entry);
return true;
}
static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
struct mca_bank_entry *entry, uint32_t *count)
{
const struct mca_ras_info *mca_ras;
if (!entry || !count)
return -EINVAL;
mca_ras = mca_get_mca_ras_info(adev, blk);
if (!mca_ras)
return -EOPNOTSUPP;
if (!mca_bank_is_valid(adev, mca_ras, type, entry)) {
*count = 0;
return 0;
}
return mca_ras->get_err_count(mca_ras, adev, type, entry, count);
}
static int mca_smu_get_mca_entry(struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry)
{
return mca_get_mca_entry(adev, type, idx, entry);
}
static int mca_smu_get_valid_mca_count(struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, uint32_t *count)
{
return mca_get_valid_mca_count(adev, type, count);
}
static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
.max_ue_count = 12,
.max_ce_count = 12,
.mca_set_debug_mode = mca_smu_set_debug_mode,
.mca_parse_mca_error_count = mca_smu_parse_mca_error_count,
.mca_get_mca_entry = mca_smu_get_mca_entry,
.mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
};
static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
return smu_v13_0_6_mca_set_debug_mode(smu, enable);
}
static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_smu_type type, u32 *count)
{
uint32_t msg;
int ret;
if (!count)
return -EINVAL;
switch (type) {
case ACA_SMU_TYPE_UE:
msg = SMU_MSG_QueryValidMcaCount;
break;
case ACA_SMU_TYPE_CE:
msg = SMU_MSG_QueryValidMcaCeCount;
break;
default:
return -EINVAL;
}
ret = smu_cmn_send_smc_msg(smu, msg, count);
if (ret) {
*count = 0;
return ret;
}
return 0;
}
static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
enum aca_smu_type type, u32 *count)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
switch (type) {
case ACA_SMU_TYPE_UE:
case ACA_SMU_TYPE_CE:
ret = smu_v13_0_6_get_valid_aca_count(smu, type, count);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
int idx, int offset, u32 *val)
{
uint32_t msg, param;
switch (type) {
case ACA_SMU_TYPE_UE:
msg = SMU_MSG_McaBankDumpDW;
break;
case ACA_SMU_TYPE_CE:
msg = SMU_MSG_McaBankCeDumpDW;
break;
default:
return -EINVAL;
}
param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val);
}
static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
int idx, int offset, u32 *val, int count)
{
int ret, i;
if (!val)
return -EINVAL;
for (i = 0; i < count; i++) {
ret = __smu_v13_0_6_aca_bank_dump(smu, type, idx, offset + (i << 2), &val[i]);
if (ret)
return ret;
}
return 0;
}
static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_smu_type type,
int idx, int reg_idx, u64 *val)
{
struct smu_context *smu = adev->powerplay.pp_handle;
u32 data[2] = {0, 0};
int ret;
if (!val || reg_idx >= ACA_REG_IDX_COUNT)
return -EINVAL;
ret = smu_v13_0_6_aca_bank_dump(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
if (ret)
return ret;
*val = (u64)data[1] << 32 | data[0];
dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
type == ACA_SMU_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
return 0;
}
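/* Read the first (at most 16) bank registers into the ACA entry. */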
static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
enum aca_smu_type type, int idx, struct aca_bank *bank)
{
int i, ret, count;
count = min_t(int, 16, ARRAY_SIZE(bank->regs));
for (i = 0; i < count; i++) {
ret = aca_bank_read_reg(adev, type, idx, i, &bank->regs[i]);
if (ret)
return ret;
}
return 0;
}
static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int error_code;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND)))
error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
else
error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
return error_code & 0xff;
}
static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
.max_ue_bank_count = 12,
.max_ce_bank_count = 12,
.set_debug_mode = aca_smu_set_debug_mode,
.get_valid_aca_count = aca_smu_get_valid_aca_count,
.get_valid_aca_bank = aca_smu_get_valid_aca_bank,
.parse_error_code = aca_smu_parse_error_code,
};
static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu)
{
smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
== IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL;
}
static int smu_v13_0_6_get_ras_smu_drv(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv)
{
if (!ras_smu_drv)
return -EINVAL;
if (amdgpu_sriov_vf(smu->adev))
return -EOPNOTSUPP;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_HROM_EN_BIT))
smu_v13_0_6_cap_set(smu, SMU_CAP(RAS_EEPROM));
switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 12):
*ras_smu_drv = &smu_v13_0_12_ras_smu_drv;
break;
default:
*ras_smu_drv = NULL;
break;
}
return 0;
}
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.init_allowed_features = smu_v13_0_6_init_allowed_features,
.set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
.populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
.emit_clk_levels = smu_v13_0_6_emit_clk_levels,
.force_clk_levels = smu_v13_0_6_force_clk_levels,
.read_sensor = smu_v13_0_6_read_sensor,
.set_performance_level = smu_v13_0_6_set_performance_level,
.get_power_limit = smu_v13_0_6_get_power_limit,
.is_dpm_running = smu_v13_0_6_is_dpm_running,
.get_unique_id = smu_v13_0_6_get_unique_id,
.init_microcode = smu_v13_0_6_init_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_6_init_smc_tables,
.fini_smc_tables = smu_v13_0_6_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
.check_fw_version = smu_v13_0_6_check_fw_version,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
.system_features_control = smu_v13_0_6_system_features_control,
.get_enabled_mask = smu_v13_0_6_get_enabled_mask,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.set_power_limit = smu_v13_0_6_set_power_limit,
.get_ppt_limit = smu_v13_0_6_get_ppt_limit,
.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
.register_irq_handler = smu_v13_0_6_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
.setup_pptable = smu_v13_0_6_setup_pptable,
.get_bamaco_support = smu_v13_0_6_get_bamaco_support,
.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
.get_pm_metrics = smu_v13_0_6_get_pm_metrics,
.get_xcp_metrics = smu_v13_0_6_get_xcp_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
.link_reset = smu_v13_0_6_link_reset,
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
.dpm_reset_vcn = smu_v13_0_6_reset_vcn,
.post_init = smu_v13_0_6_post_init,
.ras_send_msg = smu_v13_0_6_ras_send_msg,
.get_ras_smu_drv = smu_v13_0_6_get_ras_smu_drv,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
{
const struct cmn2asic_msg_mapping *message_map;
smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
message_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
smu_v13_0_12_message_map : smu_v13_0_6_message_map;
smu->clock_map = smu_v13_0_6_clk_map;
smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
smu->smc_driver_if_version = SMU_IGNORE_IF_VERSION;
smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
smu_v13_0_init_msg_ctl(smu, message_map);
smu_v13_0_6_set_temp_funcs(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
}