author | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2021-05-26 09:40:54 -0400
---|---|---
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2021-05-26 09:40:54 -0400
commit | abfe041de01f16d74df522d92cf5e3f6523971dd (patch) |
tree | 1cd997085149cf09e110baa4f98725ed18ffa05a /drivers/gpu/drm/amd/pm |
parent | 7bc188cc2c8c7b21bfa0782f0d22bbf245ce1f63 (diff) |
parent | 9a91e5e0af5e03940d0eec72c36364a1701de240 (diff) |
Merge drm/drm-next into drm-intel-next
Getting in sync with -rc2
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/pm')
-rw-r--r-- | drivers/gpu/drm/amd/pm/amdgpu_pm.c | 66
-rw-r--r-- | drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 8
-rw-r--r-- | drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h | 366
-rw-r--r-- | drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h | 7
-rw-r--r-- | drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 1
-rw-r--r-- | drivers/gpu/drm/amd/pm/inc/smu_v13_0.h | 2
-rw-r--r-- | drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 21
-rw-r--r-- | drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c | 8
-rw-r--r-- | drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c | 2
-rw-r--r-- | drivers/gpu/drm/amd/pm/powerplay/si_dpm.c | 174
-rw-r--r-- | drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h | 34
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 25
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 797
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 13
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 99
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 9
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 7
17 files changed, 1423 insertions, 216 deletions
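Two user-visible interfaces are touched in the diff below: the extended pp_od_clk_voltage overdrive syntax for APUs/VanGogh (documented in the amdgpu_pm.c hunk, "p core index clock") and the new read-only amdgpu_pm_prv_buffer debugfs node that dumps the PMFW debug memory region. The following is a minimal userspace sketch, not part of the patch; the card0/dri/0 paths and the example clock values are illustrative assumptions only.

```c
/*
 * Hedged userspace sketch (not part of the patch): exercises the two
 * interfaces added/documented in the diff below. Paths (card0, dri/0)
 * and the VanGogh example values are assumptions for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd;

	/*
	 * pp_od_clk_voltage (see the amdgpu_pm.c documentation hunk):
	 * on VanGogh, "p core index clock" adjusts a per-core cclk limit,
	 * e.g. the minimum (index 0) of core 2 to 800 MHz.
	 */
	fd = open("/sys/class/drm/card0/device/pp_od_clk_voltage", O_WRONLY);
	if (fd >= 0) {
		write(fd, "p 2 0 800", strlen("p 2 0 800"));
		write(fd, "c", 1);	/* commit the edited settings */
		close(fd);
	}

	/*
	 * amdgpu_pm_prv_buffer (created below with debugfs_create_file_size):
	 * a read-only dump of the memory region reserved for PMFW debug.
	 */
	fd = open("/sys/kernel/debug/dri/0/amdgpu_pm_prv_buffer", O_RDONLY);
	if (fd >= 0) {
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
	}

	return 0;
}
```

Per the existing pp_od_clk_voltage documentation, writing "c" commits the edited tables and "r" restores the defaults; manual mode must be selected via power_dpm_force_performance_level first.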
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 9a54066ec0af..13da377888d2 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -735,6 +735,23 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * - a list of valid ranges for sclk, mclk, and voltage curve points * labeled OD_RANGE * + * < For APUs > + * + * Reading the file will display: + * + * - minimum and maximum engine clock labeled OD_SCLK + * + * - a list of valid ranges for sclk labeled OD_RANGE + * + * < For VanGogh > + * + * Reading the file will display: + * + * - minimum and maximum engine clock labeled OD_SCLK + * - minimum and maximum core clocks labeled OD_CCLK + * + * - a list of valid ranges for sclk and cclk labeled OD_RANGE + * * To manually adjust these settings: * * - First select manual using power_dpm_force_performance_level @@ -743,7 +760,10 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * string that contains "s/m index clock" to the file. The index * should be 0 if to set minimum clock. And 1 if to set maximum * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. - * "m 1 800" will update maximum mclk to be 800Mhz. + * "m 1 800" will update maximum mclk to be 800Mhz. For core + * clocks on VanGogh, the string contains "p core index clock". + * E.g., "p 2 0 800" would set the minimum core clock on core + * 2 to 800Mhz. * * For sclk voltage curve, enter the new values by writing a * string that contains "vc point clock voltage" to the file. The @@ -3534,6 +3554,45 @@ out: DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info); +/* + * amdgpu_pm_priv_buffer_read - Read memory region allocated to FW + * + * Reads debug memory region allocated to PMFW + */ +static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + void *pp_handle = adev->powerplay.pp_handle; + size_t smu_prv_buf_size; + void *smu_prv_buf; + + if (amdgpu_in_reset(adev)) + return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; + + if (pp_funcs && pp_funcs->get_smu_prv_buf_details) + pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf, + &smu_prv_buf_size); + else + return -ENOSYS; + + if (!smu_prv_buf || !smu_prv_buf_size) + return -EINVAL; + + return simple_read_from_buffer(buf, size, pos, smu_prv_buf, + smu_prv_buf_size); +} + +static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = amdgpu_pm_prv_buffer_read, + .llseek = default_llseek, +}; + #endif void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) @@ -3545,5 +3604,10 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) debugfs_create_file("amdgpu_pm_info", 0444, root, adev, &amdgpu_debugfs_pm_info_fops); + if (adev->pm.smu_prv_buffer_size > 0) + debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root, + adev, + &amdgpu_debugfs_pm_prv_buffer_fops, + adev->pm.smu_prv_buffer_size); #endif } diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 8bb224f6c762..523f9d2982e9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -392,10 +392,18 @@ struct smu_baco_context bool platform_support; }; +struct smu_freq_info { + uint32_t min; + uint32_t max; + uint32_t freq_level; +}; + struct pstates_clk_freq { uint32_t min; uint32_t 
standard; uint32_t peak; + struct smu_freq_info custom; + struct smu_freq_info curr; }; struct smu_umd_pstate_table { diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h index fa95147b5a63..7a6d049e65e3 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h +++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h @@ -129,8 +129,8 @@ #define FEATURE_SMNCLK_DPM_BIT 47 #define FEATURE_PERLINK_GMIDOWN_BIT 48 #define FEATURE_GFX_EDC_BIT 49 -#define FEATURE_SPARE_50_BIT 50 -#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_GFX_PER_PART_VMIN_BIT 50 +#define FEATURE_SMART_SHIFT_BIT 51 #define FEATURE_SPARE_52_BIT 52 #define FEATURE_SPARE_53_BIT 53 #define FEATURE_SPARE_54_BIT 54 @@ -941,6 +941,367 @@ typedef struct { } PPTable_t; typedef struct { + // MAJOR SECTION: SKU PARAMETERS + + uint32_t Version; + + // SECTION: Feature Enablement + uint32_t FeaturesToRun[NUM_FEATURES / 32]; + + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // Watts + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // Watts + uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + + uint16_t TdcLimit[TDC_THROTTLER_COUNT]; // Amps + uint16_t TdcLimitTau[TDC_THROTTLER_COUNT]; // Time constant of LPF in ms + + uint16_t TemperatureLimit[TEMP_COUNT]; // Celcius + + uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) + + // SECTION: Power Configuration + uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured. Use defines from PwrConfig_e + uint8_t TotalPowerPadding[3]; + + // SECTION: APCC Settings + uint32_t ApccPlusResidencyLimit; + + //SECTION: SMNCLK DPM + uint16_t SmnclkDpmFreq [NUM_SMNCLK_DPM_LEVELS]; // in MHz + uint16_t SmnclkDpmVoltage [NUM_SMNCLK_DPM_LEVELS]; // mV(Q2) + + uint32_t PaddingAPCC; + uint16_t PerPartDroopVsetGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //In mV(Q2) + uint16_t PaddingPerPartDroop; + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See Throtter masks defines + + // SECTION: FW DSTATE Settings + uint32_t FwDStateMask; // See FW DState masks defines + + // SECTION: ULV Settings + uint16_t UlvVoltageOffsetSoc; // In mV(Q2) + uint16_t UlvVoltageOffsetGfx; // In mV(Q2) + + uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode + uint16_t MinVoltageUlvSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC in ULV mode + + uint16_t SocLIVmin; + uint16_t SocLIVminoffset; + + uint8_t GceaLinkMgrIdleThreshold; //Set by SMU FW during enablment of GFXOFF. 
Controls delay for GFX SDP port disconnection during idle events + uint8_t paddingRlcUlvParams[3]; + + // SECTION: Voltage Control Parameters + uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX + uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC + uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX + uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC + + uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits + uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits + + // SECTION: Temperature Dependent Vmin + uint16_t VDDGFX_TVmin; //Celcius + uint16_t VDDSOC_TVmin; //Celcius + uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 + uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 + uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 + uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 + + uint16_t VDDGFX_TVminHystersis; // Celcius + uint16_t VDDSOC_TVminHystersis; // Celcius + + //SECTION: DPM Config 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + uint32_t Paddingclks; + + DroopInt_t PerPartDroopModelGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //GHz ->Vstore in IEEE float format + + uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS ]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + + // Used for MALL performance boost + uint16_t FclkBoostFreq; // In Mhz + uint16_t FclkParamPadding; + + // SECTION: DPM Config 2 + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemVddciVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemMvddVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + // GFXCLK DPM + uint16_t GfxclkFgfxoffEntry; // in Mhz + uint16_t GfxclkFinit; // in Mhz + uint16_t GfxclkFidle; // in MHz + uint8_t GfxclkSource; // 0 = PLL, 1 = DFLL + uint8_t GfxclkPadding; + + // GFX GPO + uint8_t GfxGpoSubFeatureMask; // bit 0 = PACE, bit 1 = DEM + uint8_t GfxGpoEnabledWorkPolicyMask; //Any policy that GPO can be enabled + uint8_t GfxGpoDisabledWorkPolicyMask; //Any policy that GPO can be disabled + uint8_t GfxGpoPadding[1]; + uint32_t GfxGpoVotingAllow; //For indicating which feature changes should result in a GPO table recalculation + + uint32_t GfxGpoPadding32[4]; + + uint16_t GfxDcsFopt; // Optimal GFXCLK for DCS in Mhz + uint16_t GfxDcsFclkFopt; // Optimal FCLK for DCS in Mhz + uint16_t GfxDcsUclkFopt; // Optimal UCLK for DCS in Mhz + + uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase + + uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase + uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. 
+ + uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS. + + uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase. + uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. + + uint32_t DcsParamPadding[5]; + + uint16_t FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; // Q8.8 + + // UCLK section + uint8_t LowestUclkReservedForUlv; // Set this to 1 if UCLK DPM0 is reserved for ULV-mode only + uint8_t PaddingMem[3]; + + uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 4 DPM states, 0-P0, 1-P1, 2-P2, 3-P3. + + // Used for 2-Step UCLK change workaround + UclkDpmChangeRange_t UclkDpmSrcFreqRange; // In Mhz + UclkDpmChangeRange_t UclkDpmTargFreqRange; // In Mhz + uint16_t UclkDpmMidstepFreq; // In Mhz + uint16_t UclkMidstepPadding; + + // Link DPM Settings + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + // SECTION: Fan Control + uint16_t FanStopTemp; //Celcius + uint16_t FanStartTemp; //Celcius + + uint16_t FanGain[TEMP_COUNT]; + + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t MGpuFanBoostLimitRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint16_t FanPadding16; + uint8_t FanTempInputSelect; + uint8_t FanPadding; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + // SECTION: AVFS + // Overrides + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + uint8_t dBtcGbGfxDfllModelSelect; //0 -> fused piece-wise model, 1 -> piece-wise linear(PPTable), 2 -> quadratic model(PPTable) + uint8_t Padding8_Avfs; + + QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve + DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb + DroopInt_t dBtcGbGfxDfll; // GHz->V BtcGb + DroopInt_t dBtcGbSoc; // GHz->V BtcGb + LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V + + PiecewiseLinearDroopInt_t PiecewiseLinearDroopIntGfxDfll; //GHz ->Vstore in IEEE float format + + QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V + + uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_GfxBtc[2]; + + uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 + uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2 + + // SECTION: XGMI + uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3. 
+ uint8_t XgmiDpmSpare[2]; + + // SECTION: Advanced Options + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + // SECTION: Sku Reserved + uint8_t CustomerVariant; + + //VC BTC parameters are only applicable to VDD_GFX domain + uint8_t VcBtcEnabled; + uint16_t VcBtcVminT0; // T0_VMIN + uint16_t VcBtcFixedVminAgingOffset; // FIXED_VMIN_AGING_OFFSET + uint16_t VcBtcVmin2PsmDegrationGb; // VMIN_TO_PSM_DEGRADATION_GB + uint32_t VcBtcPsmA; // A_PSM + uint32_t VcBtcPsmB; // B_PSM + uint32_t VcBtcVminA; // A_VMIN + uint32_t VcBtcVminB; // B_VMIN + + //GPIO Board feature + uint16_t LedGpio; //GeneriA GPIO flag used to control the radeon LEDs + uint16_t GfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages + + uint32_t SkuReserved[16]; + + + + // MAJOR SECTION: BOARD PARAMETERS + + //SECTION: Gaming Clocks + uint32_t GamingClk[6]; + + // SECTION: I2C Control + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + uint8_t GpioScl; // GPIO Number for SCL Line, used only for CKSVII2C1 + uint8_t GpioSda; // GPIO Number for SDA Line, used only for CKSVII2C1 + uint8_t FchUsbPdSlaveAddr; //For requesting USB PD controller S-states via FCH I2C when entering PME turn off + uint8_t I2cSpare[1]; + + // SECTION: SVI2 Board Parameters + uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields + + uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t VddciUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t MvddUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + + // SECTION: Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + int8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; // in Amps + int8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t Mem0MaxCurrent; // in Amps + int8_t Mem0Offset; // in Amps + uint8_t Padding_TelemetryMem0; + + uint16_t Mem1MaxCurrent; // in Amps + int8_t Mem1Offset; // in Amps + uint8_t Padding_TelemetryMem1; + + uint32_t MvddRatio; // This is used for MVDD Svi2 Div Ratio workaround. 
It has 16 fractional bits (Q16.16) + + // SECTION: GPIO Settings + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t LedEnableMask; + + uint8_t LedPcie; // GPIO number for PCIE results + uint8_t LedError; // GPIO number for Error Cases + uint8_t LedSpare1[2]; + + // SECTION: Clock Spread Spectrum + + // GFXCLK PLL Spread Spectrum + uint8_t PllGfxclkSpreadEnabled; // on or off + uint8_t PllGfxclkSpreadPercent; // Q4.4 + uint16_t PllGfxclkSpreadFreq; // kHz + + // GFXCLK DFLL Spread Spectrum + uint8_t DfllGfxclkSpreadEnabled; // on or off + uint8_t DfllGfxclkSpreadPercent; // Q4.4 + uint16_t DfllGfxclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint16_t UclkSpreadPadding; + uint16_t UclkSpreadFreq; // kHz + + // FCLK Spread Spectrum + uint8_t FclkSpreadEnabled; // on or off + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // Section: Memory Config + uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask. + + uint8_t DramBitWidth; // For DRAM use only. See Dram Bit width type defines + uint8_t PaddingMem1[3]; + + // Section: Total Board Power + uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power + uint16_t BoardPowerPadding; + + // SECTION: XGMI Training + uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS]; + uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS]; + + uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS]; + uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS]; + + // SECTION: UMC feature flags + uint8_t HsrEnabled; + uint8_t VddqOffEnabled; + uint8_t PaddingUmcFlags[2]; + + // UCLK Spread Spectrum + uint8_t UclkSpreadPercent[16]; + + // SECTION: Board Reserved + uint32_t BoardReserved[11]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; // SMU internal use + + +} PPTable_beige_goby_t; + +typedef struct { // Time constant parameters for clock averages in ms uint16_t GfxclkAverageLpfTau; uint16_t FclkAverageLpfTau; @@ -1265,4 +1626,5 @@ typedef struct { // These defines are used with the SMC_MSG_SetUclkFastSwitch message. 
#define UCLK_SWITCH_SLOW 0 #define UCLK_SWITCH_FAST 1 +#define UCLK_SWITCH_DUMMY 2 #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h index d23533bda002..a017983ff1fa 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h +++ b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h @@ -64,7 +64,7 @@ #define FEATURE_SMUIO_CG_BIT 28 #define FEATURE_THM_CG_BIT 29 #define FEATURE_CLK_CG_BIT 30 -#define FEATURE_SPARE_31_BIT 31 +#define FEATURE_EDC_BIT 31 #define FEATURE_SPARE_32_BIT 32 #define FEATURE_SPARE_33_BIT 33 #define FEATURE_SPARE_34_BIT 34 @@ -439,8 +439,11 @@ typedef struct { int8_t XgmiOffset; // in Amps uint8_t Padding_TelemetryXgmi; + uint16_t EdcPowerLimit; + uint16_t spare6; + //reserved - uint32_t reserved[15]; + uint32_t reserved[14]; } PPTable_t; diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index bb55a96f98e9..a3b28979bc82 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -34,6 +34,7 @@ #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE #define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03 #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF +#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0x9 /* MP Apertures */ #define MP0_Public 0x03800000 diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h index 8145e1cbf181..1687709507b3 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -26,7 +26,7 @@ #include "amdgpu_smu.h" #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF -#define SMU13_DRIVER_IF_VERSION_ALDE 0x6 +#define SMU13_DRIVER_IF_VERSION_ALDE 0x07 /* MP Apertures */ #define MP0_Public 0x03800000 diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index ee6340c6f921..c73504e998e5 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1651,6 +1651,26 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state) return 0; } +static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size) +{ + struct pp_hwmgr *hwmgr = handle; + struct amdgpu_device *adev = hwmgr->adev; + + if (!addr || !size) + return -EINVAL; + + *addr = NULL; + *size = 0; + mutex_lock(&hwmgr->smu_lock); + if (adev->pm.smu_prv_buffer) { + amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr); + *size = adev->pm.smu_prv_buffer_size; + } + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + static const struct amd_pm_funcs pp_dpm_funcs = { .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, @@ -1714,4 +1734,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .set_xgmi_pstate = pp_set_xgmi_pstate, .get_gpu_metrics = pp_get_gpu_metrics, .gfx_state_change_set = pp_gfx_state_change_set, + .get_smu_prv_buf_details = pp_get_prv_buffer_details, }; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index b1038d30c8dc..f503e61faa60 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -275,7 +275,7 @@ static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3( } /** - * atomctrl_get_memory_pll_dividers_si(). 
+ * atomctrl_get_memory_pll_dividers_si * * @hwmgr: input parameter: pointer to HwMgr * @clock_value: input parameter: memory clock @@ -328,7 +328,7 @@ int atomctrl_get_memory_pll_dividers_si( } /** - * atomctrl_get_memory_pll_dividers_vi(). + * atomctrl_get_memory_pll_dividers_vi * * @hwmgr: input parameter: pointer to HwMgr * @clock_value: input parameter: memory clock @@ -1104,7 +1104,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( } /** - * atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table. + * atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table. * @hwmgr: input: pointer to hwManager * @voltage_type: input: type of EVV voltage VDDC or VDDGFX * @sclk: input: in 10Khz unit. DPM state SCLK frequency @@ -1144,7 +1144,7 @@ int atomctrl_get_voltage_evv_on_sclk( } /** - * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table. + * atomctrl_get_voltage_evv: gets voltage via call to ATOM COMMAND table. * @hwmgr: input: pointer to hwManager * @virtual_voltage_id: input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08 * @voltage: output: real voltage level in unit of mv diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c index 2a28c9df15a0..8d99c7a5abf8 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c @@ -85,7 +85,7 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) return 0; if (state == BACO_STATE_IN) { - if (!ras || !ras->supported) { + if (!ras || !adev->ras_enabled) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); data |= 0x80000000; WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c index 26a5321e621b..15c0b8af376f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c @@ -4817,70 +4817,70 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev, u32 reg; int ret; - table->initialState.levels[0].mclk.vDLL_CNTL = + table->initialState.level.mclk.vDLL_CNTL = cpu_to_be32(si_pi->clock_registers.dll_cntl); - table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL = + table->initialState.level.mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl); - table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = + table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl); - table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = + table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl); - table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL = + table->initialState.level.mclk.vMPLL_FUNC_CNTL = cpu_to_be32(si_pi->clock_registers.mpll_func_cntl); - table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = + table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 = cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1); - table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = + table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 = cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2); - table->initialState.levels[0].mclk.vMPLL_SS = + table->initialState.level.mclk.vMPLL_SS = cpu_to_be32(si_pi->clock_registers.mpll_ss1); - table->initialState.levels[0].mclk.vMPLL_SS2 = + table->initialState.level.mclk.vMPLL_SS2 = cpu_to_be32(si_pi->clock_registers.mpll_ss2); - 
table->initialState.levels[0].mclk.mclk_value = + table->initialState.level.mclk.mclk_value = cpu_to_be32(initial_state->performance_levels[0].mclk); - table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = + table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl); - table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = + table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2); - table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = + table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3); - table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = + table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4); - table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = + table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum); - table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = + table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2); - table->initialState.levels[0].sclk.sclk_value = + table->initialState.level.sclk.sclk_value = cpu_to_be32(initial_state->performance_levels[0].sclk); - table->initialState.levels[0].arbRefreshState = + table->initialState.level.arbRefreshState = SISLANDS_INITIAL_STATE_ARB_INDEX; - table->initialState.levels[0].ACIndex = 0; + table->initialState.level.ACIndex = 0; ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, initial_state->performance_levels[0].vddc, - &table->initialState.levels[0].vddc); + &table->initialState.level.vddc); if (!ret) { u16 std_vddc; ret = si_get_std_voltage_value(adev, - &table->initialState.levels[0].vddc, + &table->initialState.level.vddc, &std_vddc); if (!ret) si_populate_std_voltage_value(adev, std_vddc, - table->initialState.levels[0].vddc.index, - &table->initialState.levels[0].std_vddc); + table->initialState.level.vddc.index, + &table->initialState.level.std_vddc); } if (eg_pi->vddci_control) si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, initial_state->performance_levels[0].vddci, - &table->initialState.levels[0].vddci); + &table->initialState.level.vddci); if (si_pi->vddc_phase_shed_control) si_populate_phase_shedding_value(adev, @@ -4888,41 +4888,41 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev, initial_state->performance_levels[0].vddc, initial_state->performance_levels[0].sclk, initial_state->performance_levels[0].mclk, - &table->initialState.levels[0].vddc); + &table->initialState.level.vddc); - si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd); + si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd); reg = CG_R(0xffff) | CG_L(0); - table->initialState.levels[0].aT = cpu_to_be32(reg); - table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); - table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen; + table->initialState.level.aT = cpu_to_be32(reg); + table->initialState.level.bSP = cpu_to_be32(pi->dsp); + table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen; if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { - table->initialState.levels[0].strobeMode = + table->initialState.level.strobeMode = si_get_strobe_mode_settings(adev, initial_state->performance_levels[0].mclk); if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold) 
- table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG; + table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG; else - table->initialState.levels[0].mcFlags = 0; + table->initialState.level.mcFlags = 0; } table->initialState.levelCount = 1; table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; - table->initialState.levels[0].dpm2.MaxPS = 0; - table->initialState.levels[0].dpm2.NearTDPDec = 0; - table->initialState.levels[0].dpm2.AboveSafeInc = 0; - table->initialState.levels[0].dpm2.BelowSafeInc = 0; - table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0; + table->initialState.level.dpm2.MaxPS = 0; + table->initialState.level.dpm2.NearTDPDec = 0; + table->initialState.level.dpm2.AboveSafeInc = 0; + table->initialState.level.dpm2.BelowSafeInc = 0; + table->initialState.level.dpm2.PwrEfficiencyRatio = 0; reg = MIN_POWER_MASK | MAX_POWER_MASK; - table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg); + table->initialState.level.SQPowerThrottle = cpu_to_be32(reg); reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; - table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); + table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg); return 0; } @@ -4953,18 +4953,18 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, if (pi->acpi_vddc) { ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, - pi->acpi_vddc, &table->ACPIState.levels[0].vddc); + pi->acpi_vddc, &table->ACPIState.level.vddc); if (!ret) { u16 std_vddc; ret = si_get_std_voltage_value(adev, - &table->ACPIState.levels[0].vddc, &std_vddc); + &table->ACPIState.level.vddc, &std_vddc); if (!ret) si_populate_std_voltage_value(adev, std_vddc, - table->ACPIState.levels[0].vddc.index, - &table->ACPIState.levels[0].std_vddc); + table->ACPIState.level.vddc.index, + &table->ACPIState.level.std_vddc); } - table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen; + table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen; if (si_pi->vddc_phase_shed_control) { si_populate_phase_shedding_value(adev, @@ -4972,23 +4972,23 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, pi->acpi_vddc, 0, 0, - &table->ACPIState.levels[0].vddc); + &table->ACPIState.level.vddc); } } else { ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, - pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc); + pi->min_vddc_in_table, &table->ACPIState.level.vddc); if (!ret) { u16 std_vddc; ret = si_get_std_voltage_value(adev, - &table->ACPIState.levels[0].vddc, &std_vddc); + &table->ACPIState.level.vddc, &std_vddc); if (!ret) si_populate_std_voltage_value(adev, std_vddc, - table->ACPIState.levels[0].vddc.index, - &table->ACPIState.levels[0].std_vddc); + table->ACPIState.level.vddc.index, + &table->ACPIState.level.std_vddc); } - table->ACPIState.levels[0].gen2PCIE = + table->ACPIState.level.gen2PCIE = (u8)amdgpu_get_pcie_gen_support(adev, si_pi->sys_pcie_mask, si_pi->boot_pcie_gen, @@ -5000,14 +5000,14 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, pi->min_vddc_in_table, 0, 0, - &table->ACPIState.levels[0].vddc); + &table->ACPIState.level.vddc); } if (pi->acpi_vddc) { if (eg_pi->acpi_vddci) si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, eg_pi->acpi_vddci, - &table->ACPIState.levels[0].vddci); + &table->ACPIState.level.vddci); } mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET; @@ -5018,59 +5018,59 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, 
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; spll_func_cntl_2 |= SCLK_MUX_SEL(4); - table->ACPIState.levels[0].mclk.vDLL_CNTL = + table->ACPIState.level.mclk.vDLL_CNTL = cpu_to_be32(dll_cntl); - table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = + table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); - table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = + table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); - table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = + table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); - table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL = + table->ACPIState.level.mclk.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl); - table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = + table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1); - table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = + table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2); - table->ACPIState.levels[0].mclk.vMPLL_SS = + table->ACPIState.level.mclk.vMPLL_SS = cpu_to_be32(si_pi->clock_registers.mpll_ss1); - table->ACPIState.levels[0].mclk.vMPLL_SS2 = + table->ACPIState.level.mclk.vMPLL_SS2 = cpu_to_be32(si_pi->clock_registers.mpll_ss2); - table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = + table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); - table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = + table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); - table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = + table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3); - table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = + table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4); - table->ACPIState.levels[0].mclk.mclk_value = 0; - table->ACPIState.levels[0].sclk.sclk_value = 0; + table->ACPIState.level.mclk.mclk_value = 0; + table->ACPIState.level.sclk.sclk_value = 0; - si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd); + si_populate_mvdd_value(adev, 0, &table->ACPIState.level.mvdd); if (eg_pi->dynamic_ac_timing) - table->ACPIState.levels[0].ACIndex = 0; + table->ACPIState.level.ACIndex = 0; - table->ACPIState.levels[0].dpm2.MaxPS = 0; - table->ACPIState.levels[0].dpm2.NearTDPDec = 0; - table->ACPIState.levels[0].dpm2.AboveSafeInc = 0; - table->ACPIState.levels[0].dpm2.BelowSafeInc = 0; - table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0; + table->ACPIState.level.dpm2.MaxPS = 0; + table->ACPIState.level.dpm2.NearTDPDec = 0; + table->ACPIState.level.dpm2.AboveSafeInc = 0; + table->ACPIState.level.dpm2.BelowSafeInc = 0; + table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0; reg = MIN_POWER_MASK | MAX_POWER_MASK; - table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg); + table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg); reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; - table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); + table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg); return 0; } static int si_populate_ulv_state(struct amdgpu_device *adev, - SISLANDS_SMC_SWSTATE *state) + struct SISLANDS_SMC_SWSTATE_SINGLE *state) { struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); struct si_power_info *si_pi = si_get_pi(adev); @@ -5079,19 +5079,19 @@ static int si_populate_ulv_state(struct amdgpu_device *adev, int ret; ret = si_convert_power_level_to_smc(adev, &ulv->pl, - &state->levels[0]); + &state->level); 
if (!ret) { if (eg_pi->sclk_deep_sleep) { if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ) - state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS; + state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS; else - state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE; + state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE; } if (ulv->one_pcie_lane_in_ulv) state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1; - state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX); - state->levels[0].ACIndex = 1; - state->levels[0].std_vddc = state->levels[0].vddc; + state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX); + state->level.ACIndex = 1; + state->level.std_vddc = state->level.vddc; state->levelCount = 1; state->flags |= PPSMC_SWSTATE_FLAG_DC; @@ -5190,7 +5190,9 @@ static int si_init_smc_table(struct amdgpu_device *adev) if (ret) return ret; - table->driverState = table->initialState; + table->driverState.flags = table->initialState.flags; + table->driverState.levelCount = table->initialState.levelCount; + table->driverState.levels[0] = table->initialState.level; ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state, SISLANDS_INITIAL_STATE_ARB_INDEX); @@ -5737,8 +5739,8 @@ static int si_upload_ulv_state(struct amdgpu_device *adev) if (ulv->supported && ulv->pl.vddc) { u32 address = si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, ULVState); - SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState; - u32 state_size = sizeof(SISLANDS_SMC_SWSTATE); + struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState; + u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE); memset(smc_state, 0, state_size); diff --git a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h index 0f7554052c90..c7dc117a688c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h +++ b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h @@ -191,6 +191,14 @@ struct SISLANDS_SMC_SWSTATE typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE; +struct SISLANDS_SMC_SWSTATE_SINGLE { + uint8_t flags; + uint8_t levelCount; + uint8_t padding2; + uint8_t padding3; + SISLANDS_SMC_HW_PERFORMANCE_LEVEL level; +}; + #define SISLANDS_SMC_VOLTAGEMASK_VDDC 0 #define SISLANDS_SMC_VOLTAGEMASK_MVDD 1 #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2 @@ -208,19 +216,19 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE; struct SISLANDS_SMC_STATETABLE { - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS]; - SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; - SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable; - PP_SIslands_DPM2Parameters dpm2Params; - SISLANDS_SMC_SWSTATE initialState; - SISLANDS_SMC_SWSTATE ACPIState; - SISLANDS_SMC_SWSTATE ULVState; - SISLANDS_SMC_SWSTATE driverState; - SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; + uint8_t thermalProtectType; + uint8_t systemFlags; + uint8_t maxVDDCIndexInPPTable; + uint8_t extraFlags; + uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS]; + SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; + SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable; + PP_SIslands_DPM2Parameters dpm2Params; + struct SISLANDS_SMC_SWSTATE_SINGLE initialState; + struct SISLANDS_SMC_SWSTATE_SINGLE ACPIState; + struct SISLANDS_SMC_SWSTATE_SINGLE ULVState; + SISLANDS_SMC_SWSTATE driverState; + 
SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; }; typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index c29d8b3131b7..285849cef9f2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -570,6 +570,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: sienna_cichlid_set_ppt_funcs(smu); break; case CHIP_ALDEBARAN: @@ -686,7 +687,8 @@ static int smu_late_init(void *handle) return ret; } - smu_get_unique_id(smu); + if (!amdgpu_sriov_vf(adev)) + smu_get_unique_id(smu); smu_get_fan_parameters(smu); @@ -2933,6 +2935,26 @@ int smu_set_light_sbr(struct smu_context *smu, bool enable) return ret; } +static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) +{ + struct smu_context *smu = handle; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *memory_pool = &smu_table->memory_pool; + + if (!addr || !size) + return -EINVAL; + + *addr = NULL; + *size = 0; + mutex_lock(&smu->mutex); + if (memory_pool->bo) { + *addr = memory_pool->cpu_addr; + *size = memory_pool->size; + } + mutex_unlock(&smu->mutex); + + return 0; +} static const struct amd_pm_funcs swsmu_pm_funcs = { /* export for sysfs */ @@ -2984,6 +3006,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, .load_firmware = smu_load_microcode, .gfx_state_change_set = smu_gfx_state_change_set, + .get_smu_prv_buf_details = smu_get_prv_buffer_details, }; int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index d2fd44b903ca..0c40a54c46d7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -73,6 +73,21 @@ #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15 +#define GET_PPTABLE_MEMBER(field, member) do {\ + if (smu->adev->asic_type == CHIP_BEIGE_GOBY)\ + (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_beige_goby_t, field));\ + else\ + (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\ +} while(0) + +static int get_table_size(struct smu_context *smu) +{ + if (smu->adev->asic_type == CHIP_BEIGE_GOBY) + return sizeof(PPTable_beige_goby_t); + else + return sizeof(PPTable_t); +} + static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -302,7 +317,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu, if (smu->dc_controlled_by_gpio) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT); - if (amdgpu_aspm == 1) + if (amdgpu_aspm) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT); return 0; @@ -336,10 +351,9 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu) static int sienna_cichlid_append_powerplay_table(struct smu_context *smu) { - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *smc_pptable = table_context->driver_pptable; struct atom_smc_dpm_info_v4_9 *smc_dpm_table; int index, ret; + I2cControllerConfig_t *table_member; index = 
get_index_into_master_table(atom_master_list_of_data_tables_v2_1, smc_dpm_info); @@ -348,9 +362,9 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu) (uint8_t **)&smc_dpm_table); if (ret) return ret; - - memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers, - sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header)); + GET_PPTABLE_MEMBER(I2cControllers, &table_member); + memcpy(table_member, smc_dpm_table->I2cControllers, + sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header)); return 0; } @@ -360,9 +374,11 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu) struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_7_powerplay_table *powerplay_table = table_context->power_play_table; + int table_size; + table_size = get_table_size(smu); memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, - sizeof(PPTable_t)); + table_size); return 0; } @@ -394,9 +410,11 @@ static int sienna_cichlid_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; + int table_size; - SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + table_size = get_table_size(smu); + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t), @@ -572,13 +590,14 @@ static int sienna_cichlid_init_smc_tables(struct smu_context *smu) static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) { struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; - PPTable_t *driver_ppt = smu->smu_table.driver_pptable; struct smu_11_0_dpm_table *dpm_table; struct amdgpu_device *adev = smu->adev; int ret = 0; + DpmDescriptor_t *table_member; /* socclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.soc_table; + GET_PPTABLE_MEMBER(DpmDescriptor, &table_member); if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { ret = smu_v11_0_set_single_dpm_table(smu, SMU_SOCCLK, @@ -586,7 +605,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete; + !table_member[PPCLK_SOCCLK].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; @@ -604,7 +623,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete; + !table_member[PPCLK_GFXCLK].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; @@ -622,7 +641,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete; + !table_member[PPCLK_UCLK].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; @@ -640,7 +659,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete; + !table_member[PPCLK_FCLK].SnapToDiscrete; } else { 
dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; @@ -658,7 +677,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete; + !table_member[PPCLK_VCLK_0].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; @@ -677,7 +696,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete; + !table_member[PPCLK_VCLK_1].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = @@ -697,7 +716,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete; + !table_member[PPCLK_DCLK_0].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; @@ -716,7 +735,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete; + !table_member[PPCLK_DCLK_1].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = @@ -736,7 +755,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete; + !table_member[PPCLK_DCEFCLK].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100; @@ -754,7 +773,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete; + !table_member[PPCLK_PIXCLK].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100; @@ -772,7 +791,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete; + !table_member[PPCLK_DISPCLK].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100; @@ -790,7 +809,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) if (ret) return ret; dpm_table->is_fine_grained = - !driver_ppt->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete; + !table_member[PPCLK_PHYCLK].SnapToDiscrete; } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100; @@ -911,14 +930,15 @@ static int sienna_cichlid_get_current_clk_freq_by_table(struct smu_context *smu, static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) { - PPTable_t *pptable = smu->smu_table.driver_pptable; DpmDescriptor_t *dpm_desc = NULL; + DpmDescriptor_t *table_member; uint32_t clk_index = 0; + GET_PPTABLE_MEMBER(DpmDescriptor, &table_member); clk_index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK, clk_type); - dpm_desc = &pptable->DpmDescriptor[clk_index]; + dpm_desc = &table_member[clk_index]; /* 0 - Fine grained DPM, 1 - Discrete DPM */ return dpm_desc->SnapToDiscrete == 0; @@ -947,7 +967,8 @@ static int 
sienna_cichlid_print_clk_levels(struct smu_context *smu, struct smu_table_context *table_context = &smu->smu_table; struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context; - PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable; + uint16_t *table_member; + struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings; OverDriveTable_t *od_table = (OverDriveTable_t *)table_context->overdrive_table; @@ -1016,6 +1037,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, case SMU_PCIE: gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu); lane_width = smu_v11_0_get_current_pcie_link_width_level(smu); + GET_PPTABLE_MEMBER(LclkFreq, &table_member); for (i = 0; i < NUM_LINK_LEVELS; i++) size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," : @@ -1028,7 +1050,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" : (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" : (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "", - pptable->LclkFreq[i], + table_member[i], (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) && (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ? "*" : ""); @@ -1275,9 +1297,10 @@ static int sienna_cichlid_get_fan_speed_percent(struct smu_context *smu, static int sienna_cichlid_get_fan_parameters(struct smu_context *smu) { - PPTable_t *pptable = smu->smu_table.driver_pptable; + uint16_t *table_member; - smu->fan_max_rpm = pptable->FanMaximumRpm; + GET_PPTABLE_MEMBER(FanMaximumRpm, &table_member); + smu->fan_max_rpm = *table_member; return 0; } @@ -1568,8 +1591,7 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu, void *data, uint32_t *size) { int ret = 0; - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; + uint16_t *temp; if(!data || !size) return -EINVAL; @@ -1577,7 +1599,8 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu, mutex_lock(&smu->sensor_lock); switch (sensor) { case AMDGPU_PP_SENSOR_MAX_FAN_RPM: - *(uint32_t *)data = pptable->FanMaximumRpm; + GET_PPTABLE_MEMBER(FanMaximumRpm, &temp); + *(uint16_t *)data = *temp; *size = 4; break; case AMDGPU_PP_SENSOR_MEM_LOAD: @@ -1645,14 +1668,16 @@ static int sienna_cichlid_get_uclk_dpm_states(struct smu_context *smu, uint32_t uint16_t *dpm_levels = NULL; uint16_t i = 0; struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *driver_ppt = NULL; + DpmDescriptor_t *table_member1; + uint16_t *table_member2; if (!clocks_in_khz || !num_states || !table_context->driver_pptable) return -EINVAL; - driver_ppt = table_context->driver_pptable; - num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels; - dpm_levels = driver_ppt->FreqTableUclk; + GET_PPTABLE_MEMBER(DpmDescriptor, &table_member1); + num_discrete_levels = table_member1[PPCLK_UCLK].NumDiscreteLevels; + GET_PPTABLE_MEMBER(FreqTableUclk, &table_member2); + dpm_levels = table_member2; if (num_discrete_levels == 0 || dpm_levels == NULL) return -EINVAL; @@ -1674,25 +1699,29 @@ static int sienna_cichlid_get_thermal_temperature_range(struct smu_context *smu, struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_7_powerplay_table *powerplay_table = table_context->power_play_table; - PPTable_t *pptable = smu->smu_table.driver_pptable; + uint16_t 
*table_member; + uint16_t temp_edge, temp_hotspot, temp_mem; if (!range) return -EINVAL; memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range)); - range->max = pptable->TemperatureLimit[TEMP_EDGE] * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->edge_emergency_max = (pptable->TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_crit_max = pptable->TemperatureLimit[TEMP_HOTSPOT] * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_emergency_max = (pptable->TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) * + GET_PPTABLE_MEMBER(TemperatureLimit, &table_member); + temp_edge = table_member[TEMP_EDGE]; + temp_hotspot = table_member[TEMP_HOTSPOT]; + temp_mem = table_member[TEMP_MEM]; + + range->max = temp_edge * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->edge_emergency_max = (temp_edge + CTF_OFFSET_EDGE) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_crit_max = pptable->TemperatureLimit[TEMP_MEM] * + range->hotspot_crit_max = temp_hotspot * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_emergency_max = (temp_hotspot + CTF_OFFSET_HOTSPOT) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_emergency_max = (pptable->TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)* + range->mem_crit_max = temp_mem * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_emergency_max = (temp_mem + CTF_OFFSET_MEM)* SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->software_shutdown_temp = powerplay_table->software_shutdown_temp; return 0; @@ -1726,17 +1755,14 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu) { struct smu_11_0_7_powerplay_table *powerplay_table = (struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table; - PPTable_t *pptable = smu->smu_table.driver_pptable; uint32_t power_limit, od_percent; + uint16_t *table_member; + + GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member); if (smu_v11_0_get_current_power_limit(smu, &power_limit)) { - /* the last hope to figure out the ppt limit */ - if (!pptable) { - dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!"); - return -EINVAL; - } power_limit = - pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; + table_member[PPT_THROTTLER_PPT0]; } smu->current_power_limit = smu->default_power_limit = power_limit; @@ -1758,36 +1784,39 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint32_t pcie_width_cap) { struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; - PPTable_t *pptable = smu->smu_table.driver_pptable; + uint32_t smu_pcie_arg; + uint8_t *table_member1, *table_member2; int ret, i; + GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); + GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); + /* lclk dpm table setup */ for (i = 0; i < MAX_PCIE_CONF; i++) { - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pptable->PcieGenSpeed[i]; - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pptable->PcieLaneCount[i]; + dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i]; + dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i]; } for (i = 0; i < NUM_LINK_LEVELS; i++) { smu_pcie_arg = (i << 16) | - ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? - (pptable->PcieGenSpeed[i] << 8) : - (pcie_gen_cap << 8)) | - ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? - pptable->PcieLaneCount[i] : - pcie_width_cap); + ((table_member1[i] <= pcie_gen_cap) ? + (table_member1[i] << 8) : + (pcie_gen_cap << 8)) | + ((table_member2[i] <= pcie_width_cap) ? 
+ table_member2[i] : + pcie_width_cap); ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); if (ret) return ret; - if (pptable->PcieGenSpeed[i] > pcie_gen_cap) + if (table_member1[i] > pcie_gen_cap) dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; - if (pptable->PcieLaneCount[i] > pcie_width_cap) + if (table_member2[i] > pcie_width_cap) dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; } @@ -2086,12 +2115,646 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) return val != 0x0; } +static void beige_goby_dump_pptable(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_beige_goby_t *pptable = table_context->driver_pptable; + int i; + + dev_info(smu->adev->dev, "Dumped PPTable:\n"); + + dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); + dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); + dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); + + for (i = 0; i < PPT_THROTTLER_COUNT; i++) { + dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]); + dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]); + dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]); + dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]); + } + + for (i = 0; i < TDC_THROTTLER_COUNT; i++) { + dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]); + dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]); + } + + for (i = 0; i < TEMP_COUNT; i++) { + dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]); + } + + dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit); + dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig); + dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]); + dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]); + dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]); + + dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit); + for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) { + dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]); + dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]); + } + dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask); + + dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask); + + dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc); + dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx); + dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx); + dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc); + + dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin); + + dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold); + + dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx); + dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", 
pptable->MinVoltageSoc); + dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx); + dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc); + + dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx); + dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc); + + dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin); + dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin); + dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp); + dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp); + dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp); + dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp); + dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis); + dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis); + + dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_GFXCLK].Padding, + pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); + + dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_SOCCLK].Padding, + pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); + + dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_UCLK].Padding, + pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, + 
pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_UCLK].Padding16); + + dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_FCLK].Padding, + pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_FCLK].Padding16); + + dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode, + pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_DCLK_0].Padding, + pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a, + pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b, + pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c, + pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin, + pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16); + + dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode, + pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_VCLK_0].Padding, + pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a, + pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b, + pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c, + pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin, + pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16); + + dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode, + pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_DCLK_1].Padding, + pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m, + 
pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a, + pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b, + pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c, + pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin, + pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16); + + dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode, + pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_VCLK_1].Padding, + pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a, + pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b, + pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c, + pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin, + pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16); + + dev_info(smu->adev->dev, "FreqTableGfx\n"); + for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]); + + dev_info(smu->adev->dev, "FreqTableVclk\n"); + for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]); + + dev_info(smu->adev->dev, "FreqTableDclk\n"); + for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]); + + dev_info(smu->adev->dev, "FreqTableSocclk\n"); + for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]); + + dev_info(smu->adev->dev, "FreqTableUclk\n"); + for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]); + + dev_info(smu->adev->dev, "FreqTableFclk\n"); + for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]); + + dev_info(smu->adev->dev, "DcModeMaxFreq\n"); + dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); + dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); + dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); + dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); + dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]); + dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]); + dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]); + dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]); + + dev_info(smu->adev->dev, "FreqTableUclkDiv\n"); + for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]); + + dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq); + dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding); + + dev_info(smu->adev->dev, "Mp0clkFreq\n"); + for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]); + + 
dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); + for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]); + + dev_info(smu->adev->dev, "MemVddciVoltage\n"); + for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]); + + dev_info(smu->adev->dev, "MemMvddVoltage\n"); + for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]); + + dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry); + dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit); + dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); + dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); + dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding); + + dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask); + + dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask); + dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask); + dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]); + dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow); + dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]); + dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]); + dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]); + dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]); + dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt); + dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt); + dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt); + + dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage); + dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime); + dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime); + dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum); + dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis); + dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout); + + dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]); + dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]); + dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]); + dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]); + dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]); + + dev_info(smu->adev->dev, "FlopsPerByteTable\n"); + for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]); + + dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv); + dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]); + dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]); + dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]); + + dev_info(smu->adev->dev, "UclkDpmPstates\n"); + for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) + dev_info(smu->adev->dev, " 
.[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]); + + dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n"); + dev_info(smu->adev->dev, " .Fmin = 0x%x\n", + pptable->UclkDpmSrcFreqRange.Fmin); + dev_info(smu->adev->dev, " .Fmax = 0x%x\n", + pptable->UclkDpmSrcFreqRange.Fmax); + dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n"); + dev_info(smu->adev->dev, " .Fmin = 0x%x\n", + pptable->UclkDpmTargFreqRange.Fmin); + dev_info(smu->adev->dev, " .Fmax = 0x%x\n", + pptable->UclkDpmTargFreqRange.Fmax); + dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq); + dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding); + + dev_info(smu->adev->dev, "PcieGenSpeed\n"); + for (i = 0; i < NUM_LINK_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]); + + dev_info(smu->adev->dev, "PcieLaneCount\n"); + for (i = 0; i < NUM_LINK_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]); + + dev_info(smu->adev->dev, "LclkFreq\n"); + for (i = 0; i < NUM_LINK_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]); + + dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp); + dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp); + + dev_info(smu->adev->dev, "FanGain\n"); + for (i = 0; i < TEMP_COUNT; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]); + + dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin); + dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm); + dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm); + dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm); + dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm); + dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature); + dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk); + dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16); + dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect); + dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding); + dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable); + dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev); + + dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta); + dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta); + dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta); + dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved); + + dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); + dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); + dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect); + dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs); + + dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, + pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, + pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); + dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", + 
pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, + pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, + pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); + dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbGfxPll.a, + pptable->dBtcGbGfxPll.b, + pptable->dBtcGbGfxPll.c); + dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbGfxDfll.a, + pptable->dBtcGbGfxDfll.b, + pptable->dBtcGbGfxDfll.c); + dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbSoc.a, + pptable->dBtcGbSoc.b, + pptable->dBtcGbSoc.c); + dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", + pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, + pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); + dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", + pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, + pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); + + dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n"); + for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) { + dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n", + i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]); + dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n", + i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]); + } + + dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); + dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); + + dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); + dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); + + dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); + dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); + dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); + dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); + + dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); + dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); + dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); + dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); + + dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); + dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); + + dev_info(smu->adev->dev, "XgmiDpmPstates\n"); + for (i = 0; i < NUM_XGMI_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]); + dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); + dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); + + dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); + dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation0.a, + pptable->ReservedEquation0.b, 
+ pptable->ReservedEquation0.c); + dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation1.a, + pptable->ReservedEquation1.b, + pptable->ReservedEquation1.c); + dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation2.a, + pptable->ReservedEquation2.b, + pptable->ReservedEquation2.c); + dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation3.a, + pptable->ReservedEquation3.b, + pptable->ReservedEquation3.c); + + dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]); + dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]); + dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]); + dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]); + dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]); + dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]); + dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]); + dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]); + + dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]); + dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]); + dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]); + dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]); + dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]); + dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]); + + for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { + dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); + dev_info(smu->adev->dev, " .Enabled = 0x%x\n", + pptable->I2cControllers[i].Enabled); + dev_info(smu->adev->dev, " .Speed = 0x%x\n", + pptable->I2cControllers[i].Speed); + dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", + pptable->I2cControllers[i].SlaveAddress); + dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n", + pptable->I2cControllers[i].ControllerPort); + dev_info(smu->adev->dev, " .ControllerName = 0x%x\n", + pptable->I2cControllers[i].ControllerName); + dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n", + pptable->I2cControllers[i].ThermalThrotter); + dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n", + pptable->I2cControllers[i].I2cProtocol); + dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n", + pptable->I2cControllers[i].PaddingConfig); + } + + dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl); + dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda); + dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr); + dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]); + + dev_info(smu->adev->dev, "Board Parameters:\n"); + dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); + dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); + dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); + dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); + dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); + dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); + dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask); + dev_info(smu->adev->dev, 
"MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask); + + dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); + dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); + dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); + + dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); + dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); + dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); + + dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); + dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset); + dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); + + dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); + dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset); + dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); + + dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio); + + dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio); + dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity); + dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio); + dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity); + dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio); + dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity); + dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio); + dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity); + dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0); + dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1); + dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2); + dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask); + dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie); + dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError); + dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]); + dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]); + + dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled); + dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent); + dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq); + + dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled); + dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent); + dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq); + + dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding); + dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq); + + dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled); + dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent); + dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq); + + dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled); + dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth); + dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]); + dev_info(smu->adev->dev, 
"PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]); + dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]); + + dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower); + dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding); + + dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); + for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]); + dev_info(smu->adev->dev, "XgmiLinkWidth\n"); + for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]); + dev_info(smu->adev->dev, "XgmiFclkFreq\n"); + for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]); + dev_info(smu->adev->dev, "XgmiSocVoltage\n"); + for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) + dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]); + + dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled); + dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled); + dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]); + dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]); + + dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]); + dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]); + dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]); + dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]); + dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]); + dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]); + dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]); + dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]); + dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]); + dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]); + dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]); + + dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]); + dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]); + dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]); + dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]); + dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]); + dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]); + dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]); + dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]); +} + static void sienna_cichlid_dump_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; PPTable_t *pptable = table_context->driver_pptable; int i; + if (smu->adev->asic_type == CHIP_BEIGE_GOBY) { + beige_goby_dump_pptable(smu); + return; + } + dev_info(smu->adev->dev, "Dumped PPTable:\n"); dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 6274cae4a065..362696208fd8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -63,6 +63,7 @@ MODULE_FIRMWARE("amdgpu/navi12_smc.bin"); MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin"); MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin"); MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_smc.bin"); #define SMU11_VOLTAGE_SCALE 4 @@ -115,6 +116,9 @@ int smu_v11_0_init_microcode(struct smu_context *smu) case CHIP_DIMGREY_CAVEFISH: chip_name = "dimgrey_cavefish"; break; + case CHIP_BEIGE_GOBY: + chip_name = "beige_goby"; + break; default: dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type); return -EINVAL; @@ -259,6 +263,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) case CHIP_DIMGREY_CAVEFISH: smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish; break; + case CHIP_BEIGE_GOBY: + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby; + break; default: dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV; @@ -729,7 +736,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) * display num currently */ if (adev->asic_type >= CHIP_NAVY_FLOUNDER && - adev->asic_type <= CHIP_DIMGREY_CAVEFISH) + adev->asic_type <= CHIP_BEIGE_GOBY) return 0; return smu_cmn_send_smc_msg_with_param(smu, @@ -1121,6 +1128,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: case CHIP_VANGOGH: if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; @@ -1531,7 +1539,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) NULL); break; default: - if (!ras || !ras->supported || adev->gmc.xgmi.pending_reset) { + if (!ras || !adev->ras_enabled || + adev->gmc.xgmi.pending_reset) { if (adev->asic_type == CHIP_ARCTURUS) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); data |= 0x80000000; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index dcbe3a72da09..7c191a5d6db9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -78,8 +78,6 @@ #define smnPCIE_ESM_CTRL 0x111003D0 -#define CLOCK_VALID (1 << 31) - static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -209,7 +207,7 @@ static int aldebaran_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_2); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) { kfree(smu_table->metrics_table); @@ -455,12 +453,18 @@ static int aldebaran_populate_umd_state_clk(struct smu_context *smu) pstate_table->gfxclk_pstate.min = gfx_table->min; pstate_table->gfxclk_pstate.peak = gfx_table->max; + pstate_table->gfxclk_pstate.curr.min = gfx_table->min; + pstate_table->gfxclk_pstate.curr.max = gfx_table->max; pstate_table->uclk_pstate.min = mem_table->min; pstate_table->uclk_pstate.peak = mem_table->max; + pstate_table->uclk_pstate.curr.min = mem_table->min; + pstate_table->uclk_pstate.curr.max = mem_table->max; pstate_table->socclk_pstate.min = soc_table->min; 
pstate_table->socclk_pstate.peak = soc_table->max; + pstate_table->socclk_pstate.curr.min = soc_table->min; + pstate_table->socclk_pstate.curr.max = soc_table->max; if (gfx_table->count > ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL && mem_table->count > ALDEBARAN_UMD_PSTATE_MCLK_LEVEL && @@ -669,6 +673,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, { int i, now, size = 0; int ret = 0; + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; struct pp_clock_levels_with_latency clocks; struct smu_13_0_dpm_table *single_dpm_table; struct smu_dpm_context *smu_dpm = &smu->smu_dpm; @@ -703,12 +708,8 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, display_levels = clocks.num_levels; - min_clk = smu->gfx_actual_hard_min_freq & CLOCK_VALID ? - smu->gfx_actual_hard_min_freq & ~CLOCK_VALID : - single_dpm_table->dpm_levels[0].value; - max_clk = smu->gfx_actual_soft_max_freq & CLOCK_VALID ? - smu->gfx_actual_soft_max_freq & ~CLOCK_VALID : - single_dpm_table->dpm_levels[1].value; + min_clk = pstate_table->gfxclk_pstate.curr.min; + max_clk = pstate_table->gfxclk_pstate.curr.max; freq_values[0] = min_clk; freq_values[1] = max_clk; @@ -1128,15 +1129,17 @@ static int aldebaran_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); + struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_13_0_dpm_table *gfx_table = + &dpm_context->dpm_tables.gfx_table; + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; /* Disable determinism if switching to another mode */ - if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) - && (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) + if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) && + (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) { smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL); - - /* Reset user min/max gfx clock */ - smu->gfx_actual_hard_min_freq = 0; - smu->gfx_actual_soft_max_freq = 0; + pstate_table->gfxclk_pstate.curr.max = gfx_table->max; + } switch (level) { @@ -1163,6 +1166,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; struct amdgpu_device *adev = smu->adev; uint32_t min_clk; uint32_t max_clk; @@ -1176,15 +1180,23 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, return -EINVAL; if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - min_clk = max(min, dpm_context->dpm_tables.gfx_table.min); - max_clk = min(max, dpm_context->dpm_tables.gfx_table.max); - ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, - min_clk, max_clk); + if (min >= max) { + dev_err(smu->adev->dev, + "Minimum GFX clk should be less than the maximum allowed clock\n"); + return -EINVAL; + } + if ((min == pstate_table->gfxclk_pstate.curr.min) && + (max == pstate_table->gfxclk_pstate.curr.max)) + return 0; + + ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, + min, max); if (!ret) { - smu->gfx_actual_hard_min_freq = min_clk | CLOCK_VALID; - smu->gfx_actual_soft_max_freq = max_clk | CLOCK_VALID; + pstate_table->gfxclk_pstate.curr.min = min; + pstate_table->gfxclk_pstate.curr.max = max; } + return ret; } @@ -1209,10 +1221,8 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, dev_err(adev->dev, "Failed to enable 
determinism at GFX clock %d MHz\n", max); } else { - smu->gfx_actual_hard_min_freq = - min_clk | CLOCK_VALID; - smu->gfx_actual_soft_max_freq = - max | CLOCK_VALID; + pstate_table->gfxclk_pstate.curr.min = min_clk; + pstate_table->gfxclk_pstate.curr.max = max; } } } @@ -1225,6 +1235,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; uint32_t min_clk; uint32_t max_clk; int ret = 0; @@ -1245,16 +1256,22 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ if (input[1] < dpm_context->dpm_tables.gfx_table.min) { dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n", input[1], dpm_context->dpm_tables.gfx_table.min); + pstate_table->gfxclk_pstate.custom.min = + pstate_table->gfxclk_pstate.curr.min; return -EINVAL; } - smu->gfx_actual_hard_min_freq = input[1]; + + pstate_table->gfxclk_pstate.custom.min = input[1]; } else if (input[0] == 1) { if (input[1] > dpm_context->dpm_tables.gfx_table.max) { dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n", input[1], dpm_context->dpm_tables.gfx_table.max); + pstate_table->gfxclk_pstate.custom.max = + pstate_table->gfxclk_pstate.curr.max; return -EINVAL; } - smu->gfx_actual_soft_max_freq = input[1]; + + pstate_table->gfxclk_pstate.custom.max = input[1]; } else { return -EINVAL; } @@ -1276,8 +1293,17 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ dev_err(smu->adev->dev, "Input parameter number not correct\n"); return -EINVAL; } else { - min_clk = smu->gfx_actual_hard_min_freq; - max_clk = smu->gfx_actual_soft_max_freq; + if (!pstate_table->gfxclk_pstate.custom.min) + pstate_table->gfxclk_pstate.custom.min = + pstate_table->gfxclk_pstate.curr.min; + + if (!pstate_table->gfxclk_pstate.custom.max) + pstate_table->gfxclk_pstate.custom.max = + pstate_table->gfxclk_pstate.curr.max; + + min_clk = pstate_table->gfxclk_pstate.custom.min; + max_clk = pstate_table->gfxclk_pstate.custom.max; + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); } break; @@ -1632,8 +1658,8 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_1 *gpu_metrics = - (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_2 *gpu_metrics = + (struct gpu_metrics_v1_2 *)smu_table->gpu_metrics_table; SmuMetrics_t metrics; int i, ret = 0; @@ -1643,7 +1669,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, if (ret) return ret; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 2); gpu_metrics->temperature_edge = metrics.TemperatureEdge; gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; @@ -1657,7 +1683,9 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_mm_activity = 0; gpu_metrics->average_socket_power = metrics.AverageSocketPower; - gpu_metrics->energy_accumulator = 0; + gpu_metrics->energy_accumulator = + (uint64_t)metrics.EnergyAcc64bitHigh << 32 | + metrics.EnergyAcc64bitLow; gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; @@ 
-1688,9 +1716,12 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, for (i = 0; i < NUM_HBM_INSTANCES; i++) gpu_metrics->temperature_hbm[i] = metrics.TemperatureAllHBM[i]; + gpu_metrics->firmware_timestamp = ((uint64_t)metrics.TimeStampHigh << 32) | + metrics.TimeStampLow; + *table = (void *)gpu_metrics; - return sizeof(struct gpu_metrics_v1_1); + return sizeof(struct gpu_metrics_v1_2); } static int aldebaran_mode2_reset(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 0864083e7435..a3dc7194aaf8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1626,6 +1626,9 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, sclk_max); if (ret) return ret; + + pstate_table->gfxclk_pstate.curr.min = sclk_min; + pstate_table->gfxclk_pstate.curr.max = sclk_max; } if (mclk_min && mclk_max) { @@ -1635,6 +1638,9 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, mclk_max); if (ret) return ret; + + pstate_table->uclk_pstate.curr.min = mclk_min; + pstate_table->uclk_pstate.curr.max = mclk_max; } if (socclk_min && socclk_max) { @@ -1644,6 +1650,9 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, socclk_max); if (ret) return ret; + + pstate_table->socclk_pstate.curr.min = socclk_min; + pstate_table->socclk_pstate.curr.max = socclk_max; } return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index dc7d2e71aa6f..0934e5b3aa17 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -104,8 +104,8 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu, ret = smu_cmn_wait_for_response(smu); if (ret != 0x1) { - dev_err(adev->dev, "Msg issuing pre-check failed and " - "SMU may be not in the right state!\n"); + dev_err(adev->dev, "Msg issuing pre-check failed(0x%x) and " + "SMU may be not in the right state!\n", ret); if (ret != -ETIME) ret = -EIO; return ret; @@ -761,6 +761,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) case METRICS_VERSION(1, 1): structure_size = sizeof(struct gpu_metrics_v1_1); break; + case METRICS_VERSION(1, 2): + structure_size = sizeof(struct gpu_metrics_v1_2); + break; case METRICS_VERSION(2, 0): structure_size = sizeof(struct gpu_metrics_v2_0); break; |
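Note: the sienna_cichlid hunks above replace direct pptable->Field dereferences with GET_PPTABLE_MEMBER(Field, &ptr) lookups so one code path can serve two driver pptable layouts (the existing PPTable_t and the new beige_goby variant). The macro itself is defined elsewhere in sienna_cichlid_ppt.c and is not part of the hunks shown here; the standalone sketch below only illustrates the offsetof-based dispatch idea, with hypothetical struct, enum, and function names.

    /*
     * Illustrative sketch only -- hypothetical types, not the kernel macro.
     * Two pptable layouts carry the same logical field at different offsets;
     * the accessor picks the offset for the detected ASIC variant and hands
     * back a pointer into the raw driver_pptable buffer.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pptable_a { uint32_t version; uint16_t fan_maximum_rpm; };
    struct pptable_b { uint32_t version; uint32_t extra; uint16_t fan_maximum_rpm; };

    enum asic_variant { ASIC_A, ASIC_B };

    static uint16_t *get_fan_maximum_rpm(void *driver_pptable, enum asic_variant asic)
    {
            size_t off = (asic == ASIC_B) ?
                    offsetof(struct pptable_b, fan_maximum_rpm) :
                    offsetof(struct pptable_a, fan_maximum_rpm);

            return (uint16_t *)((uint8_t *)driver_pptable + off);
    }

    int main(void)
    {
            struct pptable_b blob = { .version = 1, .extra = 0, .fan_maximum_rpm = 3200 };

            /* The same call works for either layout; only the offset differs. */
            printf("fan_maximum_rpm = %u\n", *get_fan_maximum_rpm(&blob, ASIC_B));
            return 0;
    }

The payoff is visible in hunks such as sienna_cichlid_get_fan_parameters() and sienna_cichlid_read_sensor(): callers no longer need to know which pptable variant backs driver_pptable.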
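Note: the aldebaran_get_gpu_metrics() hunk starts reporting energy_accumulator and firmware_timestamp by stitching the 32-bit high/low words from the PMFW metrics table into single 64-bit values. A minimal sketch of that pattern, with hypothetical variable names:

    #include <stdint.h>
    #include <stdio.h>

    /* The cast to uint64_t keeps the high word from being shifted inside a
     * 32-bit intermediate and lost. */
    static uint64_t combine_hi_lo(uint32_t hi, uint32_t lo)
    {
            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            uint32_t hi = 0x00000002, lo = 0x80000000;

            printf("0x%llx\n", (unsigned long long)combine_hi_lo(hi, lo));
            return 0;
    }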