Diffstat (limited to 'drivers/gpu/drm/amd/pm')
22 files changed, 821 insertions, 422 deletions
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 0a6bb3311f0f..03581d5b1836 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -927,7 +927,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 {
 	int ret = 0;
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	bool swsmu = is_support_sw_smu(adev);
 
 	switch (block_type) {
 	case AMD_IP_BLOCK_TYPE_UVD:
@@ -968,15 +967,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 	case AMD_IP_BLOCK_TYPE_GFX:
 	case AMD_IP_BLOCK_TYPE_VCN:
 	case AMD_IP_BLOCK_TYPE_SDMA:
-		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
-			ret = (pp_funcs->set_powergating_by_smu(
-				(adev)->powerplay.pp_handle, block_type, gate));
-		}
-		break;
 	case AMD_IP_BLOCK_TYPE_JPEG:
-		if (swsmu)
-			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
-		break;
 	case AMD_IP_BLOCK_TYPE_GMC:
 	case AMD_IP_BLOCK_TYPE_ACP:
 		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
@@ -1606,7 +1597,10 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
 			pr_err("smu firmware loading failed\n");
 			return r;
 		}
-		*smu_version = adev->pm.fw_version;
+
+		if (smu_version)
+			*smu_version = adev->pm.fw_version;
 	}
+
 	return 0;
 }
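The two amdgpu_dpm.c hunks above are independent: the power-gating switch now lets GFX/VCN/SDMA/JPEG fall through to the single pp_funcs->set_powergating_by_smu call, and amdgpu_pm_load_smu_firmware() stops dereferencing its smu_version out-parameter unconditionally. A minimal sketch of the calling contract the NULL check enables; the call sites are illustrative, not from this patch:

	/* Illustrative only: with the guard in place, a caller that needs
	 * the firmware loaded but not its version may pass NULL. */
	uint32_t smu_version;
	int r;

	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);	/* version wanted */
	r = amdgpu_pm_load_smu_firmware(adev, NULL);		/* load only */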
"profile_min_sclk" : - (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : - (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : - (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" : - "unknown"); + return sysfs_emit(buf, "%s\n", + (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : + (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : + (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : + (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : + (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : + (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : + (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : + (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : + (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" : + "unknown"); } static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, @@ -310,6 +315,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; if (strncmp("low", buf, strlen("low")) == 0) { level = AMD_DPM_FORCED_LEVEL_LOW; @@ -408,6 +415,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -448,6 +457,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -472,7 +483,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, if (i == data.nums) i = -EINVAL; - return snprintf(buf, PAGE_SIZE, "%d\n", i); + return sysfs_emit(buf, "%d\n", i); } static ssize_t amdgpu_get_pp_force_state(struct device *dev, @@ -484,11 +495,13 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; if (adev->pp_force_state_enabled) return amdgpu_get_pp_cur_state(dev, attr, buf); else - return snprintf(buf, PAGE_SIZE, "\n"); + return sysfs_emit(buf, "\n"); } static ssize_t amdgpu_set_pp_force_state(struct device *dev, @@ -504,6 +517,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; if (strlen(buf) == 1) adev->pp_force_state_enabled = false; @@ -564,6 +579,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -602,6 +619,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -764,6 +783,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; if (count > 127) return -EINVAL; @@ -865,6 +886,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -916,6 +939,8 @@ static 
ssize_t amdgpu_set_pp_features(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = kstrtou64(buf, 0, &featuremask); if (ret) @@ -927,14 +952,7 @@ static ssize_t amdgpu_set_pp_features(struct device *dev, return ret; } - if (is_support_sw_smu(adev)) { - ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); - if (ret) { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return -EINVAL; - } - } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { + if (adev->powerplay.pp_funcs->set_ppfeature_status) { ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); if (ret) { pm_runtime_mark_last_busy(ddev->dev); @@ -959,6 +977,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -1018,6 +1038,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -1083,6 +1105,8 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = amdgpu_read_mask(buf, count, &mask); if (ret) @@ -1239,6 +1263,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -1254,7 +1280,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); - return snprintf(buf, PAGE_SIZE, "%d\n", value); + return sysfs_emit(buf, "%d\n", value); } static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, @@ -1269,6 +1295,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = kstrtol(buf, 0, &value); @@ -1312,6 +1340,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -1327,7 +1357,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); - return snprintf(buf, PAGE_SIZE, "%d\n", value); + return sysfs_emit(buf, "%d\n", value); } static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, @@ -1342,6 +1372,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = kstrtol(buf, 0, &value); @@ -1405,6 +1437,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(ddev->dev); if (ret < 0) { @@ -1443,6 +1477,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; tmp[0] = *(buf); tmp[1] = '\0'; @@ -1506,6 +1542,8 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if 
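Every sysfs store handler in this file converges on the same shape after these hunks; a condensed sketch, assuming the usual amdgpu sysfs boilerplate (a composite, not a verbatim copy of any one handler):

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		struct drm_device *ddev = dev_get_drvdata(dev);
		struct amdgpu_device *adev = drm_to_adev(ddev);
		uint64_t featuremask;
		int ret;

		if (amdgpu_in_reset(adev))
			return -EPERM;
		if (adev->in_suspend && !adev->in_runpm)	/* the new guard */
			return -EPERM;

		ret = kstrtou64(buf, 0, &featuremask);
		if (ret)
			return -EINVAL;

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

		/* ... apply featuremask, e.g. via amdgpu_dpm_set_ppfeature_status() ... */

		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);

		return count;
	}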
@@ -1506,6 +1542,8 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0) {
@@ -1523,7 +1561,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", value);
+	return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -1544,6 +1582,8 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0) {
@@ -1561,7 +1601,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", value);
+	return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -1587,6 +1627,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (adev->flags & AMD_IS_APU)
 		return -ENODATA;
@@ -1605,8 +1647,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
-	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
-			count0, count1, pcie_get_mps(adev->pdev));
+	return sysfs_emit(buf, "%llu %llu %i\n",
+			  count0, count1, pcie_get_mps(adev->pdev));
 }
 
 /**
@@ -1628,9 +1670,11 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (adev->unique_id)
-		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
+		return sysfs_emit(buf, "%016llx\n", adev->unique_id);
 
 	return 0;
 }
@@ -1657,10 +1701,10 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 
-	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
-			adev_to_drm(adev)->unique,
-			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
-			adev->throttling_logging_rs.interval / HZ + 1);
+	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
+			  adev_to_drm(adev)->unique,
+			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+			  adev->throttling_logging_rs.interval / HZ + 1);
 }
 
 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
@@ -1726,6 +1770,8 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1954,6 +2000,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (channel >= PP_TEMP_MAX)
 		return -EINVAL;
@@ -1991,7 +2039,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
@@ -2007,7 +2055,7 @@ static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
 	else
 		temp = adev->pm.dpm.thermal.max_temp;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
@@ -2023,7 +2071,7 @@ static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
 	else
 		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
@@ -2039,7 +2087,7 @@ static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
 	else
 		temp = adev->pm.dpm.thermal.max_mem_crit_temp;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
@@ -2051,7 +2099,7 @@ static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
 	if (channel >= PP_TEMP_MAX)
 		return -EINVAL;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
+	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
@@ -2077,7 +2125,7 @@ static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
 		break;
 	}
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
@@ -2090,6 +2138,8 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (ret < 0) {
@@ -2122,6 +2172,8 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = kstrtoint(buf, 10, &value);
 	if (err)
@@ -2172,6 +2224,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2220,6 +2274,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2253,6 +2309,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2285,6 +2343,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2301,7 +2361,7 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
+	return sysfs_emit(buf, "%d\n", min_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
@@ -2315,6 +2375,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2331,7 +2393,7 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
+	return sysfs_emit(buf, "%d\n", max_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
@@ -2344,6 +2406,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2376,6 +2440,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2422,6 +2488,8 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (ret < 0) {
@@ -2455,6 +2523,8 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = kstrtoint(buf, 10, &value);
 	if (err)
@@ -2496,6 +2566,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2513,14 +2585,14 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
+	return sysfs_emit(buf, "%d\n", vddgfx);
 }
 
 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
 					      struct device_attribute *attr,
 					      char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "vddgfx\n");
+	return sysfs_emit(buf, "vddgfx\n");
 }
 
 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
@@ -2533,6 +2605,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	/* only APUs have vddnb */
 	if (!(adev->flags & AMD_IS_APU))
@@ -2554,14 +2628,14 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
+	return sysfs_emit(buf, "%d\n", vddnb);
 }
 
 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
 					     struct device_attribute *attr,
 					     char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "vddnb\n");
+	return sysfs_emit(buf, "vddnb\n");
 }
 
 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
@@ -2575,6 +2649,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2595,7 +2671,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 	/* convert to microwatts */
 	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
+	return sysfs_emit(buf, "%u\n", uw);
 }
 
 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
@@ -2619,6 +2695,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2656,6 +2734,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2693,6 +2773,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2722,7 +2804,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
 {
 	int limit_type = to_sensor_dev_attr(attr)->index;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",
+	return sysfs_emit(buf, "%s\n",
 		limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
 }
 
@@ -2739,6 +2821,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (amdgpu_sriov_vf(adev))
 		return -EINVAL;
@@ -2780,6 +2864,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2797,14 +2883,14 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
+	return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
 					    struct device_attribute *attr,
 					    char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "sclk\n");
+	return sysfs_emit(buf, "sclk\n");
 }
 
 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
@@ -2817,6 +2903,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2834,14 +2922,14 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
+	return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
 					    struct device_attribute *attr,
 					    char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "mclk\n");
+	return sysfs_emit(buf, "mclk\n");
 }
 
 /**
@@ -3390,6 +3478,8 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0) {
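The show-handler counterpart after the conversion: sysfs_emit() checks that buf is page-aligned and bounds the write to PAGE_SIZE internally, so the open-coded snprintf(buf, PAGE_SIZE, ...) idiom disappears. A condensed sketch (a composite, not a verbatim copy of any one handler):

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct drm_device *ddev = dev_get_drvdata(dev);
		struct amdgpu_device *adev = drm_to_adev(ddev);
		int value = 0;

		if (amdgpu_in_reset(adev))
			return -EPERM;
		if (adev->in_suspend && !adev->in_runpm)
			return -EPERM;

		/* ... fetch value under a pm_runtime reference ... */

		return sysfs_emit(buf, "%d\n", value);
	}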
diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
index 433dd1e9ec4f..610266088ff1 100644
--- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
@@ -100,7 +100,8 @@
 #define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh	0x40
 #define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow	0x41
 
-#define PPSMC_Message_Count				0x42
+#define PPSMC_MSG_GfxDriverResetRecovery		0x42
+#define PPSMC_Message_Count				0x43
 
 //PPSMC Reset Types
 #define PPSMC_RESET_TYPE_WARM_RESET			0x00
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 25d5f03aaa67..8bb224f6c762 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -195,6 +195,11 @@ struct smu_user_dpm_profile {
 	uint32_t clk_dependency;
 };
 
+enum smu_event_type {
+
+	SMU_EVENT_RESET_COMPLETE = 0,
+};
+
 #define SMU_TABLE_INIT(tables, table_id, s, a, d)	\
 	do {						\
 		tables[table_id].size = s;		\
@@ -338,7 +343,6 @@ struct smu_power_context {
 	struct smu_power_gate power_gate;
 };
 
-
 #define SMU_FEATURE_MAX	(64)
 struct smu_feature
 {
@@ -807,6 +811,13 @@ struct pptable_funcs {
 	int (*check_fw_status)(struct smu_context *smu);
 
 	/**
+	 * @set_mp1_state: put SMU into a correct state for comming
+	 *                 resume from runpm or gpu reset.
+	 */
+	int (*set_mp1_state)(struct smu_context *smu,
+			     enum pp_mp1_state mp1_state);
+
+	/**
 	 * @setup_pptable: Initialize the power play table and populate it with
 	 *                 default values.
 	 */
@@ -1160,6 +1171,12 @@ struct pptable_funcs {
 	 * @set_light_sbr:  Set light sbr mode for the SMU.
 	 */
 	int (*set_light_sbr)(struct smu_context *smu, bool enable);
+
+	/**
+	 * @wait_for_event:  Wait for events from SMU.
+	 */
+	int (*wait_for_event)(struct smu_context *smu,
+			      enum smu_event_type event, uint64_t event_arg);
 };
 
 typedef enum {
@@ -1235,64 +1252,13 @@ enum smu_cmn2asic_mapping_type {
 	[profile] = {1, (workload)}
 
 #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
-int smu_load_microcode(struct smu_context *smu);
-
-int smu_check_fw_status(struct smu_context *smu);
-
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
-
-int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
-
 int smu_get_power_limit(struct smu_context *smu,
 			uint32_t *limit,
 			enum smu_ppt_limit_level limit_level);
 
-int smu_set_power_limit(void *handle, uint32_t limit);
-int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf);
-
-int smu_od_edit_dpm_table(void *handle,
-			  enum PP_OD_DPM_TABLE_COMMAND type,
-			  long *input, uint32_t size);
-
-int smu_read_sensor(void *handle, int sensor, void *data, int *size);
-int smu_get_power_profile_mode(void *handle, char *buf);
-int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size);
-u32 smu_get_fan_control_mode(void *handle);
-int smu_set_fan_control_mode(struct smu_context *smu, int value);
-void smu_pp_set_fan_control_mode(void *handle, u32 value);
-int smu_get_fan_speed_percent(void *handle, u32 *speed);
-int smu_set_fan_speed_percent(void *handle, u32 speed);
-int smu_get_fan_speed_rpm(void *handle, uint32_t *speed);
-
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
-
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
-				       enum smu_clk_type clk_type,
-				       struct pp_clock_levels_with_latency *clocks);
-
-int smu_display_clock_voltage_request(struct smu_context *smu,
-				      struct pp_display_clock_request *clock_req);
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
-
-int smu_set_xgmi_pstate(void *handle,
-			uint32_t pstate);
-
-int smu_set_azalia_d3_pme(struct smu_context *smu);
-
-bool smu_baco_is_support(struct smu_context *smu);
-int smu_get_baco_capability(void *handle, bool *cap);
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
-
-int smu_baco_enter(struct smu_context *smu);
-int smu_baco_exit(struct smu_context *smu);
-int smu_baco_set_state(void *handle, int state);
-
-
 bool smu_mode1_reset_is_support(struct smu_context *smu);
 bool smu_mode2_reset_is_support(struct smu_context *smu);
 int smu_mode1_reset(struct smu_context *smu);
-int smu_mode2_reset(void *handle);
 
 extern const struct amd_ip_funcs smu_ip_funcs;
 
@@ -1302,68 +1268,24 @@ extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
 bool is_support_sw_smu(struct amdgpu_device *adev);
 bool is_support_cclk_dpm(struct amdgpu_device *adev);
-int smu_reset(struct smu_context *smu);
-int smu_sys_get_pp_table(void *handle, char **table);
-int smu_sys_set_pp_table(void *handle, const char *buf, size_t size);
-int smu_get_power_num_states(void *handle, struct pp_states_info *state_info);
-enum amd_pm_state_type smu_get_current_power_state(void *handle);
 int smu_write_watermarks_table(struct smu_context *smu);
-int smu_set_watermarks_for_clock_ranges(
-		struct smu_context *smu,
-		struct pp_smu_wm_range_sets *clock_ranges);
-
-/* smu to display interface */
-extern int smu_display_configuration_change(struct smu_context *smu, const
-					    struct amd_pp_display_configuration
-					    *display_config);
-extern int smu_dpm_set_power_gate(void *handle, uint32_t block_type, bool gate);
-extern int smu_handle_task(struct smu_context *smu,
-			   enum amd_dpm_forced_level level,
-			   enum amd_pp_task task_id,
-			   bool lock_needed);
-extern int smu_handle_dpm_task(void *handle,
-			       enum amd_pp_task task_id,
-			       enum amd_pm_state_type *user_state);
-int smu_switch_power_profile(void *handle,
-			     enum PP_SMC_POWER_PROFILE type,
-			     bool en);
+
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			   uint32_t *min, uint32_t *max);
-u32 smu_get_mclk(void *handle, bool low);
-u32 smu_get_sclk(void *handle, bool low);
+
 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t min, uint32_t max);
-enum amd_dpm_forced_level smu_get_performance_level(void *handle);
-int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level);
-int smu_set_display_count(struct smu_context *smu, uint32_t count);
-int smu_set_ac_dc(struct smu_context *smu);
-int smu_sys_get_pp_feature_mask(void *handle, char *buf);
-int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask);
-int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask);
-int smu_set_mp1_state(void *handle,
-		      enum pp_mp1_state mp1_state);
-int smu_set_df_cstate(void *handle,
-		      enum pp_df_cstate state);
-int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
-					 struct pp_smu_nv_clock_table *max_clocks);
-
-int smu_get_uclk_dpm_states(struct smu_context *smu,
-			    unsigned int *clock_values_in_khz,
-			    unsigned int *num_states);
+int smu_set_ac_dc(struct smu_context *smu);
 
-int smu_get_dpm_clock_table(struct smu_context *smu,
-			    struct dpm_clocks *clock_table);
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
 
 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
 
-ssize_t smu_sys_get_gpu_metrics(void *handle, void **table);
-
-int smu_enable_mgpu_fan_boost(void *handle);
-int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state);
-
 int smu_set_light_sbr(struct smu_context *smu, bool enable);
 
+int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+		       uint64_t event_arg);
+
 #endif
 #endif
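After the prototype cull, amdgpu_smu.h exposes a single cross-layer entry point for SMU event waiting. A hypothetical caller, for illustration only (the meaning of event_arg is ASIC-specific; 0 is shown as a placeholder):

	/* Hypothetical: after triggering a GPU reset, block until the SMU
	 * signals completion before touching the hardware again. */
	int r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
	if (r)
		dev_err(adev->dev, "wait for SMU reset completion failed (%d)\n", r);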
diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
index df2ead254f37..d23533bda002 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
@@ -435,8 +435,12 @@ typedef struct {
   uint8_t  GpioI2cSda;          // Serial Data
   uint16_t spare5;
 
+  uint16_t XgmiMaxCurrent;      // in Amps
+  int8_t   XgmiOffset;          // in Amps
+  uint8_t  Padding_TelemetryXgmi;
+
   //reserved
-  uint32_t reserved[16];
+  uint32_t reserved[15];
 
 } PPTable_t;
@@ -481,7 +485,10 @@ typedef struct {
   uint16_t TemperatureAllHBM[4]  ;
   uint32_t GfxBusyAcc            ;
   uint32_t DramBusyAcc           ;
-  uint32_t Spare[4];
+  uint32_t EnergyAcc64bitLow     ; //15.259uJ resolution
+  uint32_t EnergyAcc64bitHigh    ;
+  uint32_t TimeStampLow          ; //10ns resolution
+  uint32_t TimeStampHigh         ;
 
   // Padding - ignore
   uint32_t MmHubPadding[8]; // SMU internal use
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 5bfb60f41dd4..89a16dcd0fff 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -225,6 +225,7 @@
 	__SMU_DUMMY_MAP(DisableDeterminism),		\
 	__SMU_DUMMY_MAP(SetUclkDpmMode),		\
 	__SMU_DUMMY_MAP(LightSBR),			\
+	__SMU_DUMMY_MAP(GfxDriverResetRecovery),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index ad4db2edf1fb..d5182bbaa598 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -61,8 +61,8 @@
 #define LINK_WIDTH_MAX				6
 #define LINK_SPEED_MAX				3
 
-static __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160};
+static const __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160};
 
 static const
 struct smu_temperature_range __maybe_unused smu11_thermal_policy[] =
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index 80208e1eefc9..8145e1cbf181 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -26,7 +26,7 @@
 #include "amdgpu_smu.h"
 
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_DRIVER_IF_VERSION_ALDE 0x5
+#define SMU13_DRIVER_IF_VERSION_ALDE 0x6
 
 /* MP Apertures */
 #define MP0_Public			0x03800000
@@ -268,5 +268,8 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu);
 
 int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
 			      bool enablement);
 
+int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+			     uint64_t event_arg);
+
 #endif
 #endif
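The Aldebaran metrics table now carries 64-bit energy and timestamp accumulators split into 32-bit halves. A sketch of how a consumer would reassemble them, assuming `metrics` points at the metrics structure above; the scaling factors come from the field comments:

	/* 15.259 uJ per LSB for energy, 10 ns per LSB for the timestamp. */
	uint64_t energy_acc = ((uint64_t)metrics->EnergyAcc64bitHigh << 32) |
			      metrics->EnergyAcc64bitLow;
	uint64_t timestamp  = ((uint64_t)metrics->TimeStampHigh << 32) |
			      metrics->TimeStampLow;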
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index f5d59fa3a030..f5fe540cd536 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1297,19 +1297,18 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GPU_LOAD:
-		if (has_gfx_busy) {
+		if (!has_gfx_busy)
+			ret = -EOPNOTSUPP;
+		else {
 			ret = smum_send_msg_to_smc(hwmgr,
 						   PPSMC_MSG_GetGfxBusy,
 						   &activity_percent);
 			if (!ret)
-				activity_percent = activity_percent > 100 ? 100 : activity_percent;
+				*((uint32_t *)value) = min(activity_percent, (u32)100);
 			else
-				return -EIO;
-			*((uint32_t *)value) = activity_percent;
-			return 0;
-		} else {
-			return -EOPNOTSUPP;
+				ret = -EIO;
 		}
+		break;
 	default:
 		ret = -EOPNOTSUPP;
 		break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 7edafef095a3..0541bfc81c1b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -1224,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 		    (hwmgr->chip_id == CHIP_POLARIS10) ||
 		    (hwmgr->chip_id == CHIP_POLARIS11) ||
 		    (hwmgr->chip_id == CHIP_POLARIS12) ||
-		    (hwmgr->chip_id == CHIP_TONGA))
+		    (hwmgr->chip_id == CHIP_TONGA) ||
+		    (hwmgr->chip_id == CHIP_TOPAZ))
 			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
@@ -3330,7 +3331,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
 						!hwmgr->display_config->multi_monitor_in_sync) ||
-						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+						(hwmgr->display_config->num_display &&
+						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
 	disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
 				 disable_mclk_switching_for_display;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index b6d7b7b224a9..1a097e608808 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -52,8 +52,8 @@
 #define LINK_WIDTH_MAX 6
 #define LINK_SPEED_MAX 3
 
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
 
 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 213c9c6b4462..d3177a534fdf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -57,8 +57,8 @@
 #define LINK_WIDTH_MAX 6
 #define LINK_SPEED_MAX 3
 
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
 
 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 {
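Two behavioural notes on the hwmgr hunks above: smu10_read_sensor() now clamps the SMC-reported busy percentage with min() instead of an open-coded ternary and funnels every exit through the common break, and smu7_apply_state_adjust_rules() only consults smu7_vblank_too_short() when at least one display is active. The clamp is equivalent to:

	/* Sketch of the clamp semantics; min() comes from linux/minmax.h. */
	uint32_t activity_percent = 142;		/* out-of-range SMC reading */
	uint32_t load = min(activity_percent, (u32)100);	/* -> 100 */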
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e722adcf2f53..e0eb7ca112e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -51,8 +51,19 @@ static const struct amd_pm_funcs swsmu_pm_funcs;
 static int smu_force_smuclk_levels(struct smu_context *smu,
 				   enum smu_clk_type clk_type,
 				   uint32_t mask);
-
-int smu_sys_get_pp_feature_mask(void *handle, char *buf)
+static int smu_handle_task(struct smu_context *smu,
+			   enum amd_dpm_forced_level level,
+			   enum amd_pp_task task_id,
+			   bool lock_needed);
+static int smu_reset(struct smu_context *smu);
+static int smu_set_fan_speed_percent(void *handle, u32 speed);
+static int smu_set_fan_control_mode(struct smu_context *smu, int value);
+static int smu_set_power_limit(void *handle, uint32_t limit);
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+static int smu_sys_get_pp_feature_mask(void *handle,
+				       char *buf)
 {
 	struct smu_context *smu = handle;
 	int size = 0;
@@ -69,7 +80,8 @@ int smu_sys_get_pp_feature_mask(void *handle, char *buf)
 	return size;
 }
 
-int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask)
+static int smu_sys_set_pp_feature_mask(void *handle,
+				       uint64_t new_mask)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -142,7 +154,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
 	return ret;
 }
 
-u32 smu_get_mclk(void *handle, bool low)
+static u32 smu_get_mclk(void *handle, bool low)
 {
 	struct smu_context *smu = handle;
 	uint32_t clk_freq;
@@ -156,7 +168,7 @@ u32 smu_get_mclk(void *handle, bool low)
 	return clk_freq * 100;
 }
 
-u32 smu_get_sclk(void *handle, bool low)
+static u32 smu_get_sclk(void *handle, bool low)
 {
 	struct smu_context *smu = handle;
 	uint32_t clk_freq;
@@ -256,8 +268,9 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
  * Under this case, the smu->mutex lock protection is already enforced on
  * the parent API smu_force_performance_level of the call path.
  */
-int smu_dpm_set_power_gate(void *handle, uint32_t block_type,
-			   bool gate)
+static int smu_dpm_set_power_gate(void *handle,
+				  uint32_t block_type,
+				  bool gate)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -406,8 +419,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
 	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
 }
 
-int smu_get_power_num_states(void *handle,
-			     struct pp_states_info *state_info)
+static int smu_get_power_num_states(void *handle,
+				    struct pp_states_info *state_info)
 {
 	if (!state_info)
 		return -EINVAL;
@@ -442,7 +455,8 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
 }
 
 
-int smu_sys_get_pp_table(void *handle, char **table)
+static int smu_sys_get_pp_table(void *handle,
+				char **table)
 {
 	struct smu_context *smu = handle;
 	struct smu_table_context *smu_table = &smu->smu_table;
@@ -468,7 +482,9 @@ int smu_sys_get_pp_table(void *handle, char **table)
 	return powerplay_table_size;
 }
 
-int smu_sys_set_pp_table(void *handle, const char *buf, size_t size)
+static int smu_sys_set_pp_table(void *handle,
+				const char *buf,
+				size_t size)
 {
 	struct smu_context *smu = handle;
 	struct smu_table_context *smu_table = &smu->smu_table;
@@ -632,6 +648,7 @@ err0_out:
 	return ret;
 }
 
+
 static int smu_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1337,7 +1354,7 @@ static int smu_disable_dpms(struct smu_context *smu)
 	bool use_baco = !smu->is_apu &&
 		((amdgpu_in_reset(adev) &&
 		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
+		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
 
 	/*
 	 * For custom pptable uploading, skip the DPM features
@@ -1430,7 +1447,7 @@ static int smu_hw_fini(void *handle)
 	return smu_smc_hw_cleanup(smu);
 }
 
-int smu_reset(struct smu_context *smu)
+static int smu_reset(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	int ret;
@@ -1474,7 +1491,8 @@ static int smu_suspend(void *handle)
 
 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-	if (smu->is_apu)
+	/* skip CGPG when in S0ix */
+	if (smu->is_apu && !adev->in_s0ix)
 		smu_set_gfx_cgpg(&adev->smu, false);
 
 	return 0;
@@ -1518,9 +1536,10 @@ static int smu_resume(void *handle)
 	return 0;
 }
 
-int smu_display_configuration_change(struct smu_context *smu,
-				     const struct amd_pp_display_configuration *display_config)
+static int smu_display_configuration_change(void *handle,
+					    const struct amd_pp_display_configuration *display_config)
 {
+	struct smu_context *smu = handle;
 	int index = 0;
 	int num_of_active_display = 0;
@@ -1676,10 +1695,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 	return ret;
 }
 
-int smu_handle_task(struct smu_context *smu,
-		    enum amd_dpm_forced_level level,
-		    enum amd_pp_task task_id,
-		    bool lock_needed)
+static int smu_handle_task(struct smu_context *smu,
+			   enum amd_dpm_forced_level level,
+			   enum amd_pp_task task_id,
+			   bool lock_needed)
 {
 	int ret = 0;
@@ -1711,9 +1730,9 @@ out:
 	return ret;
 }
 
-int smu_handle_dpm_task(void *handle,
-			enum amd_pp_task task_id,
-			enum amd_pm_state_type *user_state)
+static int smu_handle_dpm_task(void *handle,
+			       enum amd_pp_task task_id,
+			       enum amd_pm_state_type *user_state)
 {
 	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -1722,10 +1741,9 @@ int smu_handle_dpm_task(void *handle,
 }
 
 
-
-int smu_switch_power_profile(void *handle,
-			     enum PP_SMC_POWER_PROFILE type,
-			     bool en)
+static int smu_switch_power_profile(void *handle,
+				    enum PP_SMC_POWER_PROFILE type,
+				    bool en)
 {
 	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1761,7 +1779,7 @@ int smu_switch_power_profile(void *handle,
 	return 0;
 }
 
-enum amd_dpm_forced_level smu_get_performance_level(void *handle)
+static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 {
 	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1780,7 +1798,8 @@ enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 	return level;
 }
 
-int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
+static int smu_force_performance_level(void *handle,
+				       enum amd_dpm_forced_level level)
 {
 	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1815,8 +1834,9 @@ int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
 	return ret;
 }
 
-int smu_set_display_count(struct smu_context *smu, uint32_t count)
+static int smu_set_display_count(void *handle, uint32_t count)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1859,7 +1879,9 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
 	return ret;
 }
 
-int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
+static int smu_force_ppclk_levels(void *handle,
+				  enum pp_clock_type type,
+				  uint32_t mask)
 {
 	struct smu_context *smu = handle;
 	enum smu_clk_type clk_type;
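The bulk of the amdgpu_smu.c churn in this diff follows one pattern: formerly exported smu_* entry points become static, take an opaque void *handle, and are reached through the swsmu_pm_funcs table registered with the core. A condensed sketch of that wrapper shape (illustrative, not a specific function from this patch):

	static int example_pm_op(void *handle, uint32_t arg)
	{
		struct smu_context *smu = handle;	/* recover the context */

		if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
			return -EOPNOTSUPP;

		/* ... take smu->mutex and forward to smu->ppt_funcs ... */

		return 0;
	}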
@@ -1903,48 +1925,28 @@ int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
  * However, the mp1 state setting should still be granted
  * even if the dpm_enabled cleared.
  */
-int smu_set_mp1_state(void *handle,
-		      enum pp_mp1_state mp1_state)
+static int smu_set_mp1_state(void *handle,
+			     enum pp_mp1_state mp1_state)
 {
 	struct smu_context *smu = handle;
-	uint16_t msg;
-	int ret;
+	int ret = 0;
 
 	if (!smu->pm_enabled)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&smu->mutex);
 
-	switch (mp1_state) {
-	case PP_MP1_STATE_SHUTDOWN:
-		msg = SMU_MSG_PrepareMp1ForShutdown;
-		break;
-	case PP_MP1_STATE_UNLOAD:
-		msg = SMU_MSG_PrepareMp1ForUnload;
-		break;
-	case PP_MP1_STATE_RESET:
-		msg = SMU_MSG_PrepareMp1ForReset;
-		break;
-	case PP_MP1_STATE_NONE:
-	default:
-		mutex_unlock(&smu->mutex);
-		return 0;
-	}
-
-	ret = smu_send_smc_msg(smu, msg, NULL);
-	/* some asics may not support those messages */
-	if (ret == -EINVAL)
-		ret = 0;
-	if (ret)
-		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+	if (smu->ppt_funcs &&
+	    smu->ppt_funcs->set_mp1_state)
+		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_df_cstate(void *handle,
-		      enum pp_df_cstate state)
+static int smu_set_df_cstate(void *handle,
+			     enum pp_df_cstate state)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
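smu_set_mp1_state() no longer picks the PrepareMp1For* message itself; it defers to a per-ASIC ppt_funcs->set_mp1_state hook (arcturus wires this to smu_cmn_set_mp1_state, and navi10 wraps the same helper, later in this diff). A plausible reconstruction of where the removed message-selection logic lands; the actual smu_cmn_set_mp1_state() body is not shown in this diff:

	static int example_set_mp1_state(struct smu_context *smu,
					 enum pp_mp1_state mp1_state)
	{
		uint16_t msg;
		int ret;

		switch (mp1_state) {
		case PP_MP1_STATE_SHUTDOWN:
			msg = SMU_MSG_PrepareMp1ForShutdown;
			break;
		case PP_MP1_STATE_UNLOAD:
			msg = SMU_MSG_PrepareMp1ForUnload;
			break;
		case PP_MP1_STATE_RESET:
			msg = SMU_MSG_PrepareMp1ForReset;
			break;
		default:
			return 0;	/* nothing to do for PP_MP1_STATE_NONE */
		}

		ret = smu_send_smc_msg(smu, msg, NULL);
		if (ret)
			dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

		return ret;
	}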
@@ -2003,9 +2005,10 @@ int smu_write_watermarks_table(struct smu_context *smu)
 	return ret;
 }
 
-int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
-		struct pp_smu_wm_range_sets *clock_ranges)
+static int smu_set_watermarks_for_clock_ranges(void *handle,
+					       struct pp_smu_wm_range_sets *clock_ranges)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2092,41 +2095,39 @@ const struct amdgpu_ip_block_version smu_v13_0_ip_block =
 	.funcs = &smu_ip_funcs,
 };
 
-int smu_load_microcode(struct smu_context *smu)
+static int smu_load_microcode(void *handle)
 {
+	struct smu_context *smu = handle;
+	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->pm_enabled)
 		return -EOPNOTSUPP;
 
-	mutex_lock(&smu->mutex);
+	/* This should be used for non PSP loading */
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
+		return 0;
 
-	if (smu->ppt_funcs->load_microcode)
+	if (smu->ppt_funcs->load_microcode) {
 		ret = smu->ppt_funcs->load_microcode(smu);
+		if (ret) {
+			dev_err(adev->dev, "Load microcode failed\n");
+			return ret;
+		}
+	}
 
-	mutex_unlock(&smu->mutex);
-
-	return ret;
-}
-
-int smu_check_fw_status(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->check_fw_status)
+	if (smu->ppt_funcs->check_fw_status) {
 		ret = smu->ppt_funcs->check_fw_status(smu);
-
-	mutex_unlock(&smu->mutex);
+		if (ret) {
+			dev_err(adev->dev, "SMC is not ready\n");
+			return ret;
+		}
+	}
 
 	return ret;
 }
 
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 {
 	int ret = 0;
 
@@ -2140,7 +2141,7 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 	return ret;
 }
 
-int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
 {
 	struct smu_context *smu = handle;
 	u32 percent;
@@ -2199,7 +2200,7 @@ int smu_get_power_limit(struct smu_context *smu,
 	return ret;
 }
 
-int smu_set_power_limit(void *handle, uint32_t limit)
+static int smu_set_power_limit(void *handle, uint32_t limit)
 {
 	struct smu_context *smu = handle;
 	uint32_t limit_type = limit >> 24;
@@ -2255,7 +2256,9 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
 	return ret;
 }
 
-int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
+static int smu_print_ppclk_levels(void *handle,
+				  enum pp_clock_type type,
+				  char *buf)
 {
 	struct smu_context *smu = handle;
 	enum smu_clk_type clk_type;
@@ -2296,9 +2299,9 @@ int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
 	return smu_print_smuclk_levels(smu, clk_type, buf);
 }
 
-int smu_od_edit_dpm_table(void *handle,
-			  enum PP_OD_DPM_TABLE_COMMAND type,
-			  long *input, uint32_t size)
+static int smu_od_edit_dpm_table(void *handle,
+				 enum PP_OD_DPM_TABLE_COMMAND type,
+				 long *input, uint32_t size)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2317,7 +2320,10 @@ int smu_od_edit_dpm_table(void *handle,
 	return ret;
 }
 
-int smu_read_sensor(void *handle, int sensor, void *data, int *size_arg)
+static int smu_read_sensor(void *handle,
+			   int sensor,
+			   void *data,
+			   int *size_arg)
 {
 	struct smu_context *smu = handle;
 	struct smu_umd_pstate_table *pstate_table =
@@ -2384,7 +2390,7 @@ unlock:
 	return ret;
 }
 
-int smu_get_power_profile_mode(void *handle, char *buf)
+static int smu_get_power_profile_mode(void *handle, char *buf)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2402,7 +2408,9 @@ int smu_get_power_profile_mode(void *handle, char *buf)
 	return ret;
 }
 
-int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
+static int smu_set_power_profile_mode(void *handle,
+				      long *param,
+				      uint32_t param_size)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2420,7 +2428,7 @@ int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
 }
 
 
-u32 smu_get_fan_control_mode(void *handle)
+static u32 smu_get_fan_control_mode(void *handle)
 {
 	struct smu_context *smu = handle;
 	u32 ret = 0;
@@ -2438,7 +2446,7 @@ u32 smu_get_fan_control_mode(void *handle)
 	return ret;
 }
 
-int smu_set_fan_control_mode(struct smu_context *smu, int value)
+static int smu_set_fan_control_mode(struct smu_context *smu, int value)
 {
 	int ret = 0;
 
@@ -2463,14 +2471,15 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
 	return ret;
 }
 
-void smu_pp_set_fan_control_mode(void *handle, u32 value) {
+static void smu_pp_set_fan_control_mode(void *handle, u32 value)
+{
 	struct smu_context *smu = handle;
 
 	smu_set_fan_control_mode(smu, value);
 }
 
 
-int smu_get_fan_speed_percent(void *handle, u32 *speed)
+static int smu_get_fan_speed_percent(void *handle, u32 *speed)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2494,7 +2503,7 @@ int smu_get_fan_speed_percent(void *handle, u32 *speed)
 	return ret;
 }
 
-int smu_set_fan_speed_percent(void *handle, u32 speed)
+static int smu_set_fan_speed_percent(void *handle, u32 speed)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2517,7 +2526,7 @@ int smu_set_fan_speed_percent(void *handle, u32 speed)
 	return ret;
 }
 
-int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
+static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2538,8 +2547,9 @@ int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 	return ret;
 }
 
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2554,10 +2564,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 	return ret;
 }
 
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
-				       enum smu_clk_type clk_type,
-				       struct pp_clock_levels_with_latency *clocks)
+static int smu_get_clock_by_type_with_latency(void *handle,
+					      enum amd_pp_clock_type type,
+					      struct pp_clock_levels_with_latency *clocks)
 {
+	struct smu_context *smu = handle;
+	enum smu_clk_type clk_type;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2565,17 +2577,38 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_clock_by_type_with_latency)
+	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
+		switch (type) {
+		case amd_pp_sys_clock:
+			clk_type = SMU_GFXCLK;
+			break;
+		case amd_pp_mem_clock:
+			clk_type = SMU_MCLK;
+			break;
+		case amd_pp_dcef_clock:
+			clk_type = SMU_DCEFCLK;
+			break;
+		case amd_pp_disp_clock:
+			clk_type = SMU_DISPCLK;
+			break;
+		default:
+			dev_err(smu->adev->dev, "Invalid clock type!\n");
+			mutex_unlock(&smu->mutex);
+			return -EINVAL;
+		}
+
 		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+	}
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_display_clock_voltage_request(struct smu_context *smu,
-				      struct pp_display_clock_request *clock_req)
+static int smu_display_clock_voltage_request(void *handle,
+					     struct pp_display_clock_request *clock_req)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2592,8 +2625,10 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
 }
 
 
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+static int smu_display_disable_memory_clock_switch(void *handle,
+						   bool disable_memory_clock_switch)
 {
+	struct smu_context *smu = handle;
 	int ret = -EINVAL;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2609,8 +2644,8 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
 	return ret;
 }
 
-int smu_set_xgmi_pstate(void *handle,
-			uint32_t pstate)
+static int smu_set_xgmi_pstate(void *handle,
+			       uint32_t pstate)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2631,49 +2666,7 @@ int smu_set_xgmi_pstate(void *handle,
 	return ret;
 }
 
-int smu_set_azalia_d3_pme(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->set_azalia_d3_pme)
-		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	return ret;
-}
-
-/*
- * On system suspending or resetting, the dpm_enabled
- * flag will be cleared. So that those SMU services which
- * are not supported will be gated.
- *
- * However, the baco/mode1 reset should still be granted
- * as they are still supported and necessary.
- */
-bool smu_baco_is_support(struct smu_context *smu)
-{
-	bool ret = false;
-
-	if (!smu->pm_enabled)
-		return false;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
-		ret = smu->ppt_funcs->baco_is_support(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	return ret;
-}
-
-int smu_get_baco_capability(void *handle, bool *cap)
+static int smu_get_baco_capability(void *handle, bool *cap)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2693,60 +2686,7 @@ int smu_get_baco_capability(void *handle, bool *cap)
 	return ret;
 }
 
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
-{
-	if (smu->ppt_funcs->baco_get_state)
-		return -EINVAL;
-
-	mutex_lock(&smu->mutex);
-	*state = smu->ppt_funcs->baco_get_state(smu);
-	mutex_unlock(&smu->mutex);
-
-	return 0;
-}
-
-int smu_baco_enter(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->baco_enter)
-		ret = smu->ppt_funcs->baco_enter(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	if (ret)
-		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
-
-	return ret;
-}
-
-int smu_baco_exit(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->baco_exit)
-		ret = smu->ppt_funcs->baco_exit(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	if (ret)
-		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
-
-	return ret;
-}
-
-int smu_baco_set_state(void *handle, int state)
+static int smu_baco_set_state(void *handle, int state)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2831,7 +2771,7 @@ int smu_mode1_reset(struct smu_context *smu)
 	return ret;
 }
 
-int smu_mode2_reset(void *handle)
+static int smu_mode2_reset(void *handle)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2852,9 +2792,10 @@ int smu_mode2_reset(void *handle)
 	return ret;
 }
 
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
-					 struct pp_smu_nv_clock_table *max_clocks)
+static int smu_get_max_sustainable_clocks_by_dc(void *handle,
+						struct pp_smu_nv_clock_table *max_clocks)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2870,10 +2811,11 @@ int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
 	return ret;
 }
 
-int smu_get_uclk_dpm_states(struct smu_context *smu,
-			    unsigned int *clock_values_in_khz,
-			    unsigned int *num_states)
+static int smu_get_uclk_dpm_states(void *handle,
+				   unsigned int *clock_values_in_khz,
+				   unsigned int *num_states)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2889,7 +2831,7 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
 	return ret;
 }
 
-enum amd_pm_state_type smu_get_current_power_state(void *handle)
+static enum amd_pm_state_type smu_get_current_power_state(void *handle)
 {
 	struct smu_context *smu = handle;
 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
@@ -2907,9 +2849,10 @@ enum amd_pm_state_type smu_get_current_power_state(void *handle)
 	return pm_state;
 }
 
-int smu_get_dpm_clock_table(struct smu_context *smu,
-			    struct dpm_clocks *clock_table)
+static int smu_get_dpm_clock_table(void *handle,
+				   struct dpm_clocks *clock_table)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
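With the smu_get_clock_by_type_with_latency() rewrite above, the wrapper accepts the display-visible enum amd_pp_clock_type and performs the SMU clock-type translation itself (sys to GFXCLK, mem to MCLK, dcef to DCEFCLK, disp to DISPCLK, anything else -EINVAL). A hypothetical display-side call through the amd_pm_funcs table, for illustration only:

	struct pp_clock_levels_with_latency clocks;
	int r;

	r = adev->powerplay.pp_funcs->get_clock_by_type_with_latency(
			adev->powerplay.pp_handle, amd_pp_dcef_clock, &clocks);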
@@ -2945,7 +2888,7 @@ ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
 	return size;
 }
 
-int smu_enable_mgpu_fan_boost(void *handle)
+static int smu_enable_mgpu_fan_boost(void *handle)
 {
 	struct smu_context *smu = handle;
 	int ret = 0;
@@ -2963,8 +2906,10 @@ int smu_enable_mgpu_fan_boost(void *handle)
 	return ret;
 }
 
-int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
+static int smu_gfx_state_change_set(void *handle,
+				    uint32_t state)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
@@ -3026,4 +2971,31 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
 	.get_power_profile_mode = smu_get_power_profile_mode,
 	.force_clock_level = smu_force_ppclk_levels,
 	.print_clock_levels = smu_print_ppclk_levels,
+	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
+	.get_dpm_clock_table = smu_get_dpm_clock_table,
+	.display_configuration_change = smu_display_configuration_change,
+	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
+	.display_clock_voltage_request = smu_display_clock_voltage_request,
+	.set_active_display_count = smu_set_display_count,
+	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
+	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
+	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
+	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
+	.load_firmware = smu_load_microcode,
+	.gfx_state_change_set = smu_gfx_state_change_set,
 };
+
+int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+		       uint64_t event_arg)
+{
+	int ret = -EINVAL;
+	struct smu_context *smu = &adev->smu;
+
+	if (smu->ppt_funcs->wait_for_event) {
+		mutex_lock(&smu->mutex);
+		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
+		mutex_unlock(&smu->mutex);
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index bbc03092b0a9..77693bf0840c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2365,6 +2365,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.get_fan_parameters = arcturus_get_fan_parameters,
 	.interrupt_work = smu_v11_0_interrupt_work,
 	.set_light_sbr = smu_v11_0_set_light_sbr,
+	.set_mp1_state = smu_cmn_set_mp1_state,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 3d0de2c958fa..f827096dc849 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -431,6 +431,30 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
 	return 0;
 }
 
+static int navi10_set_mp1_state(struct smu_context *smu,
+				enum pp_mp1_state mp1_state)
+{
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t mp1_fw_flags;
+	int ret = 0;
+
+	ret = smu_cmn_set_mp1_state(smu, mp1_state);
+	if (ret)
+		return ret;
+
+	if (mp1_state == PP_MP1_STATE_UNLOAD) {
+		mp1_fw_flags = RREG32_PCIE(MP1_Public |
+					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+		mp1_fw_flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK;
+
+		WREG32_PCIE(MP1_Public |
+			    (smnMP1_FIRMWARE_FLAGS & 0xffffffff), mp1_fw_flags);
+	}
+
+	return 0;
+}
+
 static int navi10_setup_pptable(struct smu_context *smu)
 {
 	int ret = 0;
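navi10_set_mp1_state above layers an ASIC quirk on top of the common handler: on PP_MP1_STATE_UNLOAD it clears the interrupt-enable bit in the MP1 firmware flags with a read-modify-write. A self-contained sketch of that flag update, with a hypothetical mask and an in-memory stand-in for the register:

/*
 * Sketch of the unload-time read-modify-write; DEMO_* names are
 * hypothetical, not the real MP1 definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_INTERRUPTS_ENABLED_MASK 0x00000001u	/* hypothetical bit */

static uint32_t demo_mp1_flags = 0x0000000fu;		/* fake firmware flags */

static uint32_t demo_rreg(void)     { return demo_mp1_flags; }
static void demo_wreg(uint32_t val) { demo_mp1_flags = val; }

int main(void)
{
	/* read, clear only the interrupt-enable bit, write back */
	uint32_t flags = demo_rreg();

	flags &= ~DEMO_INTERRUPTS_ENABLED_MASK;
	demo_wreg(flags);

	printf("flags after unload prep: 0x%08x\n", (unsigned)demo_mp1_flags);
	return 0;
}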
@@ -3031,6 +3055,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.get_fan_parameters = navi10_get_fan_parameters,
 	.post_init = navi10_post_smu_init,
 	.interrupt_work = smu_v11_0_interrupt_work,
+	.set_mp1_state = navi10_set_mp1_state,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 3621884c3293..72d9c1be1835 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3110,6 +3110,23 @@ static int sienna_cichlid_system_features_control(struct smu_context *smu,
 	return smu_v11_0_system_features_control(smu, en);
 }
 
+static int sienna_cichlid_set_mp1_state(struct smu_context *smu,
+					enum pp_mp1_state mp1_state)
+{
+	int ret;
+
+	switch (mp1_state) {
+	case PP_MP1_STATE_UNLOAD:
+		ret = smu_cmn_set_mp1_state(smu, mp1_state);
+		break;
+	default:
+		/* Ignore others */
+		ret = 0;
+	}
+
+	return ret;
+}
+
 static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
 	.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -3195,6 +3212,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.get_fan_parameters = sienna_cichlid_get_fan_parameters,
 	.interrupt_work = smu_v11_0_interrupt_work,
 	.gpo_control = sienna_cichlid_gpo_control,
+	.set_mp1_state = sienna_cichlid_set_mp1_state,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 0d137af1a78a..6274cae4a065 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -561,6 +561,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
 		smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
 		break;
 	case 3:
+	case 4:
 	default:
 		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
 		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index c5b387360f01..7bcd35840bf2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
+	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 	uint32_t feature_mask[2];
 	uint64_t feature_enabled;
 
+	/* we need to re-init after suspend so return false */
+	if (adev->in_suspend)
+		return false;
+
 	ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
 
 	if (ret)
@@ -1889,6 +1894,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
 	.get_ppt_limit = vangogh_get_ppt_limit,
 	.get_power_limit = vangogh_get_power_limit,
 	.set_power_limit = vangogh_set_power_limit,
+	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
 };
 
 void vangogh_set_ppt_funcs(struct smu_context *smu)
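The smu_v11_0.c hunk above folds the new v3.4 firmware-info header into the v3.3 parsing path, presumably because v3.4 only appends fields to the v3.3 layout. A sketch of that version-dispatch pattern, with a hypothetical trimmed header struct rather than the real ATOM layout:

/*
 * Sketch only: demo_fw_info_v3_3 is a made-up two-field struct; the point
 * is the fallthrough grouping, where v3.4 reuses the v3.3 parser.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_fw_info_v3_3 {
	uint32_t firmware_revision;
};

static uint32_t demo_parse_revision(const void *header, uint8_t minor)
{
	const struct demo_fw_info_v3_3 *v3_3;

	switch (minor) {
	case 1:
		return 0;	/* a real parser would use the v3.1 layout */
	case 3:
	case 4:			/* v3.4 parses fine with the v3.3 layout */
	default:
		v3_3 = header;
		return v3_3->firmware_revision;
	}
}

int main(void)
{
	struct demo_fw_info_v3_3 hdr = { .firmware_revision = 0x42 };

	printf("revision: 0x%x\n", (unsigned)demo_parse_revision(&hdr, 4));
	return 0;
}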
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 9813a86ca31a..bca02a9fb489 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -126,7 +126,8 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
 	MSG_MAP(SetExecuteDMATest,		PPSMC_MSG_SetExecuteDMATest,		0),
 	MSG_MAP(EnableDeterminism,		PPSMC_MSG_EnableDeterminism,		0),
 	MSG_MAP(DisableDeterminism,		PPSMC_MSG_DisableDeterminism,		0),
-	MSG_MAP(SetUclkDpmMode,			PPSMC_MSG_SetUclkDpmMode,		0),
+	MSG_MAP(SetUclkDpmMode,			PPSMC_MSG_SetUclkDpmMode,		0),
+	MSG_MAP(GfxDriverResetRecovery,		PPSMC_MSG_GfxDriverResetRecovery,	0),
 };
 
 static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -1265,6 +1266,233 @@ static bool aldebaran_is_dpm_running(struct smu_context *smu)
 	return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
+static void aldebaran_fill_i2c_req(SwI2cRequest_t *req, bool write,
+				   uint8_t address, uint32_t numbytes,
+				   uint8_t *data)
+{
+	int i;
+
+	req->I2CcontrollerPort = 0;
+	req->I2CSpeed = 2;
+	req->SlaveAddress = address;
+	req->NumCmds = numbytes;
+
+	for (i = 0; i < numbytes; i++) {
+		SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
+
+		/* First 2 bytes are always write for lower 2b EEPROM address */
+		if (i < 2)
+			cmd->CmdConfig = CMDCONFIG_READWRITE_MASK;
+		else
+			cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0;
+
+
+		/* Add RESTART for read after address filled */
+		cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+		/* Add STOP in the end */
+		cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+		/* Fill with data regardless if read or write to simplify code */
+		cmd->ReadWriteData = data[i];
+	}
+}
+
+static int aldebaran_i2c_read_data(struct i2c_adapter *control,
+				   uint8_t address,
+				   uint8_t *data,
+				   uint32_t numbytes)
+{
+	uint32_t i, ret = 0;
+	SwI2cRequest_t req;
+	struct amdgpu_device *adev = to_amdgpu_device(control);
+	struct smu_table_context *smu_table = &adev->smu.smu_table;
+	struct smu_table *table = &smu_table->driver_table;
+
+	if (numbytes > MAX_SW_I2C_COMMANDS) {
+		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+			numbytes, MAX_SW_I2C_COMMANDS);
+		return -EINVAL;
+	}
+
+	memset(&req, 0, sizeof(req));
+	aldebaran_fill_i2c_req(&req, false, address, numbytes, data);
+
+	mutex_lock(&adev->smu.mutex);
+	/* Now read data starting with that address */
+	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+				   true);
+	mutex_unlock(&adev->smu.mutex);
+
+	if (!ret) {
+		SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+		/* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
+		for (i = 0; i < numbytes; i++)
+			data[i] = res->SwI2cCmds[i].ReadWriteData;
+
+		dev_dbg(adev->dev, "aldebaran_i2c_read_data, address = %x, bytes = %d, data :",
+			(uint16_t)address, numbytes);
+
+		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+			       8, 1, data, numbytes, false);
+	} else
+		dev_err(adev->dev, "aldebaran_i2c_read_data - error occurred :%x", ret);
+
+	return ret;
+}
+
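aldebaran_fill_i2c_req above reserves the first two command slots for the 16-bit EEPROM offset (always written), inserts a RESTART after the address phase of a read, and marks the final slot with STOP. A userspace sketch of that CmdConfig placement, with hypothetical mask values; only the flag logic mirrors the driver:

/*
 * Sketch of per-command flag placement; DEMO_* masks are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_READWRITE_MASK 0x1u
#define DEMO_RESTART_MASK   0x2u
#define DEMO_STOP_MASK      0x4u

static uint32_t demo_cmd_config(uint32_t i, uint32_t numbytes, bool write)
{
	uint32_t cfg;

	/* first two slots carry the EEPROM address and are always writes */
	cfg = (i < 2 || write) ? DEMO_READWRITE_MASK : 0;

	/* a read needs a RESTART right after the address bytes go out */
	if (i == 2 && !write)
		cfg |= DEMO_RESTART_MASK;

	/* the final slot terminates the transaction */
	if (i == numbytes - 1)
		cfg |= DEMO_STOP_MASK;

	return cfg;
}

int main(void)
{
	uint32_t i, n = 6;

	for (i = 0; i < n; i++)
		printf("cmd %u: read cfg=0x%x, write cfg=0x%x\n", (unsigned)i,
		       (unsigned)demo_cmd_config(i, n, false),
		       (unsigned)demo_cmd_config(i, n, true));
	return 0;
}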
+static int aldebaran_i2c_write_data(struct i2c_adapter *control,
+				    uint8_t address,
+				    uint8_t *data,
+				    uint32_t numbytes)
+{
+	uint32_t ret;
+	SwI2cRequest_t req;
+	struct amdgpu_device *adev = to_amdgpu_device(control);
+
+	if (numbytes > MAX_SW_I2C_COMMANDS) {
+		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+			numbytes, MAX_SW_I2C_COMMANDS);
+		return -EINVAL;
+	}
+
+	memset(&req, 0, sizeof(req));
+	aldebaran_fill_i2c_req(&req, true, address, numbytes, data);
+
+	mutex_lock(&adev->smu.mutex);
+	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+	mutex_unlock(&adev->smu.mutex);
+
+	if (!ret) {
+		dev_dbg(adev->dev, "aldebaran_i2c_write(), address = %x, bytes = %d , data: ",
+			(uint16_t)address, numbytes);
+
+		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+			       8, 1, data, numbytes, false);
+		/*
+		 * According to EEPROM spec there is a MAX of 10 ms required for
+		 * EEPROM to flush internal RX buffer after STOP was issued at the
+		 * end of write transaction. During this time the EEPROM will not be
+		 * responsive to any more commands - so wait a bit more.
+		 */
+		msleep(10);
+
+	} else
+		dev_err(adev->dev, "aldebaran_i2c_write- error occurred :%x", ret);
+
+	return ret;
+}
+
+static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
+			      struct i2c_msg *msgs, int num)
+{
+	uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+	uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+	for (i = 0; i < num; i++) {
+		/*
+		 * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
+		 * once and hence the data needs to be spliced into chunks and sent each
+		 * chunk separately
+		 */
+		data_size = msgs[i].len - 2;
+		data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+		next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+		data_ptr = msgs[i].buf + 2;
+
+		for (j = 0; j < data_size / data_chunk_size; j++) {
+			/* Insert the EEPROM dest address, bits 0-15 */
+			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+			data_chunk[1] = (next_eeprom_addr & 0xff);
+
+			if (msgs[i].flags & I2C_M_RD) {
+				ret = aldebaran_i2c_read_data(i2c_adap,
+							      (uint8_t)msgs[i].addr,
+							      data_chunk, MAX_SW_I2C_COMMANDS);
+
+				memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+			} else {
+
+				memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+				ret = aldebaran_i2c_write_data(i2c_adap,
+							       (uint8_t)msgs[i].addr,
+							       data_chunk, MAX_SW_I2C_COMMANDS);
+			}
+
+			if (ret) {
+				num = -EIO;
+				goto fail;
+			}
+
+			next_eeprom_addr += data_chunk_size;
+			data_ptr += data_chunk_size;
+		}
+
+		if (data_size % data_chunk_size) {
+			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+			data_chunk[1] = (next_eeprom_addr & 0xff);
+
+			if (msgs[i].flags & I2C_M_RD) {
+				ret = aldebaran_i2c_read_data(i2c_adap,
+							      (uint8_t)msgs[i].addr,
+							      data_chunk, (data_size % data_chunk_size) + 2);
+
+				memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+			} else {
+				memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+				ret = aldebaran_i2c_write_data(i2c_adap,
+							       (uint8_t)msgs[i].addr,
+							       data_chunk, (data_size % data_chunk_size) + 2);
+			}
+
+			if (ret) {
+				num = -EIO;
+				goto fail;
+			}
+		}
+	}
+
+fail:
+	return num;
+}
+
+static u32 aldebaran_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+
+static const struct i2c_algorithm aldebaran_i2c_algo = {
+	.master_xfer = aldebaran_i2c_xfer,
+	.functionality = aldebaran_i2c_func,
+};
+
+static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+{
+	struct amdgpu_device *adev = to_amdgpu_device(control);
+	int res;
+
+	control->owner = THIS_MODULE;
+	control->class = I2C_CLASS_SPD;
+	control->dev.parent = &adev->pdev->dev;
+	control->algo = &aldebaran_i2c_algo;
+	snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+
+	res = i2c_add_adapter(control);
+	if (res)
+		DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+	return res;
+}
+
+static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+{
+	i2c_del_adapter(control);
+}
+
 static void aldebaran_get_unique_id(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
@@ -1432,6 +1660,57 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
 	return sizeof(struct gpu_metrics_v1_1);
 }
 
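aldebaran_i2c_xfer splits each message into SMU-sized requests: two slots per request carry the EEPROM offset, so each full chunk moves MAX_SW_I2C_COMMANDS - 2 data bytes, and a shorter tail chunk covers the remainder. The arithmetic, as a standalone sketch (DEMO_MAX_CMDS is a hypothetical stand-in for MAX_SW_I2C_COMMANDS):

/*
 * Chunking sketch: full chunks of (max - 2) data bytes, then the tail.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_CMDS 24	/* hypothetical request capacity */

int main(void)
{
	uint32_t data_size = 100;		/* payload bytes after the address */
	uint32_t chunk = DEMO_MAX_CMDS - 2;	/* data bytes per request */
	uint32_t eeprom_addr = 0x0000;
	uint32_t j;

	for (j = 0; j < data_size / chunk; j++) {
		printf("full chunk at 0x%04x, %u bytes\n",
		       (unsigned)eeprom_addr, (unsigned)chunk);
		eeprom_addr += chunk;
	}
	if (data_size % chunk)
		printf("tail chunk at 0x%04x, %u bytes\n",
		       (unsigned)eeprom_addr, (unsigned)(data_size % chunk));
	return 0;
}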
+static int aldebaran_mode2_reset(struct smu_context *smu)
+{
+	u32 smu_version;
+	int ret = 0, index;
+	struct amdgpu_device *adev = smu->adev;
+	int timeout = 10;
+
+	smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+					       SMU_MSG_GfxDeviceDriverReset);
+
+	mutex_lock(&smu->message_lock);
+	if (smu_version >= 0x00441400) {
+		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
+		/* This is similar to FLR, wait till max FLR timeout */
+		msleep(100);
+		dev_dbg(smu->adev->dev, "restore config space...\n");
+		/* Restore the config space saved during init */
+		amdgpu_device_load_pci_state(adev->pdev);
+
+		dev_dbg(smu->adev->dev, "wait for reset ack\n");
+		while (ret == -ETIME && timeout) {
+			ret = smu_cmn_wait_for_response(smu);
+			/* Wait a bit more time for getting ACK */
+			if (ret == -ETIME) {
+				--timeout;
+				usleep_range(500, 1000);
+				continue;
+			}
+
+			if (ret != 1) {
+				dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+					SMU_RESET_MODE_2, ret);
+				goto out;
+			}
+		}
+
+	} else {
+		dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
+			smu_version);
+	}
+
+	if (ret == 1)
+		ret = 0;
+out:
+	mutex_unlock(&smu->message_lock);
+
+	return ret;
+}
+
 static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
 {
 #if 0
@@ -1460,6 +1739,19 @@ static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
 	return true;
 }
 
+static int aldebaran_set_mp1_state(struct smu_context *smu,
+				   enum pp_mp1_state mp1_state)
+{
+	switch (mp1_state) {
+	case PP_MP1_STATE_UNLOAD:
+		return smu_cmn_set_mp1_state(smu, mp1_state);
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static const struct pptable_funcs aldebaran_ppt_funcs = {
 	/* init dpm */
 	.get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
@@ -1517,7 +1809,11 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
 	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
 	.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
 	.mode1_reset = smu_v13_0_mode1_reset,
-	.mode2_reset = smu_v13_0_mode2_reset,
+	.set_mp1_state = aldebaran_set_mp1_state,
+	.mode2_reset = aldebaran_mode2_reset,
+	.wait_for_event = smu_v13_0_wait_for_event,
+	.i2c_init = aldebaran_i2c_control_init,
+	.i2c_fini = aldebaran_i2c_control_fini,
 };
 
 void aldebaran_set_ppt_funcs(struct smu_context *smu)
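aldebaran_mode2_reset above fires the reset message without waiting, restores the saved PCI config space, and then polls for the ack, retrying on -ETIME a bounded number of times. A standalone sketch of that polling shape; demo_wait_for_response() is a hypothetical stand-in for smu_cmn_wait_for_response(), returning 1 for an accepted message as the driver expects, and -62 is the numeric value of -ETIME:

/*
 * Bounded ack-polling sketch under the assumptions above.
 */
#include <stdio.h>

#define DEMO_ETIME (-62)

static int polls;

static int demo_wait_for_response(void)
{
	/* pretend the ack only arrives on the third poll */
	return (++polls < 3) ? DEMO_ETIME : 1;
}

int main(void)
{
	int timeout = 10;
	int ret = DEMO_ETIME;

	while (ret == DEMO_ETIME && timeout) {
		ret = demo_wait_for_response();
		if (ret == DEMO_ETIME) {
			--timeout;	/* real code sleeps ~1 ms here */
			continue;
		}
	}

	/* the driver maps the "message accepted" response of 1 to success */
	if (ret == 1)
		ret = 0;

	printf("mode2 reset result: %d\n", ret);
	return 0;
}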
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index bd3a9c89dc44..30c9ac635105 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -72,8 +72,8 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
 
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
 
 int smu_v13_0_init_microcode(struct smu_context *smu)
 {
@@ -1374,19 +1374,43 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
 	return ret;
 }
 
-int smu_v13_0_mode2_reset(struct smu_context *smu)
+static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
+					     uint64_t event_arg)
 {
-	u32 smu_version;
 	int ret = 0;
-	struct amdgpu_device *adev = smu->adev;
-
-	smu_cmn_get_smc_version(smu, NULL, &smu_version);
-	if (smu_version >= 0x00440700)
-		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
-	else
-		dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n", smu_version);
-
-	/*TODO: mode2 reset wait time should be shorter, will modify it later*/
+
+	dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
+	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
+
+	return ret;
+}
+
+int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+			     uint64_t event_arg)
+{
+	int ret = -EINVAL;
+
+	switch (event) {
+	case SMU_EVENT_RESET_COMPLETE:
+		ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+int smu_v13_0_mode2_reset(struct smu_context *smu)
+{
+	int ret;
+
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+					      SMU_RESET_MODE_2, NULL);
+	/*TODO: mode2 reset wait time should be shorter, add ASIC specific func if required */
 	if (!ret)
 		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
 	return ret;
 }
 
@@ -1686,10 +1710,14 @@ int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
 				  enum smu_clk_type clk_type,
 				  uint32_t *value)
 {
-	return smu_v13_0_get_dpm_freq_by_index(smu,
-					       clk_type,
-					       0xff,
-					       value);
+	int ret;
+
+	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+	/* FW returns 0 based max level, increment by one */
+	if (!ret && value)
+		++(*value);
+
+	return ret;
 }
 
 int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 4b45953b36d8..dc7d2e71aa6f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -76,10 +76,10 @@ static void smu_cmn_read_arg(struct smu_context *smu,
 	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
 
-static int smu_cmn_wait_for_response(struct smu_context *smu)
+int smu_cmn_wait_for_response(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
-	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
+	uint32_t cur_value, i, timeout = adev->usec_timeout * 20;
 
 	for (i = 0; i < timeout; i++) {
 		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
@@ -780,3 +780,31 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 
 	header->structure_size = structure_size;
 }
+
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+			  enum pp_mp1_state mp1_state)
+{
+	enum smu_message_type msg;
+	int ret;
+
+	switch (mp1_state) {
+	case PP_MP1_STATE_SHUTDOWN:
+		msg = SMU_MSG_PrepareMp1ForShutdown;
+		break;
+	case PP_MP1_STATE_UNLOAD:
+		msg = SMU_MSG_PrepareMp1ForUnload;
+		break;
+	case PP_MP1_STATE_RESET:
+		msg = SMU_MSG_PrepareMp1ForReset;
+		break;
+	case PP_MP1_STATE_NONE:
+	default:
+		return 0;
+	}
+
+	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
+	if (ret)
+		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+
+	return ret;
+}
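The smu_v13_0_get_dpm_level_count change above fixes an off-by-one: querying index 0xff returns the highest DPM level as a zero-based index, so the helper must add one before reporting a count. A tiny sketch of the conversion; demo_get_freq_by_index is a hypothetical stand-in for smu_v13_0_get_dpm_freq_by_index():

/*
 * Sketch: firmware reports a 0-based max level index, the caller wants
 * a count, so increment on success.
 */
#include <stdint.h>
#include <stdio.h>

/* Pretend firmware exposes levels 0..3; index 0xff selects the max index. */
static int demo_get_freq_by_index(uint16_t index, uint32_t *value)
{
	if (index == 0xff) {
		*value = 3;	/* highest level index, 0 based */
		return 0;
	}
	return (index <= 3) ? 0 : -1;
}

int main(void)
{
	uint32_t count = 0;

	if (!demo_get_freq_by_index(0xff, &count))
		++count;	/* convert max index to a level count */

	printf("dpm level count: %u\n", (unsigned)count);
	return 0;
}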
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index c69250185575..da6ff6f024f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -37,6 +37,8 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
 			 enum smu_message_type msg,
 			 uint32_t *read_arg);
 
+int smu_cmn_wait_for_response(struct smu_context *smu);
+
 int smu_cmn_to_asic_specific_index(struct smu_context *smu,
 				   enum smu_cmn2asic_mapping_type type,
 				   uint32_t index);
@@ -99,5 +101,8 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
 void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+			  enum pp_mp1_state mp1_state);
+
 #endif
 #endif