Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 732
1 file changed, 387 insertions, 345 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index abe94a55ecad..16596a9ccabe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -154,17 +154,17 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
  *
  */
-static ssize_t amdgpu_get_dpm_state(struct device *dev,
-				    struct device_attribute *attr,
-				    char *buf)
+static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -189,18 +189,18 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
 
-static ssize_t amdgpu_set_dpm_state(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf,
-				    size_t count)
+static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type state;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (strncmp("battery", buf, strlen("battery")) == 0)
 		state = POWER_STATE_TYPE_BATTERY;
@@ -294,17 +294,17 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
  *
  */
-static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
-						       struct device_attribute *attr,
-						       char *buf)
+static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
+							     struct device_attribute *attr,
+							     char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_dpm_forced_level level = 0xff;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -332,10 +332,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 			"unknown");
 }
 
-static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
-						       struct device_attribute *attr,
-						       const char *buf,
-						       size_t count)
+static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
+							     struct device_attribute *attr,
+							     const char *buf,
+							     size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
@@ -343,8 +343,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	enum amd_dpm_forced_level current_level = 0xff;
 	int ret = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -383,6 +383,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 		return count;
 	}
 
+	if (adev->asic_type == CHIP_RAVEN) {
+		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
+				amdgpu_gfx_off_ctrl(adev, false);
+			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
+				amdgpu_gfx_off_ctrl(adev, true);
+		}
+	}
+
 	/* profile_exit setting is valid only when current mode is in profile mode */
 	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
 		    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
@@ -436,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 	struct pp_states_info data;
 	int i, buf_len, ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -444,8 +456,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 		ret = smu_get_power_num_states(&adev->smu, &data);
 		if (ret)
 			return ret;
-	} else if (adev->powerplay.pp_funcs->get_pp_num_states)
+	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
 		amdgpu_dpm_get_pp_num_states(adev, &data);
+	} else {
+		memset(&data, 0, sizeof(data));
+	}
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
@@ -472,8 +487,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	enum amd_pm_state_type pm = 0;
 	int i = 0, ret = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -511,8 +526,8 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (adev->pp_force_state_enabled)
 		return amdgpu_get_pp_cur_state(dev, attr, buf);
@@ -531,8 +546,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 	unsigned long idx;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (strlen(buf) == 1)
 		adev->pp_force_state_enabled = false;
@@ -589,8 +604,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	char *table = NULL;
 	int size, ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -631,8 +646,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -681,7 +696,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  * default power levels, write "r" (reset) to the file to reset them.
  *
  *
- * < For Vega20 >
+ * < For Vega20 and newer ASICs >
  *
  * Reading the file will display:
  *
@@ -736,8 +751,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 	const char delimiter[3] = {' ', '\n', '\0'};
 	uint32_t type;
 
-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (count > 127)
 		return -EINVAL;
@@ -828,8 +843,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -870,18 +885,18 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
  * the corresponding bit from original ppfeature masks and input the
  * new ppfeature masks.
  */
-static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
-				      struct device_attribute *attr,
-				      const char *buf,
-				      size_t count)
+static ssize_t amdgpu_set_pp_features(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint64_t featuremask;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = kstrtou64(buf, 0, &featuremask);
 	if (ret)
@@ -914,17 +929,17 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
 	return count;
 }
 
-static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
-				      struct device_attribute *attr,
-				      char *buf)
+static ssize_t amdgpu_get_pp_features(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -982,8 +997,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1048,8 +1063,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
@@ -1082,8 +1097,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1112,8 +1127,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 	uint32_t mask = 0;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
@@ -1146,8 +1161,8 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1176,8 +1191,8 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
@@ -1212,8 +1227,8 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1242,8 +1257,8 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
@@ -1278,8 +1293,8 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1308,8 +1323,8 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
@@ -1344,8 +1359,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1374,8 +1389,8 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
@@ -1410,8 +1425,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	uint32_t value = 0;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1438,8 +1453,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	int ret;
 	long int value;
 
-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = kstrtol(buf, 0, &value);
 
@@ -1479,8 +1494,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	uint32_t value = 0;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1507,8 +1522,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	int ret;
 	long int value;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = kstrtol(buf, 0, &value);
 
@@ -1568,8 +1583,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1606,15 +1621,15 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	long int profile_mode = 0;
 	const char delimiter[3] = {' ', '\n', '\0'};
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	tmp[0] = *(buf);
 	tmp[1] = '\0';
 	ret = kstrtol(tmp, 0, &profile_mode);
 	if (ret)
 		return -EINVAL;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
 	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
 		if (count < 2 || count > 127)
 			return -EINVAL;
@@ -1653,23 +1668,23 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 }
 
 /**
- * DOC: busy_percent
+ * DOC: gpu_busy_percent
  *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
-static ssize_t amdgpu_get_busy_percent(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
+static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	int r, value, size = sizeof(value);
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0)
@@ -1696,16 +1711,16 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
-static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
+static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	int r, value, size = sizeof(value);
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0)
@@ -1742,11 +1757,17 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	uint64_t count0, count1;
+	uint64_t count0 = 0, count1 = 0;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
+	if (adev->flags & AMD_IS_APU)
+		return -ENODATA;
+
+	if (!adev->asic_funcs->get_pcie_usage)
+		return -ENODATA;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0)
@@ -1778,8 +1799,8 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (adev->unique_id)
 		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
@@ -1787,57 +1808,185 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 	return 0;
 }
 
-static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
-static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
-		   amdgpu_get_dpm_forced_performance_level,
-		   amdgpu_set_dpm_forced_performance_level);
-static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
-static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
-static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_force_state,
-		   amdgpu_set_pp_force_state);
-static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_table,
-		   amdgpu_set_pp_table);
-static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_sclk,
-		   amdgpu_set_pp_dpm_sclk);
-static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_mclk,
-		   amdgpu_set_pp_dpm_mclk);
-static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_socclk,
-		   amdgpu_set_pp_dpm_socclk);
-static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_fclk,
-		   amdgpu_set_pp_dpm_fclk);
-static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_dcefclk,
-		   amdgpu_set_pp_dpm_dcefclk);
-static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_pcie,
-		   amdgpu_set_pp_dpm_pcie);
-static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_sclk_od,
-		   amdgpu_set_pp_sclk_od);
-static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_mclk_od,
-		   amdgpu_set_pp_mclk_od);
-static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_power_profile_mode,
-		   amdgpu_set_pp_power_profile_mode);
-static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_od_clk_voltage,
-		   amdgpu_set_pp_od_clk_voltage);
-static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
-		   amdgpu_get_busy_percent, NULL);
-static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
-		   amdgpu_get_memory_busy_percent, NULL);
-static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
-static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_feature_status,
-		   amdgpu_set_pp_feature_status);
-static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
+static struct amdgpu_device_attr amdgpu_device_attrs[] = {
+	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC),
+};
+
+static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+			       uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+	struct device_attribute *dev_attr = &attr->dev_attr;
+	const char *attr_name = dev_attr->attr.name;
+	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+	enum amd_asic_type asic_type = adev->asic_type;
+
+	if (!(attr->flags & mask)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
+
+	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+		if (asic_type < CHIP_VEGA10)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+		if (asic_type < CHIP_VEGA20)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+		if (asic_type == CHIP_ARCTURUS)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+			*states = ATTR_STATE_SUPPORTED;
+	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
+			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
+		/* PCIe Perf counters won't work on APU nodes */
+		if (adev->flags & AMD_IS_APU)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(unique_id)) {
+		if (!adev->unique_id)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_features)) {
+		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
+			*states = ATTR_STATE_UNSUPPORTED;
+	}
+
+	if (asic_type == CHIP_ARCTURUS) {
+		/* Arcturus does not support standalone mclk/socclk/fclk level setting */
+		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
+			dev_attr->attr.mode &= ~S_IWUGO;
+			dev_attr->store = NULL;
+		}
+	}
+
+#undef DEVICE_ATTR_IS
+
+	return 0;
+}
+
+
+static int amdgpu_device_attr_create(struct amdgpu_device *adev,
+				     struct amdgpu_device_attr *attr,
+				     uint32_t mask, struct list_head *attr_list)
+{
+	int ret = 0;
+	struct device_attribute *dev_attr = &attr->dev_attr;
+	const char *name = dev_attr->attr.name;
+	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
+	struct amdgpu_device_attr_entry *attr_entry;
+
+	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
+
+	BUG_ON(!attr);
+
+	attr_update = attr->attr_update ? attr_update : default_attr_update;
+
+	ret = attr_update(adev, attr, mask, &attr_states);
+	if (ret) {
+		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
+			name, ret);
+		return ret;
+	}
+
+	if (attr_states == ATTR_STATE_UNSUPPORTED)
+		return 0;
+
+	ret = device_create_file(adev->dev, dev_attr);
+	if (ret) {
+		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
+			name, ret);
+	}
+
+	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
+	if (!attr_entry)
+		return -ENOMEM;
+
+	attr_entry->attr = attr;
+	INIT_LIST_HEAD(&attr_entry->entry);
+
+	list_add_tail(&attr_entry->entry, attr_list);
+
+	return ret;
+}
+
+static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
+{
+	struct device_attribute *dev_attr = &attr->dev_attr;
+
+	device_remove_file(adev->dev, dev_attr);
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+					     struct list_head *attr_list);
+
+static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
+					    struct amdgpu_device_attr *attrs,
+					    uint32_t counts,
+					    uint32_t mask,
+					    struct list_head *attr_list)
+{
+	int ret = 0;
+	uint32_t i = 0;
+
+	for (i = 0; i < counts; i++) {
+		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
+		if (ret)
+			goto failed;
+	}
+
+	return 0;
+
+failed:
+	amdgpu_device_attr_remove_groups(adev, attr_list);
+
+	return ret;
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+					     struct list_head *attr_list)
+{
+	struct amdgpu_device_attr_entry *entry, *entry_tmp;
+
+	if (list_empty(attr_list))
+		return ;
+
+	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
+		amdgpu_device_attr_remove(adev, entry->attr);
+		list_del(&entry->entry);
+		kfree(entry);
+	}
+}
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 				      struct device_attribute *attr,
@@ -1847,6 +1996,9 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 	int channel = to_sensor_dev_attr(attr)->index;
 	int r, temp = 0, size = sizeof(temp);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (channel >= PP_TEMP_MAX)
 		return -EINVAL;
 
@@ -1978,6 +2130,9 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 	u32 pwm_mode = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(adev->ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -2009,6 +2164,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	int err, ret;
 	int value;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = kstrtoint(buf, 10, &value);
 	if (err)
 		return err;
@@ -2058,6 +2216,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 	u32 value;
 	u32 pwm_mode;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2107,6 +2268,9 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 	int err;
 	u32 speed = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2137,6 +2301,9 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 	int err;
 	u32 speed = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2166,6 +2333,9 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 	u32 size = sizeof(min_rpm);
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2191,6 +2361,9 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 	u32 size = sizeof(max_rpm);
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2215,6 +2388,9 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 	int err;
 	u32 rpm = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2244,6 +2420,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 	u32 value;
 	u32 pwm_mode;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
 	if (err < 0)
 		return err;
@@ -2290,6 +2469,9 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 	u32 pwm_mode = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(adev->ddev->dev);
 	if (ret < 0)
 		return ret;
@@ -2322,6 +2504,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 	int value;
 	u32 pwm_mode;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = kstrtoint(buf, 10, &value);
 	if (err)
 		return err;
@@ -2362,6 +2547,9 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 	u32 vddgfx;
 	int r, size = sizeof(vddgfx);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2394,6 +2582,9 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 	u32 vddnb;
 	int r, size = sizeof(vddnb);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	/* only APUs have vddnb */
 	if  (!(adev->flags & AMD_IS_APU))
 		return -EINVAL;
@@ -2431,6 +2622,9 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 	int r, size = sizeof(u32);
 	unsigned uw;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2467,6 +2661,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 	ssize_t size;
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2496,6 +2693,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	ssize_t size;
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2526,6 +2726,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 	int err;
 	u32 value;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (amdgpu_sriov_vf(adev))
 		return -EINVAL;
 
@@ -2564,6 +2767,9 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 	uint32_t sclk;
 	int r, size = sizeof(sclk);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -2596,6 +2802,9 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 	uint32_t mclk;
 	int r, size = sizeof(mclk);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
 		return r;
@@ -3238,8 +3447,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
 
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
-	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 	int ret;
+	uint32_t mask = 0;
 
 	if (adev->pm.sysfs_initialized)
 		return 0;
@@ -3247,6 +3456,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 	if (adev->pm.dpm_enabled == 0)
 		return 0;
 
+	INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
 	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
 								   DRIVER_NAME, adev,
 								   hwmon_groups);
@@ -3257,160 +3468,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		return ret;
 	}
 
-	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
-	if (ret) {
-		DRM_ERROR("failed to create device file for dpm state\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-	if (ret) {
-		DRM_ERROR("failed to create device file for dpm state\n");
-		return ret;
-	}
-
-
-	ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_num_states\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_cur_state\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_force_state\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_table);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_table\n");
-		return ret;
-	}
-
-	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
-		return ret;
-	}
-
-	/* Arcturus does not support standalone mclk/socclk/fclk level setting */
-	if (adev->asic_type == CHIP_ARCTURUS) {
-		dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
-		dev_attr_pp_dpm_mclk.store = NULL;
-
-		dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
-		dev_attr_pp_dpm_socclk.store = NULL;
-
-		dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
-		dev_attr_pp_dpm_fclk.store = NULL;
+	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+	case SRIOV_VF_MODE_ONE_VF:
+		mask = ATTR_FLAG_ONEVF;
+		break;
+	case SRIOV_VF_MODE_MULTI_VF:
+		mask = 0;
+		break;
+	case SRIOV_VF_MODE_BARE_METAL:
+	default:
+		mask = ATTR_FLAG_MASK_ALL;
+		break;
 	}
 
-	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
-		return ret;
-	}
-	if (adev->asic_type >= CHIP_VEGA10) {
-		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_dpm_socclk\n");
-			return ret;
-		}
-		if (adev->asic_type != CHIP_ARCTURUS) {
-			ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-			if (ret) {
-				DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
-				return ret;
-			}
-		}
-	}
-	if (adev->asic_type >= CHIP_VEGA20) {
-		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_dpm_fclk\n");
-			return ret;
-		}
-	}
-	if (adev->asic_type != CHIP_ARCTURUS) {
-		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_dpm_pcie\n");
-			return ret;
-		}
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_sclk_od\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_mclk_od\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev,
-			&dev_attr_pp_power_profile_mode);
-	if (ret) {
-		DRM_ERROR("failed to create device file	"
-				"pp_power_profile_mode\n");
-		return ret;
-	}
-	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-	    (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
-		ret = device_create_file(adev->dev,
-				&dev_attr_pp_od_clk_voltage);
-		if (ret) {
-			DRM_ERROR("failed to create device file	"
-					"pp_od_clk_voltage\n");
-			return ret;
-		}
-	}
-	ret = device_create_file(adev->dev,
-			&dev_attr_gpu_busy_percent);
-	if (ret) {
-		DRM_ERROR("failed to create device file	"
-				"gpu_busy_level\n");
-		return ret;
-	}
-	/* APU does not have its own dedicated memory */
-	if (!(adev->flags & AMD_IS_APU) &&
-	     (adev->asic_type != CHIP_VEGA10)) {
-		ret = device_create_file(adev->dev,
-				&dev_attr_mem_busy_percent);
-		if (ret) {
-			DRM_ERROR("failed to create device file	"
-					"mem_busy_percent\n");
-			return ret;
-		}
-	}
-	/* PCIe Perf counters won't work on APU nodes */
-	if (!(adev->flags & AMD_IS_APU)) {
-		ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
-		if (ret) {
-			DRM_ERROR("failed to create device file pcie_bw\n");
-			return ret;
-		}
-	}
-	if (adev->unique_id)
-		ret = device_create_file(adev->dev, &dev_attr_unique_id);
-	if (ret) {
-		DRM_ERROR("failed to create device file unique_id\n");
+	ret = amdgpu_device_attr_create_groups(adev,
+					       amdgpu_device_attrs,
+					       ARRAY_SIZE(amdgpu_device_attrs),
+					       mask,
+					       &adev->pm.pm_attr_list);
+	if (ret)
 		return ret;
-	}
-
-	if ((adev->asic_type >= CHIP_VEGA10) &&
-	    !(adev->flags & AMD_IS_APU)) {
-		ret = device_create_file(adev->dev,
-				&dev_attr_pp_features);
-		if (ret) {
-			DRM_ERROR("failed to create device file	"
-					"pp_features\n");
-			return ret;
-		}
-	}
 
 	adev->pm.sysfs_initialized = true;
 
@@ -3419,51 +3496,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 
 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 {
-	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
 	if (adev->pm.dpm_enabled == 0)
 		return;
 
 	if (adev->pm.int_hwmon_dev)
 		hwmon_device_unregister(adev->pm.int_hwmon_dev);
-	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
-	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-
-	device_remove_file(adev->dev, &dev_attr_pp_num_states);
-	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
-	device_remove_file(adev->dev, &dev_attr_pp_force_state);
-	device_remove_file(adev->dev, &dev_attr_pp_table);
-
-	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
-	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
-	if (adev->asic_type >= CHIP_VEGA10) {
-		device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
-		if (adev->asic_type != CHIP_ARCTURUS)
-			device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-	}
-	if (adev->asic_type != CHIP_ARCTURUS)
-		device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
-	if (adev->asic_type >= CHIP_VEGA20)
-		device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
-	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
-	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
-	device_remove_file(adev->dev,
-			&dev_attr_pp_power_profile_mode);
-	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
-		device_remove_file(adev->dev,
-				&dev_attr_pp_od_clk_voltage);
-	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
-	if (!(adev->flags & AMD_IS_APU) &&
-	     (adev->asic_type != CHIP_VEGA10))
-		device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
-	if (!(adev->flags & AMD_IS_APU))
-		device_remove_file(adev->dev, &dev_attr_pcie_bw);
-	if (adev->unique_id)
-		device_remove_file(adev->dev, &dev_attr_unique_id);
-	if ((adev->asic_type >= CHIP_VEGA10) &&
-	    !(adev->flags & AMD_IS_APU))
-		device_remove_file(adev->dev, &dev_attr_pp_features);
+
+	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -3626,6 +3665,9 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 	u32 flags = 0;
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0)
 		return r;
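
The hunks above make every power/hwmon sysfs handler return -EPERM while adev->in_gpu_reset is set. The sketch below shows what that looks like from user space when reading the gpu_busy_percent file whose handler is renamed here (amdgpu_get_gpu_busy_percent). It is a minimal illustration only, not part of the patch; the /sys/class/drm/card0/device path is an assumption and the card index varies per system.

/* Minimal sketch: read an amdgpu power sysfs file and treat EPERM as
 * "GPU reset in progress", per the behaviour introduced by this patch.
 * Assumes the GPU is exposed as card0; adjust the path for your system.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/gpu_busy_percent";
	char buf[32];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0) {
		if (errno == EPERM)	/* driver is mid-reset; retry later */
			fprintf(stderr, "GPU is resetting, try again later\n");
		else
			fprintf(stderr, "read: %s\n", strerror(errno));
		close(fd);
		return 1;
	}
	buf[n] = '\0';
	printf("GPU busy: %s", buf);	/* value is a percentage, newline-terminated */

	close(fd);
	return 0;
}

Writable files such as power_dpm_force_performance_level fail the same way during a reset, so tools polling these interfaces should treat EPERM as transient rather than fatal.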
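The other substantive change is structural: the long chain of open-coded device_create_file()/device_remove_file() calls is replaced by an amdgpu_device_attrs[] table plus an update callback (default_attr_update) that decides per ASIC and per SR-IOV mode whether each file is created. The standalone sketch below mirrors only the shape of that pattern in plain user-space C so the control flow is easier to follow; the structure and function names here are illustrative stand-ins, not the kernel definitions.

/* Illustrative model of table-driven attribute creation: each entry
 * carries flags, a mask selects the mode, and an update hook can veto
 * entries the "hardware" does not support. Names are stand-ins.
 */
#include <stdio.h>
#include <string.h>

#define FLAG_BASIC (1u << 0)
#define FLAG_ONEVF (1u << 1)

enum attr_state { ATTR_UNSUPPORTED, ATTR_SUPPORTED };

struct demo_attr {
	const char *name;
	unsigned int flags;
};

static const struct demo_attr attrs[] = {
	{ "power_dpm_state", FLAG_BASIC | FLAG_ONEVF },
	{ "pp_dpm_sclk",     FLAG_BASIC | FLAG_ONEVF },
	{ "pcie_bw",         FLAG_BASIC },
	{ "unique_id",       FLAG_BASIC },
};

/* Stand-in for the per-attribute update callback. */
static enum attr_state update(const struct demo_attr *attr,
			      unsigned int mask, int is_apu)
{
	if (!(attr->flags & mask))
		return ATTR_UNSUPPORTED;
	if (is_apu && !strcmp(attr->name, "pcie_bw"))
		return ATTR_UNSUPPORTED;	/* PCIe counters absent on APUs */
	return ATTR_SUPPORTED;
}

int main(void)
{
	unsigned int mask = FLAG_BASIC | FLAG_ONEVF;	/* "bare metal" mode */
	int is_apu = 1;
	size_t i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		if (update(&attrs[i], mask, is_apu) == ATTR_SUPPORTED)
			printf("create sysfs file: %s\n", attrs[i].name);
		else
			printf("skip: %s\n", attrs[i].name);
	}
	return 0;
}

The design choice this models is that support rules live in one place (the update callback) instead of being scattered through init and fini, which is why amdgpu_pm_sysfs_fini() collapses to a single amdgpu_device_attr_remove_groups() call over the recorded attribute list.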