Diffstat (limited to 'drivers/gpu/drm/amd/pm/amdgpu_pm.c')
 drivers/gpu/drm/amd/pm/amdgpu_pm.c | 436 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 322 insertions(+), 114 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 9a54066ec0af..249cb0aeb5ae 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -411,7 +411,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data;
- int i, buf_len, ret;
+ uint32_t i;
+ int buf_len, ret;
if (amdgpu_in_reset(adev))
return -EPERM;
@@ -433,9 +434,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
- buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
+ buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
- buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
+ buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
@@ -735,6 +736,23 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* - a list of valid ranges for sclk, mclk, and voltage curve points
* labeled OD_RANGE
*
+ * < For APUs >
+ *
+ * Reading the file will display:
+ *
+ * - minimum and maximum engine clock labeled OD_SCLK
+ *
+ * - a list of valid ranges for sclk labeled OD_RANGE
+ *
+ * < For VanGogh >
+ *
+ * Reading the file will display:
+ *
+ * - minimum and maximum engine clock labeled OD_SCLK
+ * - minimum and maximum core clocks labeled OD_CCLK
+ *
+ * - a list of valid ranges for sclk and cclk labeled OD_RANGE
+ *
* To manually adjust these settings:
*
* - First select manual using power_dpm_force_performance_level
@@ -743,7 +761,10 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* string that contains "s/m index clock" to the file. The index
* should be 0 if to set minimum clock. And 1 if to set maximum
* clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
- * "m 1 800" will update maximum mclk to be 800Mhz.
+ * "m 1 800" will update maximum mclk to be 800Mhz. For core
+ * clocks on VanGogh, the string contains "p core index clock".
+ * E.g., "p 2 0 800" would set the minimum core clock on core
+ * 2 to 800Mhz.
*
* For sclk voltage curve, enter the new values by writing a
* string that contains "vc point clock voltage" to the file. The
@@ -903,7 +924,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
} else {
- size = snprintf(buf, PAGE_SIZE, "\n");
+ size = sysfs_emit(buf, "\n");
}
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -989,7 +1010,7 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
if (adev->powerplay.pp_funcs->get_ppfeature_status)
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
else
- size = snprintf(buf, PAGE_SIZE, "\n");
+ size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1050,7 +1071,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (adev->powerplay.pp_funcs->print_clock_levels)
size = amdgpu_dpm_print_clock_levels(adev, type, buf);
else
- size = snprintf(buf, PAGE_SIZE, "\n");
+ size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1449,7 +1470,7 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
if (adev->powerplay.pp_funcs->get_power_profile_mode)
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
else
- size = snprintf(buf, PAGE_SIZE, "\n");
+ size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1797,13 +1818,197 @@ out:
return size;
}
+/**
+ * DOC: smartshift_apu_power
+ *
+ * The amdgpu driver provides a sysfs API for reporting the APU power
+ * share if the platform supports SmartShift. The value is the APU power
+ * expressed as a percentage of the STAPM limit, the total APU power
+ * limit. For example, if the APU power is 130% of the STAPM limit, the
+ * APU is using 30% of the dGPU's headroom.
+ */
+
+static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ uint32_t ss_power, size;
+ int r = 0;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ r = pm_runtime_get_sync(ddev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return r;
+ }
+
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+ (void *)&ss_power, &size);
+ if (r)
+ goto out;
+
+ r = sysfs_emit(buf, "%u%%\n", ss_power);
+
+out:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return r;
+}
+
+/**
+ * DOC: smartshift_dgpu_power
+ *
+ * The amdgpu driver provides a sysfs API for reporting the dGPU power
+ * share if the device is in HG (hybrid graphics) mode and supports
+ * SmartShift. The value is the dGPU power expressed as a percentage of
+ * the STAPM limit, the total APU power limit. For example, if the dGPU
+ * power is 20% above the STAPM limit (120%), the dGPU is using 20% of
+ * the APU's power headroom.
+ */
+
+static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ uint32_t ss_power, size;
+ int r = 0;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ r = pm_runtime_get_sync(ddev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return r;
+ }
+
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+ (void *)&ss_power, &size);
+
+ if (r)
+ goto out;
+
+ r = sysfs_emit(buf, "%u%%\n", ss_power);
+
+out:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return r;
+}
+
+/**
+ * DOC: smartshift_bias
+ *
+ * The amdgpu driver provides a sysfs API for reporting the
+ * SmartShift (SS2.0) bias level. The value ranges from -100 to 100
+ * and defaults to 0. -100 gives maximum preference to the APU and
+ * 100 gives maximum preference to the dGPU.
+ */
+
+static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int r = 0;
+
+ r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
+
+ return r;
+}
+
+static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int r = 0;
+ int bias = 0;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ r = pm_runtime_get_sync(ddev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return r;
+ }
+
+ r = kstrtoint(buf, 10, &bias);
+ if (r)
+ goto out;
+
+ if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
+ bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
+ else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
+ bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
+
+ amdgpu_smartshift_bias = bias;
+ r = count;
+
+ /* TODO: update bias level with SMU message */
+
+out:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return r;
+}
+
+
+static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+ uint32_t ss_power, size;
+
+ if (!amdgpu_acpi_is_power_shift_control_supported())
+ *states = ATTR_STATE_UNSUPPORTED;
+ else if ((adev->flags & AMD_IS_PX) &&
+ !amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+ (void *)&ss_power, &size))
+ *states = ATTR_STATE_UNSUPPORTED;
+ else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+ (void *)&ss_power, &size))
+ *states = ATTR_STATE_UNSUPPORTED;
+
+ return 0;
+}
+
+static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+ uint32_t ss_power, size;
+
+ if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+ *states = ATTR_STATE_UNSUPPORTED;
+ else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+ (void *)&ss_power, &size))
+ *states = ATTR_STATE_UNSUPPORTED;
+ else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+ (void *)&ss_power, &size))
+ *states = ATTR_STATE_UNSUPPORTED;
+
+ return 0;
+}
+
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -1823,6 +2028,12 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
+ .attr_update = ss_power_attr_update),
+ AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
+ .attr_update = ss_power_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
+ .attr_update = ss_bias_attr_update),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
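
The three smartshift_* attributes registered above are ordinary sysfs files under the device node, so the SmartShift state can be inspected and the bias adjusted directly from userspace. A minimal sketch, assuming card0 and a platform on which the attr_update callbacks above leave the attributes supported:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static void dump_attr(const char *name)
    {
            char path[128], val[32] = { 0 };
            int fd;

            snprintf(path, sizeof(path),
                     "/sys/class/drm/card0/device/%s", name);
            fd = open(path, O_RDONLY);
            if (fd < 0)
                    return;
            if (read(fd, val, sizeof(val) - 1) > 0)
                    printf("%s: %s", name, val);    /* e.g. "130%" */
            close(fd);
    }

    int main(void)
    {
            int fd;

            dump_attr("smartshift_apu_power");
            dump_attr("smartshift_dgpu_power");

            /* Bias ranges from -100 (favor the APU) to 100 (favor the dGPU). */
            fd = open("/sys/class/drm/card0/device/smartshift_bias", O_WRONLY);
            if (fd >= 0) {
                    write(fd, "50\n", 3);
                    close(fd);
            }
            return 0;
    }
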
@@ -1883,14 +2094,19 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED;
}
- if (asic_type == CHIP_ARCTURUS) {
- /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+ switch (asic_type) {
+ case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
+ /* MI-series cards do not support standalone mclk/socclk/fclk level setting */
if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
DEVICE_ATTR_IS(pp_dpm_socclk) ||
DEVICE_ATTR_IS(pp_dpm_fclk)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
+ break;
+ default:
+ break;
}
if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
@@ -1922,7 +2138,7 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
BUG_ON(!attr);
- attr_update = attr->attr_update ? attr_update : default_attr_update;
+ attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
ret = attr_update(adev, attr, mask, &attr_states);
if (ret) {
@@ -2168,7 +2384,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return sprintf(buf, "%u\n", pwm_mode);
+ return sysfs_emit(buf, "%u\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
@@ -2213,14 +2429,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%i\n", 0);
+ return sysfs_emit(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%i\n", 255);
+ return sysfs_emit(buf, "%i\n", 255);
}
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
@@ -2258,10 +2474,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
return err;
}
- value = (value * 100) / 255;
-
- if (adev->powerplay.pp_funcs->set_fan_speed_percent)
- err = amdgpu_dpm_set_fan_speed_percent(adev, value);
+ if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
+ err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
else
err = -EINVAL;
@@ -2293,8 +2507,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return err;
}
- if (adev->powerplay.pp_funcs->get_fan_speed_percent)
- err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
+ if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
+ err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
else
err = -EINVAL;
@@ -2304,9 +2518,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
if (err)
return err;
- speed = (speed * 255) / 100;
-
- return sprintf(buf, "%i\n", speed);
+ return sysfs_emit(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
@@ -2339,7 +2551,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
if (err)
return err;
- return sprintf(buf, "%i\n", speed);
+ return sysfs_emit(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
@@ -2436,7 +2648,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
if (err)
return err;
- return sprintf(buf, "%i\n", rpm);
+ return sysfs_emit(buf, "%i\n", rpm);
}
static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
@@ -2518,7 +2730,7 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
+ return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
@@ -2688,18 +2900,19 @@ static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%i\n", 0);
+ return sysfs_emit(buf, "%i\n", 0);
}
-static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+
+static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ enum pp_power_limit_level pp_limit_level)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- int limit_type = to_sensor_dev_attr(attr)->index;
- uint32_t limit = limit_type << 24;
- uint32_t max_limit = 0;
+ enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
+ uint32_t limit;
ssize_t size;
int r;
@@ -2708,22 +2921,22 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ if ( !(pp_funcs && pp_funcs->get_power_limit))
+ return -ENODATA;
+
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
- if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else if (pp_funcs && pp_funcs->get_power_limit) {
- pp_funcs->get_power_limit(adev->powerplay.pp_handle,
- &limit, &max_limit, true);
- size = snprintf(buf, PAGE_SIZE, "%u\n", max_limit * 1000000);
- } else {
- size = snprintf(buf, PAGE_SIZE, "\n");
- }
+ r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
+ pp_limit_level, power_type);
+
+ if (!r)
+ size = sysfs_emit(buf, "%u\n", limit * 1000000);
+ else
+ size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2731,83 +2944,31 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
return size;
}
-static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
+
+static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- int limit_type = to_sensor_dev_attr(attr)->index;
- uint32_t limit = limit_type << 24;
- ssize_t size;
- int r;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
- if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else if (pp_funcs && pp_funcs->get_power_limit) {
- pp_funcs->get_power_limit(adev->powerplay.pp_handle,
- &limit, NULL, false);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else {
- size = snprintf(buf, PAGE_SIZE, "\n");
- }
+}
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
- return size;
}
static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- int limit_type = to_sensor_dev_attr(attr)->index;
- uint32_t limit = limit_type << 24;
- ssize_t size;
- int r;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
- if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_DEFAULT);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else if (pp_funcs && pp_funcs->get_power_limit) {
- pp_funcs->get_power_limit(adev->powerplay.pp_handle,
- &limit, NULL, true);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else {
- size = snprintf(buf, PAGE_SIZE, "\n");
- }
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
- return size;
}
+
static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
struct device_attribute *attr,
char *buf)
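
The consolidated helper above reports every limit level through the standard hwmon power1_cap* attributes, scaled to microwatts (the limit * 1000000 above), so a userspace reader divides by 10^6 to recover watts. A minimal sketch; hwmon0 is an assumption, and a real tool would find the amdgpu instance by matching the hwmon "name" file:

    #include <stdio.h>

    static long read_microwatts(const char *path)
    {
            FILE *f = fopen(path, "r");
            long uw = -1;

            if (f) {
                    if (fscanf(f, "%ld", &uw) != 1)
                            uw = -1;
                    fclose(f);
            }
            return uw;
    }

    int main(void)
    {
            const char *names[] = { "power1_cap", "power1_cap_max", "power1_cap_default" };
            char path[128];

            for (int i = 0; i < 3; i++) {
                    snprintf(path, sizeof(path),
                             "/sys/class/hwmon/hwmon0/%s", names[i]);
                    printf("%s = %.1f W\n", names[i], read_microwatts(path) / 1e6);
            }
            return 0;
    }
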
@@ -3014,6 +3175,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable
*
+ * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
+ * That will get the former one overridden.
+ *
* hwmon interfaces for GPU clocks:
*
* - freq1_input: the gfx/compute clock in hertz
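
Following the note added in the hunk above, a userspace fan controller should drive exactly one interface. A minimal sketch using only the pwm1 path: switch pwm1_enable to manual mode, then write a raw duty cycle on the 0-255 scale that pwm1 now takes directly (the percent conversion was removed earlier in this patch). The card and hwmon indices are assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *base = "/sys/class/drm/card0/device/hwmon/hwmon0";
            char path[128];
            int fd;

            snprintf(path, sizeof(path), "%s/pwm1_enable", base);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return 1;
            write(fd, "1\n", 2);            /* 1 = manual fan speed control */
            close(fd);

            snprintf(path, sizeof(path), "%s/pwm1", base);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return 1;
            write(fd, "128\n", 4);          /* ~50% duty cycle on the 0-255 scale */
            close(fd);
            return 0;
    }
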
@@ -3189,13 +3353,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if (!is_support_sw_smu(adev)) {
/* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
(!adev->powerplay.pp_funcs->get_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
effective_mode &= ~S_IRUGO;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
(!adev->powerplay.pp_funcs->set_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
@@ -3219,8 +3383,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if (!is_support_sw_smu(adev)) {
/* hide max/min values if we can't both query and manage the fan */
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
(!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
!adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
@@ -3534,6 +3698,45 @@ out:
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
+/*
+ * amdgpu_pm_prv_buffer_read - Read the memory region allocated to FW
+ *
+ * Reads the debug memory region allocated to PMFW
+ */
+static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ size_t smu_prv_buf_size;
+ void *smu_prv_buf;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ if (pp_funcs && pp_funcs->get_smu_prv_buf_details)
+ pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf,
+ &smu_prv_buf_size);
+ else
+ return -ENOSYS;
+
+ if (!smu_prv_buf || !smu_prv_buf_size)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
+ smu_prv_buf_size);
+}
+
+static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = amdgpu_pm_prv_buffer_read,
+ .llseek = default_llseek,
+};
+
#endif
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
@@ -3545,5 +3748,10 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
&amdgpu_debugfs_pm_info_fops);
+ if (adev->pm.smu_prv_buffer_size > 0)
+ debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
+ adev,
+ &amdgpu_debugfs_pm_prv_buffer_fops,
+ adev->pm.smu_prv_buffer_size);
#endif
}
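
The debugfs entry registered above exposes the PMFW debug region as a read-only file sized to smu_prv_buffer_size. Once debugfs is mounted it can be dumped like any other file; a minimal sketch, assuming DRI minor 0 and the usual debugfs mount point:

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/kernel/debug/dri/0/amdgpu_pm_prv_buffer";
            FILE *f = fopen(path, "rb");
            unsigned char chunk[4096];
            size_t n, total = 0;

            if (!f)
                    return 1;
            while ((n = fread(chunk, 1, sizeof(chunk), f)) > 0)
                    total += n;             /* a real tool would save or parse the dump */
            fclose(f);
            printf("read %zu bytes of PMFW private buffer\n", total);
            return 0;
    }
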