author    Evan Quan <evan.quan@amd.com>    2020-08-13 13:37:52 +0800
committer Alex Deucher <alexander.deucher@amd.com>    2020-08-14 16:22:41 -0400
commit    e9372d23715d6802fd6d3763cb19c5a0c07ad641 (patch)
tree      945bdf8ad011c7a0fbcd092df85ee172c428d45c /drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
parent    20d3c28ce4e28ac4df9d139054aecb65ee5a9ee4 (diff)
drm/amd/powerplay: put those exposed power interfaces in amdgpu_dpm.c
As with the other power interfaces.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Nirmoy Das <nirmoy.das@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c')
-rw-r--r--    drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c    425
1 file changed, 425 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 3465fddff666..e480b54f6545 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -28,6 +28,11 @@
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
+#include "amdgpu_display.h"
+#include "hwmgr.h"
+#include <linux/power_supply.h>
+
+#define WIDTH_4K 3840
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
@@ -1262,3 +1267,423 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
return ret;
}
+
+void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ if (power_supply_is_system_supplied() > 0)
+ adev->pm.ac_power = true;
+ else
+ adev->pm.ac_power = false;
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->enable_bapm)
+ amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
+ mutex_unlock(&adev->pm.mutex);
+
+ if (is_support_sw_smu(adev))
+ smu_set_ac_dc(&adev->smu);
+ }
+}
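
/*
 * Usage sketch (not part of this patch): the ACPI notification path is
 * expected to forward AC/DC power-source change events to the handler
 * above; the wrapper name here is hypothetical.
 */
static void example_acpi_ac_notify(struct amdgpu_device *adev)
{
	/* re-read the supply state and let BAPM/the SMU react to it */
	amdgpu_pm_acpi_event_handler(adev);
}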
+
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_read_sensor(&adev->smu, sensor, data, size);
+ else {
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ ret = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle,
+ sensor, data, size);
+ else
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
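
/*
 * Usage sketch (not part of this patch): reading the current graphics
 * clock through the unified sensor interface; the helper name is
 * hypothetical. The caller passes its buffer size in *size, which the
 * backend may update to the number of bytes actually written.
 */
static int example_read_sclk(struct amdgpu_device *adev, uint32_t *sclk)
{
	uint32_t size = sizeof(*sclk);

	return amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				      (void *)sclk, &size);
}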
+
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device,
+ pm.dpm.thermal.work);
+ /* switch to the thermal state */
+ enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+ int temp, size = sizeof(temp);
+
+ if (!adev->pm.dpm_enabled)
+ return;
+
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
+ (void *)&temp, &size)) {
+ if (temp < adev->pm.dpm.thermal.min_temp)
+ /* switch back the user state */
+ dpm_state = adev->pm.dpm.user_state;
+ } else {
+ if (adev->pm.dpm.thermal.high_to_low)
+ /* switch back the user state */
+ dpm_state = adev->pm.dpm.user_state;
+ }
+ mutex_lock(&adev->pm.mutex);
+ if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+ adev->pm.dpm.thermal_active = true;
+ else
+ adev->pm.dpm.thermal_active = false;
+ adev->pm.dpm.state = dpm_state;
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_pm_compute_clocks(adev);
+}
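
/*
 * Hookup sketch (not part of this patch): the handler above runs from a
 * workqueue; the legacy DPM code initializes it at setup time and
 * schedules it from the thermal interrupt, roughly:
 *
 *	INIT_WORK(&adev->pm.dpm.thermal.work,
 *		  amdgpu_dpm_thermal_work_handler);
 *	...
 *	schedule_work(&adev->pm.dpm.thermal.work);
 */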
+
+static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type dpm_state)
+{
+ int i;
+ struct amdgpu_ps *ps;
+ u32 ui_class;
+ bool single_display = (adev->pm.dpm.new_active_crtc_count < 2);
+
+ /* check if the vblank period is too short to adjust the mclk */
+ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
+ if (amdgpu_dpm_vblank_too_short(adev))
+ single_display = false;
+ }
+
+ /* certain older asics have a separate 3D performance state,
+ * so try that first if the user selected performance
+ */
+ if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
+ dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
+ /* balanced states don't exist at the moment */
+ if (dpm_state == POWER_STATE_TYPE_BALANCED)
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+restart_search:
+ /* Pick the best power state based on current conditions */
+ for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+ ps = &adev->pm.dpm.ps[i];
+ ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
+ switch (dpm_state) {
+ /* user states */
+ case POWER_STATE_TYPE_BATTERY:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ case POWER_STATE_TYPE_BALANCED:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ case POWER_STATE_TYPE_PERFORMANCE:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ /* internal states */
+ case POWER_STATE_TYPE_INTERNAL_UVD:
+ if (adev->pm.dpm.uvd_ps)
+ return adev->pm.dpm.uvd_ps;
+ else
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_BOOT:
+ return adev->pm.dpm.boot_ps;
+ case POWER_STATE_TYPE_INTERNAL_THERMAL:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_ACPI:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_ULV:
+ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_3DPERF:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ return ps;
+ break;
+ default:
+ break;
+ }
+ }
+ /* use a fallback state if we didn't match */
+ switch (dpm_state) {
+ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ goto restart_search;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+ if (adev->pm.dpm.uvd_ps) {
+ return adev->pm.dpm.uvd_ps;
+ } else {
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ }
+ case POWER_STATE_TYPE_INTERNAL_THERMAL:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
+ goto restart_search;
+ case POWER_STATE_TYPE_INTERNAL_ACPI:
+ dpm_state = POWER_STATE_TYPE_BATTERY;
+ goto restart_search;
+ case POWER_STATE_TYPE_BATTERY:
+ case POWER_STATE_TYPE_BALANCED:
+ case POWER_STATE_TYPE_INTERNAL_3DPERF:
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ default:
+ break;
+ }
+
+ return NULL;
+}
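
/*
 * Note on the search above: the first pass looks for an exact match of
 * the requested state; on a miss, the fallback switch degrades the
 * request along a fixed chain (UVD_SD -> UVD_HD -> uvd_ps or
 * PERFORMANCE; THERMAL -> ACPI -> BATTERY -> PERFORMANCE) and retries,
 * so NULL is only returned when even the fallback classes are absent.
 */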
+
+static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+{
+ struct amdgpu_ps *ps;
+ enum amd_pm_state_type dpm_state;
+ int ret;
+ bool equal = false;
+
+ /* if dpm init failed */
+ if (!adev->pm.dpm_enabled)
+ return;
+
+ if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
+ /* add other state override checks here */
+ if ((!adev->pm.dpm.thermal_active) &&
+ (!adev->pm.dpm.uvd_active))
+ adev->pm.dpm.state = adev->pm.dpm.user_state;
+ }
+ dpm_state = adev->pm.dpm.state;
+
+ ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
+ if (ps)
+ adev->pm.dpm.requested_ps = ps;
+ else
+ return;
+
+ if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
+ printk("switching from power state:\n");
+ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
+ printk("switching to power state:\n");
+ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
+ }
+
+ /* update whether vce is active */
+ ps->vce_active = adev->pm.dpm.vce_active;
+ if (adev->powerplay.pp_funcs->display_configuration_changed)
+ amdgpu_dpm_display_configuration_changed(adev);
+
+ ret = amdgpu_dpm_pre_set_power_state(adev);
+ if (ret)
+ return;
+
+ if (adev->powerplay.pp_funcs->check_state_equal) {
+ if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+ equal = false;
+ }
+
+ if (equal)
+ return;
+
+ amdgpu_dpm_set_power_state(adev);
+ amdgpu_dpm_post_set_power_state(adev);
+
+ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
+ if (adev->powerplay.pp_funcs->force_performance_level) {
+ if (adev->pm.dpm.thermal_active) {
+ enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
+ /* force low perf level for thermal */
+ amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
+ /* save the user's level */
+ adev->pm.dpm.forced_level = level;
+ } else {
+ /* otherwise, user selected level */
+ amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
+ }
+ }
+}
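
/*
 * Locking note (not part of this patch): as the _locked suffix implies,
 * this function expects adev->pm.mutex to be held by the caller, as in
 * amdgpu_pm_compute_clocks() below:
 *
 *	mutex_lock(&adev->pm.mutex);
 *	amdgpu_dpm_get_active_displays(adev);
 *	amdgpu_dpm_change_power_state_locked(adev);
 *	mutex_unlock(&adev->pm.mutex);
 */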
+
+void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+{
+ int i = 0;
+
+ if (!adev->pm.dpm_enabled)
+ return;
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_bandwidth_update(adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+
+ if (is_support_sw_smu(adev)) {
+ struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
+ smu_handle_task(&adev->smu,
+ smu_dpm->dpm_level,
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ true);
+ } else {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+ adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
+ if (adev->pm.pm_display_cfg.vrefresh > 120)
+ adev->pm.pm_display_cfg.min_vblank_time = 0;
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+ mutex_unlock(&adev->pm.mutex);
+ }
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
+ } else {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+ amdgpu_dpm_change_power_state_locked(adev);
+ mutex_unlock(&adev->pm.mutex);
+ }
+ }
+}
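
/*
 * Note on the three paths above: SW SMU parts hand a display-config
 * task to the SMU, powerplay parts go through dispatch_tasks() (with
 * the display configuration pushed by hand on non-DC), and legacy DPM
 * parts fall through to amdgpu_dpm_change_power_state_locked().
 */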
+
+void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ if (adev->family == AMDGPU_FAMILY_SI) {
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+ } else {
+ adev->pm.dpm.uvd_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_pm_compute_clocks(adev);
+ } else {
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K) {
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->hwmgr_func &&
+ hwmgr->hwmgr_func->update_nbdpm_pstate)
+ hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+ !enable,
+ true);
+ }
+ }
+}
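
/*
 * Caller sketch (not part of this patch): the UVD ring code is expected
 * to toggle this around actual use; the hook names below are
 * hypothetical.
 */
static void example_uvd_ring_begin_use(struct amdgpu_device *adev)
{
	/* raise the UVD state (SI) or ungate via the SMU (newer parts) */
	amdgpu_dpm_enable_uvd(adev, true);
}

static void example_uvd_ring_idle(struct amdgpu_device *adev)
{
	/* drop back once the ring has gone idle */
	amdgpu_dpm_enable_uvd(adev, false);
}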
+
+void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ if (adev->family == AMDGPU_FAMILY_SI) {
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+ } else {
+ adev->pm.dpm.vce_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_pm_compute_clocks(adev);
+ } else {
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+ }
+}
+
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->powerplay.pp_funcs->print_power_state == NULL)
+ return;
+
+ for (i = 0; i < adev->pm.dpm.num_ps; i++)
+ amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+
+}
+
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+}
+
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+{
+ int r;
+
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
+ r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+ if (r) {
+ pr_err("smu firmware loading failed\n");
+ return r;
+ }
+ *smu_version = adev->pm.fw_version;
+ }
+ return 0;
+}
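
/*
 * Usage sketch (not part of this patch): called during early init to
 * kick off SMU firmware loading and retrieve the firmware version.
 * Note that *smu_version is written only when a load_firmware callback
 * exists, so the caller should pre-initialize it:
 *
 *	uint32_t smu_version = 0;
 *	int r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
 */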