author     Evan Quan <evan.quan@amd.com>               2020-08-13 16:39:25 +0800
committer  Alex Deucher <alexander.deucher@amd.com>    2020-08-14 16:22:41 -0400
commit     e098bc9612c2b60f94920461d71c92962a916e73 (patch)
tree       9523440f73a9db1943a4102da7b5ef4c5fb15ca4 /drivers/gpu/drm/amd/amdgpu
parent     e9372d23715d6802fd6d3763cb19c5a0c07ad641 (diff)
drm/amd/pm: optimize the power related source code layout
The target is to provide a clear entry point for the power routines. This also helps maintain a clear view of the frameworks used on different ASICs. Hopefully all of this makes the power code friendlier to work with.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
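The build-system side of the move shows up in the Makefile hunk further below: the amdgpu Makefile switches its header search path and its include from powerplay/ to the new pm/ directory, while still linking whatever that included Makefile exports through AMD_POWERPLAY_FILES. As a rough orientation only, a minimal sketch of how the relocated pm/Makefile is expected to hook into this (the variable and object names here are assumptions for illustration, not the actual file contents) could look like:

    # Illustrative sketch of a drivers/gpu/drm/amd/pm/Makefile entry point.
    # Paths are relative to the amdgpu directory because the objects end up
    # on the amdgpu-y list; object names below are assumptions.
    PM_PATH = ../pm

    POWER_MGR = amdgpu_dpm.o amdgpu_pm.o

    AMD_POWER = $(addprefix $(PM_PATH)/,$(POWER_MGR))

    AMD_POWERPLAY_FILES += $(AMD_POWER)

The consumer side in the amdgpu Makefile is unchanged: the existing "amdgpu-y += $(AMD_POWERPLAY_FILES)" line, kept at the end of the hunk below, picks up whatever the included pm/Makefile exports.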
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile        |   14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c    | 1689
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h    |  565
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 3613
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h     |   89
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_dpm.h       |   29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c        | 3382
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.h        |  229
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_smc.c        |  218
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ppsmc.h         |  200
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/r600_dpm.h      |  127
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c        | 8079
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.h        | 1015
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_smc.c        |  273
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sislands_smc.h  |  423
15 files changed, 5 insertions(+), 19940 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index dec1927ca75d..39976c7b100c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -30,7 +30,7 @@ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
- -I$(FULL_AMD_PATH)/powerplay/inc \
+ -I$(FULL_AMD_PATH)/pm/inc \
-I$(FULL_AMD_PATH)/acp/include \
-I$(FULL_AMD_DISPLAY_PATH) \
-I$(FULL_AMD_DISPLAY_PATH)/include \
@@ -47,7 +47,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
- amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
+ atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
@@ -60,10 +60,10 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
# add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \
dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
-amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \
uvd_v3_1.o
amdgpu-y += \
@@ -105,10 +105,6 @@ amdgpu-y += \
psp_v11_0.o \
psp_v12_0.o
-# add SMC block
-amdgpu-y += \
- amdgpu_dpm.o
-
# add DCE block
amdgpu-y += \
dce_v10_0.o \
@@ -212,7 +208,7 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o
-include $(FULL_AMD_PATH)/powerplay/Makefile
+include $(FULL_AMD_PATH)/pm/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
deleted file mode 100644
index e480b54f6545..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ /dev/null
@@ -1,1689 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- */
-
-#include "amdgpu.h"
-#include "amdgpu_atombios.h"
-#include "amdgpu_i2c.h"
-#include "amdgpu_dpm.h"
-#include "atom.h"
-#include "amd_pcie.h"
-#include "amdgpu_display.h"
-#include "hwmgr.h"
-#include <linux/power_supply.h>
-
-#define WIDTH_4K 3840
-
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
-{
- const char *s;
-
- switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
- case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
- default:
- s = "none";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
- s = "battery";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
- s = "balanced";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
- s = "performance";
- break;
- }
- printk("\tui class: %s\n", s);
- printk("\tinternal class:");
- if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
- (class2 == 0))
- pr_cont(" none");
- else {
- if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- pr_cont(" boot");
- if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- pr_cont(" thermal");
- if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
- pr_cont(" limited_pwr");
- if (class & ATOM_PPLIB_CLASSIFICATION_REST)
- pr_cont(" rest");
- if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
- pr_cont(" forced");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- pr_cont(" 3d_perf");
- if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
- pr_cont(" ovrdrv");
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- pr_cont(" uvd");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
- pr_cont(" 3d_low");
- if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- pr_cont(" acpi");
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- pr_cont(" uvd_hd2");
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- pr_cont(" uvd_hd");
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- pr_cont(" uvd_sd");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
- pr_cont(" limited_pwr2");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- pr_cont(" ulv");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- pr_cont(" uvd_mvc");
- }
- pr_cont("\n");
-}
-
-void amdgpu_dpm_print_cap_info(u32 caps)
-{
- printk("\tcaps:");
- if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- pr_cont(" single_disp");
- if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
- pr_cont(" video");
- if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
- pr_cont(" no_dc");
- pr_cont("\n");
-}
-
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- printk("\tstatus:");
- if (rps == adev->pm.dpm.current_ps)
- pr_cont(" c");
- if (rps == adev->pm.dpm.requested_ps)
- pr_cont(" r");
- if (rps == adev->pm.dpm.boot_ps)
- pr_cont(" b");
- pr_cont("\n");
-}
-
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
-{
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
-
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
-}
-
-
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev->ddev;
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vblank_in_pixels;
- u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vblank_in_pixels =
- amdgpu_crtc->hw_mode.crtc_htotal *
- (amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2));
-
- vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- break;
- }
- }
- }
-
- return vblank_time_us;
-}
-
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev->ddev;
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vrefresh = 0;
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- break;
- }
- }
- }
-
- return vrefresh;
-}
-
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
-{
- switch (sensor) {
- case THERMAL_TYPE_RV6XX:
- case THERMAL_TYPE_RV770:
- case THERMAL_TYPE_EVERGREEN:
- case THERMAL_TYPE_SUMO:
- case THERMAL_TYPE_NI:
- case THERMAL_TYPE_SI:
- case THERMAL_TYPE_CI:
- case THERMAL_TYPE_KV:
- return true;
- case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
- return false; /* need special handling */
- case THERMAL_TYPE_NONE:
- case THERMAL_TYPE_EXTERNAL:
- case THERMAL_TYPE_EXTERNAL_GPIO:
- default:
- return false;
- }
-}
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
- struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
- struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
-};
-
-union fan_info {
- struct _ATOM_PPLIB_FANTABLE fan;
- struct _ATOM_PPLIB_FANTABLE2 fan2;
- struct _ATOM_PPLIB_FANTABLE3 fan3;
-};
-
-static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
- ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
-{
- u32 size = atom_table->ucNumEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- int i;
- ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
-
- amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
- if (!amdgpu_table->entries)
- return -ENOMEM;
-
- entry = &atom_table->entries[0];
- for (i = 0; i < atom_table->ucNumEntries; i++) {
- amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
- (entry->ucClockHigh << 16);
- amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
- }
- amdgpu_table->count = atom_table->ucNumEntries;
-
- return 0;
-}
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
-
- return 0;
-}
-
-/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- union power_info *power_info;
- union fan_info *fan_info;
- ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- int ret, i;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- /* fan table */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
- if (power_info->pplib3.usFanTableOffset) {
- fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib3.usFanTableOffset));
- adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
- adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
- adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
- adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
- adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
- adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
- adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
- if (fan_info->fan.ucFanTableFormat >= 2)
- adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
- else
- adev->pm.dpm.fan.t_max = 10900;
- adev->pm.dpm.fan.cycle_delay = 100000;
- if (fan_info->fan.ucFanTableFormat >= 3) {
- adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
- adev->pm.dpm.fan.default_max_fan_pwm =
- le16_to_cpu(fan_info->fan3.usFanPWMMax);
- adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
- adev->pm.dpm.fan.fan_output_sensitivity =
- le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
- }
- adev->pm.dpm.fan.ucode_fan_control = true;
- }
- }
-
- /* clock dependancy tables, shedding tables */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
- if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
- ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
- (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
- if (clk_v->ucNumEntries) {
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
- le16_to_cpu(clk_v->entries[0].usSclkLow) |
- (clk_v->entries[0].ucSclkHigh << 16);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
- le16_to_cpu(clk_v->entries[0].usMclkLow) |
- (clk_v->entries[0].ucMclkHigh << 16);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
- le16_to_cpu(clk_v->entries[0].usVddc);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
- le16_to_cpu(clk_v->entries[0].usVddci);
- }
- }
- if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
- ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
- (ATOM_PPLIB_PhaseSheddingLimits_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
- ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
-
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
- kcalloc(psl->ucNumEntries,
- sizeof(struct amdgpu_phase_shedding_limits_entry),
- GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
-
- entry = &psl->entries[0];
- for (i = 0; i < psl->ucNumEntries; i++) {
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
- le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
- le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
- }
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
- psl->ucNumEntries;
- }
- }
-
- /* cac data */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
- adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
- adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
- adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
- adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
- if (adev->pm.dpm.tdp_od_limit)
- adev->pm.dpm.power_control = true;
- else
- adev->pm.dpm.power_control = false;
- adev->pm.dpm.tdp_adjustment = 0;
- adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
- adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
- adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
- if (power_info->pplib5.usCACLeakageTableOffset) {
- ATOM_PPLIB_CAC_Leakage_Table *cac_table =
- (ATOM_PPLIB_CAC_Leakage_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
- ATOM_PPLIB_CAC_Leakage_Record *entry;
- u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- entry = &cac_table->entries[0];
- for (i = 0; i < cac_table->ucNumEntries; i++) {
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
- le16_to_cpu(entry->usVddc1);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
- le16_to_cpu(entry->usVddc2);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
- le16_to_cpu(entry->usVddc3);
- } else {
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
- le16_to_cpu(entry->usVddc);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
- le32_to_cpu(entry->ulLeakageValue);
- }
- entry = (ATOM_PPLIB_CAC_Leakage_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
- }
- adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
- }
- }
-
- /* ext tables */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
- ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
- ext_hdr->usVCETableOffset) {
- VCEClockInfoArray *array = (VCEClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
- 1 + array->ucNumEntries * sizeof(VCEClockInfo));
- ATOM_PPLIB_VCE_State_Table *states =
- (ATOM_PPLIB_VCE_State_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
- 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
- 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
- ATOM_PPLIB_VCE_State_Record *state_entry;
- VCEClockInfo *vce_clk;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- state_entry = &states->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- vce_clk = (VCEClockInfo *)
- ((u8 *)&array->entries[0] +
- (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
- le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
- le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
- }
- adev->pm.dpm.num_of_vce_states =
- states->numEntries > AMD_MAX_VCE_LEVELS ?
- AMD_MAX_VCE_LEVELS : states->numEntries;
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- vce_clk = (VCEClockInfo *)
- ((u8 *)&array->entries[0] +
- (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
- adev->pm.dpm.vce_states[i].evclk =
- le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
- adev->pm.dpm.vce_states[i].ecclk =
- le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
- adev->pm.dpm.vce_states[i].clk_idx =
- state_entry->ucClockInfoIndex & 0x3f;
- adev->pm.dpm.vce_states[i].pstate =
- (state_entry->ucClockInfoIndex & 0xc0) >> 6;
- state_entry = (ATOM_PPLIB_VCE_State_Record *)
- ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
- ext_hdr->usUVDTableOffset) {
- UVDClockInfoArray *array = (UVDClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
- 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- UVDClockInfo *uvd_clk = (UVDClockInfo *)
- ((u8 *)&array->entries[0] +
- (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
- le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
- le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
- ext_hdr->usSAMUTableOffset) {
- ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
- le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
- ext_hdr->usPPMTableOffset) {
- ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPPMTableOffset));
- adev->pm.dpm.dyn_state.ppm_table =
- kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.ppm_table) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
- adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
- le16_to_cpu(ppm->usCpuCoreNumber);
- adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
- le32_to_cpu(ppm->ulPlatformTDP);
- adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
- le32_to_cpu(ppm->ulSmallACPlatformTDP);
- adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
- le32_to_cpu(ppm->ulPlatformTDC);
- adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
- le32_to_cpu(ppm->ulSmallACPlatformTDC);
- adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
- le32_to_cpu(ppm->ulApuTDP);
- adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
- le32_to_cpu(ppm->ulDGpuTDP);
- adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
- le32_to_cpu(ppm->ulDGpuUlvPower);
- adev->pm.dpm.dyn_state.ppm_table->tj_max =
- le32_to_cpu(ppm->ulTjmax);
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
- ext_hdr->usACPTableOffset) {
- ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
- ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
- le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
- ext_hdr->usPowerTuneTableOffset) {
- u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- ATOM_PowerTune_Table *pt;
- adev->pm.dpm.dyn_state.cac_tdp_table =
- kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- if (rev > 0) {
- ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
- ppt->usMaximumPowerDeliveryLimit;
- pt = &ppt->power_tune_table;
- } else {
- ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
- pt = &ppt->power_tune_table;
- }
- adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
- adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
- le16_to_cpu(pt->usConfigurableTDP);
- adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
- adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
- le16_to_cpu(pt->usBatteryPowerLimit);
- adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
- le16_to_cpu(pt->usSmallPowerLimit);
- adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
- le16_to_cpu(pt->usLowCACLeakage);
- adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
- le16_to_cpu(pt->usHighCACLeakage);
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
- ext_hdr->usSclkVddgfxTableOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(
- &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
- dep_table);
- if (ret) {
- kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
-
- kfree(dyn_state->vddc_dependency_on_sclk.entries);
- kfree(dyn_state->vddci_dependency_on_mclk.entries);
- kfree(dyn_state->vddc_dependency_on_mclk.entries);
- kfree(dyn_state->mvdd_dependency_on_mclk.entries);
- kfree(dyn_state->cac_leakage_table.entries);
- kfree(dyn_state->phase_shedding_limits_table.entries);
- kfree(dyn_state->ppm_table);
- kfree(dyn_state->cac_tdp_table);
- kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
- kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
- kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
- kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
- kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
-}
-
-static const char *pp_lib_thermal_controller_names[] = {
- "NONE",
- "lm63",
- "adm1032",
- "adm1030",
- "max6649",
- "lm64",
- "f75375",
- "RV6xx",
- "RV770",
- "adt7473",
- "NONE",
- "External GPIO",
- "Evergreen",
- "emc2103",
- "Sumo",
- "Northern Islands",
- "Southern Islands",
- "lm96163",
- "Sea Islands",
- "Kaveri/Kabini",
-};
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- ATOM_PPLIB_POWERPLAYTABLE *power_table;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- ATOM_PPLIB_THERMALCONTROLLER *controller;
- struct amdgpu_i2c_bus_rec i2c_bus;
- u16 data_offset;
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return;
- power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
- (mode_info->atom_context->bios + data_offset);
- controller = &power_table->sThermalController;
-
- /* add the i2c bus for thermal/fan chip */
- if (controller->ucType > 0) {
- if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
- adev->pm.no_fan = true;
- adev->pm.fan_pulses_per_revolution =
- controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
- if (adev->pm.fan_pulses_per_revolution) {
- adev->pm.fan_min_rpm = controller->ucFanMinRPM;
- adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
- }
- if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_NI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_SI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_CI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_KV;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
- DRM_INFO("External GPIO thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
- DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
- } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[controller->ucType],
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
- i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
- adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
- if (adev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = pp_lib_thermal_controller_names[controller->ucType];
- info.addr = controller->ucI2cAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
- }
- } else {
- DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
- controller->ucType,
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- }
- }
-}
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen)
-{
- switch (asic_gen) {
- case AMDGPU_PCIE_GEN1:
- return AMDGPU_PCIE_GEN1;
- case AMDGPU_PCIE_GEN2:
- return AMDGPU_PCIE_GEN2;
- case AMDGPU_PCIE_GEN3:
- return AMDGPU_PCIE_GEN3;
- default:
- if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
- (default_gen == AMDGPU_PCIE_GEN3))
- return AMDGPU_PCIE_GEN3;
- else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
- (default_gen == AMDGPU_PCIE_GEN2))
- return AMDGPU_PCIE_GEN2;
- else
- return AMDGPU_PCIE_GEN1;
- }
- return AMDGPU_PCIE_GEN1;
-}
-
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (idx < adev->pm.dpm.num_of_vce_states)
- return &adev->pm.dpm.vce_states[idx];
-
- return NULL;
-}
-
-int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
-{
- uint32_t clk_freq;
- int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
- low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
- if (ret)
- return 0;
- return clk_freq * 100;
-
- } else {
- return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
- }
-}
-
-int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
-{
- uint32_t clk_freq;
- int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
- low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
- if (ret)
- return 0;
- return clk_freq * 100;
-
- } else {
- return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
- }
-}
-
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
-{
- int ret = 0;
- bool swsmu = is_support_sw_smu(adev);
-
- switch (block_type) {
- case AMD_IP_BLOCK_TYPE_UVD:
- case AMD_IP_BLOCK_TYPE_VCE:
- if (swsmu) {
- ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- } else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu) {
- /*
- * TODO: need a better lock mechanism
- *
- * Here adev->pm.mutex lock protection is enforced on
- * UVD and VCE cases only. Since for other cases, there
- * may be already lock protection in amdgpu_pm.c.
- * This is a quick fix for the deadlock issue below.
- * NFO: task ocltst:2028 blocked for more than 120 seconds.
- * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
- * echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
- * cltst D 0 2028 2026 0x00000000
- * all Trace:
- * __schedule+0x2c0/0x870
- * schedule+0x2c/0x70
- * schedule_preempt_disabled+0xe/0x10
- * __mutex_lock.isra.9+0x26d/0x4e0
- * __mutex_lock_slowpath+0x13/0x20
- * ? __mutex_lock_slowpath+0x13/0x20
- * mutex_lock+0x2f/0x40
- * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
- * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
- * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
- * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
- * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
- * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
- */
- mutex_lock(&adev->pm.mutex);
- ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
- mutex_unlock(&adev->pm.mutex);
- }
- break;
- case AMD_IP_BLOCK_TYPE_GFX:
- case AMD_IP_BLOCK_TYPE_VCN:
- case AMD_IP_BLOCK_TYPE_SDMA:
- if (swsmu)
- ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
- ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
- break;
- case AMD_IP_BLOCK_TYPE_JPEG:
- if (swsmu)
- ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- break;
- case AMD_IP_BLOCK_TYPE_GMC:
- case AMD_IP_BLOCK_TYPE_ACP:
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
- ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
- int ret = 0;
-
- if (is_support_sw_smu(adev)) {
- ret = smu_baco_enter(smu);
- } else {
- if (!pp_funcs || !pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
- }
-
- return ret;
-}
-
-int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
- int ret = 0;
-
- if (is_support_sw_smu(adev)) {
- ret = smu_baco_exit(smu);
- } else {
- if (!pp_funcs || !pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* exit BACO state */
- ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
- }
-
- return ret;
-}
-
-int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
- enum pp_mp1_state mp1_state)
-{
- int ret = 0;
-
- if (is_support_sw_smu(adev)) {
- ret = smu_set_mp1_state(&adev->smu, mp1_state);
- } else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_mp1_state) {
- ret = adev->powerplay.pp_funcs->set_mp1_state(
- adev->powerplay.pp_handle,
- mp1_state);
- }
-
- return ret;
-}
-
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
- bool baco_cap;
-
- if (is_support_sw_smu(adev)) {
- return smu_baco_is_support(smu);
- } else {
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
- return false;
-
- if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
- return false;
-
- return baco_cap ? true : false;
- }
-}
-
-int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev)) {
- return smu_mode2_reset(smu);
- } else {
- if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
- return -ENOENT;
-
- return pp_funcs->asic_reset_mode_2(pp_handle);
- }
-}
-
-int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
- int ret = 0;
-
- dev_info(adev->dev, "GPU BACO reset\n");
-
- if (is_support_sw_smu(adev)) {
- ret = smu_baco_enter(smu);
- if (ret)
- return ret;
-
- ret = smu_baco_exit(smu);
- if (ret)
- return ret;
- } else {
- if (!pp_funcs
- || !pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
- if (ret)
- return ret;
-
- /* exit BACO state */
- ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
-{
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev))
- return smu_mode1_reset_is_support(smu);
-
- return false;
-}
-
-int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
-{
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev))
- return smu_mode1_reset(smu);
-
- return -EOPNOTSUPP;
-}
-
-int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
- enum PP_SMC_POWER_PROFILE type,
- bool en)
-{
- int ret = 0;
-
- if (is_support_sw_smu(adev))
- ret = smu_switch_power_profile(&adev->smu, type, en);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->switch_power_profile)
- ret = adev->powerplay.pp_funcs->switch_power_profile(
- adev->powerplay.pp_handle, type, en);
-
- return ret;
-}
-
-int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
- uint32_t pstate)
-{
- int ret = 0;
-
- if (is_support_sw_smu(adev))
- ret = smu_set_xgmi_pstate(&adev->smu, pstate);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_xgmi_pstate)
- ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
- pstate);
-
- return ret;
-}
-
-int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
- uint32_t cstate)
-{
- int ret = 0;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev))
- ret = smu_set_df_cstate(smu, cstate);
- else if (pp_funcs &&
- pp_funcs->set_df_cstate)
- ret = pp_funcs->set_df_cstate(pp_handle, cstate);
-
- return ret;
-}
-
-int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
-{
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev))
- return smu_allow_xgmi_power_down(smu, en);
-
- return 0;
-}
-
-int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
-{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs =
- adev->powerplay.pp_funcs;
- struct smu_context *smu = &adev->smu;
- int ret = 0;
-
- if (is_support_sw_smu(adev))
- ret = smu_enable_mgpu_fan_boost(smu);
- else if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
- ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
-
- return ret;
-}
-
-int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
- uint32_t msg_id)
-{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs =
- adev->powerplay.pp_funcs;
- int ret = 0;
-
- if (pp_funcs && pp_funcs->set_clockgating_by_smu)
- ret = pp_funcs->set_clockgating_by_smu(pp_handle,
- msg_id);
-
- return ret;
-}
-
-int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
- bool acquire)
-{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs =
- adev->powerplay.pp_funcs;
- int ret = -EOPNOTSUPP;
-
- if (pp_funcs && pp_funcs->smu_i2c_bus_access)
- ret = pp_funcs->smu_i2c_bus_access(pp_handle,
- acquire);
-
- return ret;
-}
-
-void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- if (power_supply_is_system_supplied() > 0)
- adev->pm.ac_power = true;
- else
- adev->pm.ac_power = false;
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->enable_bapm)
- amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
- mutex_unlock(&adev->pm.mutex);
-
- if (is_support_sw_smu(adev))
- smu_set_ac_dc(&adev->smu);
- }
-}
-
-int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
- void *data, uint32_t *size)
-{
- int ret = 0;
-
- if (!data || !size)
- return -EINVAL;
-
- if (is_support_sw_smu(adev))
- ret = smu_read_sensor(&adev->smu, sensor, data, size);
- else {
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
- sensor, data, size);
- else
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
-{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device,
- pm.dpm.thermal.work);
- /* switch to the thermal state */
- enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
- int temp, size = sizeof(temp);
-
- if (!adev->pm.dpm_enabled)
- return;
-
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
- (void *)&temp, &size)) {
- if (temp < adev->pm.dpm.thermal.min_temp)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- } else {
- if (adev->pm.dpm.thermal.high_to_low)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- }
- mutex_lock(&adev->pm.mutex);
- if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
- adev->pm.dpm.thermal_active = true;
- else
- adev->pm.dpm.thermal_active = false;
- adev->pm.dpm.state = dpm_state;
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
-}
-
-static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
- enum amd_pm_state_type dpm_state)
-{
- int i;
- struct amdgpu_ps *ps;
- u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
-
- /* check if the vblank period is too short to adjust the mclk */
- if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
- if (amdgpu_dpm_vblank_too_short(adev))
- single_display = false;
- }
-
- /* certain older asics have a separare 3D performance state,
- * so try that first if the user selected performance
- */
- if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
- dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
- /* balanced states don't exist at the moment */
- if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-
-restart_search:
- /* Pick the best power state based on current conditions */
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- ps = &adev->pm.dpm.ps[i];
- ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
- switch (dpm_state) {
- /* user states */
- case POWER_STATE_TYPE_BATTERY:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_BALANCED:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_PERFORMANCE:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- /* internal states */
- case POWER_STATE_TYPE_INTERNAL_UVD:
- if (adev->pm.dpm.uvd_ps)
- return adev->pm.dpm.uvd_ps;
- else
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_BOOT:
- return adev->pm.dpm.boot_ps;
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ULV:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- return ps;
- break;
- default:
- break;
- }
- }
- /* use a fallback state if we didn't match */
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (adev->pm.dpm.uvd_ps) {
- return adev->pm.dpm.uvd_ps;
- } else {
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- }
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- dpm_state = POWER_STATE_TYPE_BATTERY;
- goto restart_search;
- case POWER_STATE_TYPE_BATTERY:
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- default:
- break;
- }
-
- return NULL;
-}
-
-static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
-{
- struct amdgpu_ps *ps;
- enum amd_pm_state_type dpm_state;
- int ret;
- bool equal = false;
-
- /* if dpm init failed */
- if (!adev->pm.dpm_enabled)
- return;
-
- if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
- /* add other state override checks here */
- if ((!adev->pm.dpm.thermal_active) &&
- (!adev->pm.dpm.uvd_active))
- adev->pm.dpm.state = adev->pm.dpm.user_state;
- }
- dpm_state = adev->pm.dpm.state;
-
- ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
- if (ps)
- adev->pm.dpm.requested_ps = ps;
- else
- return;
-
- if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
- printk("switching from power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
- }
-
- /* update whether vce is active */
- ps->vce_active = adev->pm.dpm.vce_active;
- if (adev->powerplay.pp_funcs->display_configuration_changed)
- amdgpu_dpm_display_configuration_changed(adev);
-
- ret = amdgpu_dpm_pre_set_power_state(adev);
- if (ret)
- return;
-
- if (adev->powerplay.pp_funcs->check_state_equal) {
- if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
- equal = false;
- }
-
- if (equal)
- return;
-
- amdgpu_dpm_set_power_state(adev);
- amdgpu_dpm_post_set_power_state(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
- if (adev->powerplay.pp_funcs->force_performance_level) {
- if (adev->pm.dpm.thermal_active) {
- enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
- /* force low perf level for thermal */
- amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
- /* save the user's level */
- adev->pm.dpm.forced_level = level;
- } else {
- /* otherwise, user selected level */
- amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
- }
- }
-}
-
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
-{
- int i = 0;
-
- if (!adev->pm.dpm_enabled)
- return;
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->sched.ready)
- amdgpu_fence_wait_empty(ring);
- }
-
- if (is_support_sw_smu(adev)) {
- struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
- smu_handle_task(&adev->smu,
- smu_dpm->dpm_level,
- AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
- true);
- } else {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- if (!amdgpu_device_has_dc_support(adev)) {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- amdgpu_dpm_change_power_state_locked(adev);
- mutex_unlock(&adev->pm.mutex);
- }
- }
-}
-
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
-{
- int ret = 0;
-
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- } else {
- adev->pm.dpm.uvd_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- if (ret)
- DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
-
- /* enable/disable Low Memory PState for UVD (4k videos) */
- if (adev->asic_type == CHIP_STONEY &&
- adev->uvd.decode_image_width >= WIDTH_4K) {
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
- if (hwmgr && hwmgr->hwmgr_func &&
- hwmgr->hwmgr_func->update_nbdpm_pstate)
- hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
- !enable,
- true);
- }
- }
-}
-
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
-{
- int ret = 0;
-
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
- } else {
- adev->pm.dpm.vce_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- if (ret)
- DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
- }
-}
-
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
-{
- int i;
-
- if (adev->powerplay.pp_funcs->print_power_state == NULL)
- return;
-
- for (i = 0; i < adev->pm.dpm.num_ps; i++)
- amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
-
-}
-
-void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
-{
- int ret = 0;
-
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
- if (ret)
- DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
-}
-
-int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
-{
- int r;
-
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
- r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
- if (r) {
- pr_err("smu firmware loading failed\n");
- return r;
- }
- *smu_version = adev->pm.fw_version;
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
deleted file mode 100644
index dff4a5f99bb0..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ /dev/null
@@ -1,565 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __AMDGPU_DPM_H__
-#define __AMDGPU_DPM_H__
-
-enum amdgpu_int_thermal_type {
- THERMAL_TYPE_NONE,
- THERMAL_TYPE_EXTERNAL,
- THERMAL_TYPE_EXTERNAL_GPIO,
- THERMAL_TYPE_RV6XX,
- THERMAL_TYPE_RV770,
- THERMAL_TYPE_ADT7473_WITH_INTERNAL,
- THERMAL_TYPE_EVERGREEN,
- THERMAL_TYPE_SUMO,
- THERMAL_TYPE_NI,
- THERMAL_TYPE_SI,
- THERMAL_TYPE_EMC2103_WITH_INTERNAL,
- THERMAL_TYPE_CI,
- THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-struct amdgpu_ps {
- u32 caps; /* vbios flags */
- u32 class; /* vbios flags */
- u32 class2; /* vbios flags */
- /* UVD clocks */
- u32 vclk;
- u32 dclk;
- /* VCE clocks */
- u32 evclk;
- u32 ecclk;
- bool vce_active;
- enum amd_vce_level vce_level;
- /* asic priv */
- void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
- /* thermal interrupt work */
- struct work_struct work;
- /* low temperature threshold */
- int min_temp;
- /* high temperature threshold */
- int max_temp;
- /* edge max emergency(shutdown) temp */
- int max_edge_emergency_temp;
- /* hotspot low temperature threshold */
- int min_hotspot_temp;
- /* hotspot high temperature critical threshold */
- int max_hotspot_crit_temp;
- /* hotspot max emergency(shutdown) temp */
- int max_hotspot_emergency_temp;
- /* memory low temperature threshold */
- int min_mem_temp;
- /* memory high temperature critical threshold */
- int max_mem_crit_temp;
- /* memory max emergency(shutdown) temp */
- int max_mem_emergency_temp;
- /* was last interrupt low to high or high to low */
- bool high_to_low;
- /* interrupt source */
- struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci;
-};
-
-struct amdgpu_clock_array {
- u32 count;
- u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
- u32 clk;
- u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
- u32 count;
- struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
- struct {
- u16 vddc;
- u32 leakage;
- };
- struct {
- u16 vddc1;
- u16 vddc2;
- u16 vddc3;
- };
-};
-
-struct amdgpu_cac_leakage_table {
- u32 count;
- union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
- u16 voltage;
- u32 sclk;
- u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
- u32 count;
- struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
- u32 vclk;
- u32 dclk;
- u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
- u32 ecclk;
- u32 evclk;
- u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
- u8 ppm_design;
- u16 cpu_core_number;
- u32 platform_tdp;
- u32 small_ac_platform_tdp;
- u32 platform_tdc;
- u32 small_ac_platform_tdc;
- u32 apu_tdp;
- u32 dgpu_tdp;
- u32 dgpu_ulv_power;
- u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
- u16 tdp;
- u16 configurable_tdp;
- u16 tdc;
- u16 battery_power_limit;
- u16 small_power_limit;
- u16 low_cac_leakage;
- u16 high_cac_leakage;
- u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
- struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
- struct amdgpu_clock_array valid_sclk_values;
- struct amdgpu_clock_array valid_mclk_values;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
- u32 mclk_sclk_ratio;
- u32 sclk_mclk_delta;
- u16 vddc_vddci_delta;
- u16 min_vddc_for_pcie_gen2;
- struct amdgpu_cac_leakage_table cac_leakage_table;
- struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
- struct amdgpu_ppm_table *ppm_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
- u16 t_min;
- u16 t_med;
- u16 t_high;
- u16 pwm_min;
- u16 pwm_med;
- u16 pwm_high;
- u8 t_hyst;
- u32 cycle_delay;
- u16 t_max;
- u8 control_mode;
- u16 default_max_fan_pwm;
- u16 default_fan_output_sensitivity;
- u16 fan_output_sensitivity;
- bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-#define amdgpu_dpm_pre_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_post_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_display_configuration_changed(adev) \
- ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_print_power_state(adev, ps) \
- ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
-
-#define amdgpu_dpm_vblank_too_short(adev) \
- ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_enable_bapm(adev, e) \
- ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
- ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
- ((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \
- ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
-
-#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
- ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
-
-#define amdgpu_dpm_get_vce_clock_state(adev, i) \
- ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
-
-#define amdgpu_dpm_get_performance_level(adev) \
- ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_reset_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
- (adev)->powerplay.pp_handle, request))
-
-#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
- ((adev)->powerplay.pp_funcs->get_power_profile_mode(\
- (adev)->powerplay.pp_handle, buf))
-
-#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
- ((adev)->powerplay.pp_funcs->set_power_profile_mode(\
- (adev)->powerplay.pp_handle, parameter, size))
-
-#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
- ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
- (adev)->powerplay.pp_handle, type, parameter, size))
-
-#define amdgpu_dpm_get_ppfeature_status(adev, buf) \
- ((adev)->powerplay.pp_funcs->get_ppfeature_status(\
- (adev)->powerplay.pp_handle, (buf)))
-
-#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \
- ((adev)->powerplay.pp_funcs->set_ppfeature_status(\
- (adev)->powerplay.pp_handle, (ppfeatures)))
-
-#define amdgpu_dpm_get_gpu_metrics(adev, table) \
- ((adev)->powerplay.pp_funcs->get_gpu_metrics((adev)->powerplay.pp_handle, table))
-
-struct amdgpu_dpm {
- struct amdgpu_ps *ps;
- /* number of valid power states */
- int num_ps;
- /* current power state that is active */
- struct amdgpu_ps *current_ps;
- /* requested power state */
- struct amdgpu_ps *requested_ps;
- /* boot up power state */
- struct amdgpu_ps *boot_ps;
- /* default uvd power state */
- struct amdgpu_ps *uvd_ps;
- /* vce requirements */
- u32 num_of_vce_states;
- struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
- enum amd_vce_level vce_level;
- enum amd_pm_state_type state;
- enum amd_pm_state_type user_state;
- enum amd_pm_state_type last_state;
- enum amd_pm_state_type last_user_state;
- u32 platform_caps;
- u32 voltage_response_time;
- u32 backbias_response_time;
- void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
- struct amdgpu_dpm_dynamic_state dyn_state;
- struct amdgpu_dpm_fan fan;
- u32 tdp_limit;
- u32 near_tdp_limit;
- u32 near_tdp_limit_adjusted;
- u32 sq_ramping_threshold;
- u32 cac_leakage;
- u16 tdp_od_limit;
- u32 tdp_adjustment;
- u16 load_line_slope;
- bool power_control;
- /* special states active */
- bool thermal_active;
- bool uvd_active;
- bool vce_active;
- /* thermal handling */
- struct amdgpu_dpm_thermal thermal;
- /* forced levels */
- enum amd_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
- struct mutex mutex;
- u32 current_sclk;
- u32 current_mclk;
- u32 default_sclk;
- u32 default_mclk;
- struct amdgpu_i2c_chan *i2c_bus;
- bool bus_locked;
- /* internal thermal controller on rv6xx+ */
- enum amdgpu_int_thermal_type int_thermal_type;
- struct device *int_hwmon_dev;
- /* fan control parameters */
- bool no_fan;
- u8 fan_pulses_per_revolution;
- u8 fan_min_rpm;
- u8 fan_max_rpm;
- /* dpm */
- bool dpm_enabled;
- bool sysfs_initialized;
- struct amdgpu_dpm dpm;
- const struct firmware *fw; /* SMC firmware */
- uint32_t fw_version;
- uint32_t pcie_gen_mask;
- uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
- uint32_t smu_prv_buffer_size;
- struct amdgpu_bo *smu_prv_buffer;
- bool ac_power;
- /* powerplay feature */
- uint32_t pp_feature;
-
- /* Used for I2C access to various EEPROMs on relevant ASICs */
- struct i2c_adapter smu_i2c;
- struct list_head pm_attr_list;
-};
-
-#define R600_SSTU_DFLT 0
-#define R600_SST_DFLT 0x00C8
-
-/* XXX are these ok? */
-#define R600_TEMP_RANGE_MIN (90 * 1000)
-#define R600_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-enum amdgpu_td {
- AMDGPU_TD_AUTO,
- AMDGPU_TD_UP,
- AMDGPU_TD_DOWN,
-};
-
-enum amdgpu_display_watermark {
- AMDGPU_DISPLAY_WATERMARK_LOW = 0,
- AMDGPU_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum amdgpu_display_gap
-{
- AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- AMDGPU_PM_DISPLAY_GAP_VBLANK = 1,
- AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2,
- AMDGPU_PM_DISPLAY_GAP_IGNORE = 3,
-};
-
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps);
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
-
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev);
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen);
-
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx);
-
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
- uint32_t block_type, bool gate);
-
-extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
-
-extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
-
-int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
- uint32_t pstate);
-
-int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
- enum PP_SMC_POWER_PROFILE type,
- bool en);
-
-int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
-
-int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
-
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
-
-bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
-int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
-
-int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
- enum pp_mp1_state mp1_state);
-
-int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
-
-int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
-
-int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
- uint32_t cstate);
-
-int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en);
-
-int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev);
-
-int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
- uint32_t msg_id);
-
-int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
- bool acquire);
-
-void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
-
-int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
-
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
-
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
-void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
-int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
deleted file mode 100644
index 5fc6a9a13096..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ /dev/null
@@ -1,3613 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rafał Miłecki <zajec5@gmail.com>
- * Alex Deucher <alexdeucher@gmail.com>
- */
-
-#include <drm/drm_debugfs.h>
-
-#include "amdgpu.h"
-#include "amdgpu_drv.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_dpm.h"
-#include "amdgpu_smu.h"
-#include "atom.h"
-#include <linux/pci.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/nospec.h>
-#include <linux/pm_runtime.h>
-#include "hwmgr.h"
-
-static const struct cg_flag_name clocks[] = {
- {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
- {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
- {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
- {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
- {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
- {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
- {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
- {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
-
- {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
- {0, NULL},
-};
-
-static const struct hwmon_temp_label {
- enum PP_HWMON_TEMP channel;
- const char *label;
-} temp_label[] = {
- {PP_TEMP_EDGE, "edge"},
- {PP_TEMP_JUNCTION, "junction"},
- {PP_TEMP_MEM, "mem"},
-};
-
-/**
- * DOC: power_dpm_state
- *
- * The power_dpm_state file is a legacy interface and is only provided for
- * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
- * certain power related parameters. The file power_dpm_state is used for this.
- * It accepts the following arguments:
- *
- * - battery
- *
- * - balanced
- *
- * - performance
- *
- * battery
- *
- * On older GPUs, the vbios provided a special power state for battery
- * operation. Selecting battery switched to this state. This is no
- * longer provided on newer GPUs so the option does nothing in that case.
- *
- * balanced
- *
- * On older GPUs, the vbios provided a special power state for balanced
- * operation. Selecting balanced switched to this state. This is no
- * longer provided on newer GPUs so the option does nothing in that case.
- *
- * performance
- *
- * On older GPUs, the vbios provided a special power state for performance
- * operation. Selecting performance switched to this state. This is no
- * longer provided on newer GPUs so the option does nothing in that case.
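- *
- * A minimal usage sketch, assuming the GPU is exposed as card0 (the sysfs
- * path may differ on your system):
- *
- * .. code-block:: bash
- *
- *    # query the currently selected state
- *    cat /sys/class/drm/card0/device/power_dpm_state
- *    # request the performance state (a no-op on newer GPUs)
- *    echo performance > /sys/class/drm/card0/device/power_dpm_state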
- *
- */
-
-static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_pm_state_type pm;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- if (adev->smu.ppt_funcs->get_current_power_state)
- pm = smu_get_current_power_state(&adev->smu);
- else
- pm = adev->pm.dpm.user_state;
- } else if (adev->powerplay.pp_funcs->get_current_power_state) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- } else {
- pm = adev->pm.dpm.user_state;
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
- (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
-}
-
-static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_pm_state_type state;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (strncmp("battery", buf, strlen("battery")) == 0)
- state = POWER_STATE_TYPE_BATTERY;
- else if (strncmp("balanced", buf, strlen("balanced")) == 0)
- state = POWER_STATE_TYPE_BALANCED;
- else if (strncmp("performance", buf, strlen("performance")) == 0)
- state = POWER_STATE_TYPE_PERFORMANCE;
- else
- return -EINVAL;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.user_state = state;
- mutex_unlock(&adev->pm.mutex);
- } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.user_state = state;
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-
-/**
- * DOC: power_dpm_force_performance_level
- *
- * The amdgpu driver provides a sysfs API for adjusting certain power
- * related parameters. The file power_dpm_force_performance_level is
- * used for this. It accepts the following arguments:
- *
- * - auto
- *
- * - low
- *
- * - high
- *
- * - manual
- *
- * - profile_standard
- *
- * - profile_min_sclk
- *
- * - profile_min_mclk
- *
- * - profile_peak
- *
- * auto
- *
- * When auto is selected, the driver will attempt to dynamically select
- * the optimal power profile for current conditions in the driver.
- *
- * low
- *
- * When low is selected, the clocks are forced to the lowest power state.
- *
- * high
- *
- * When high is selected, the clocks are forced to the highest power state.
- *
- * manual
- *
- * When manual is selected, the user can manually adjust which power states
- * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
- * and pp_dpm_pcie files and adjust the power state transition heuristics
- * via the pp_power_profile_mode sysfs file.
- *
- * profile_standard
- * profile_min_sclk
- * profile_min_mclk
- * profile_peak
- *
- * When the profiling modes are selected, clock and power gating are
- * disabled and the clocks are set for different profiling cases. This
- * mode is recommended for profiling specific work loads where you do
- * not want clock or power gating for clock fluctuation to interfere
- * with your results. profile_standard sets the clocks to a fixed clock
- * level which varies from asic to asic. profile_min_sclk forces the sclk
- * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
- * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
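- *
- * As an illustration, assuming the GPU is exposed as card0, switching to
- * manual mode before editing the per-clock levels might look like:
- *
- * .. code-block:: bash
- *
- *    echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
- *    cat /sys/class/drm/card0/device/power_dpm_force_performance_level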
- *
- */
-
-static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_dpm_forced_level level = 0xff;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- level = smu_get_performance_level(&adev->smu);
- else if (adev->powerplay.pp_funcs->get_performance_level)
- level = amdgpu_dpm_get_performance_level(adev);
- else
- level = adev->pm.dpm.forced_level;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
- (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
- (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
- (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
- "unknown");
-}
-
-static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level = 0xff;
- int ret = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (strncmp("low", buf, strlen("low")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_LOW;
- } else if (strncmp("high", buf, strlen("high")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_HIGH;
- } else if (strncmp("auto", buf, strlen("auto")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_AUTO;
- } else if (strncmp("manual", buf, strlen("manual")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_MANUAL;
- } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
- } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
- } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
- } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
- } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
- } else {
- return -EINVAL;
- }
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- current_level = smu_get_performance_level(&adev->smu);
- else if (adev->powerplay.pp_funcs->get_performance_level)
- current_level = amdgpu_dpm_get_performance_level(adev);
-
- if (current_level == level) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return count;
- }
-
- if (adev->asic_type == CHIP_RAVEN) {
- if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
- if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
- amdgpu_gfx_off_ctrl(adev, false);
- else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
- amdgpu_gfx_off_ctrl(adev, true);
- }
- }
-
- /* profile_exit setting is valid only when current mode is in profile mode */
- if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
- pr_err("Currently not in any profile mode!\n");
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_force_performance_level(&adev->smu, level);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- } else if (adev->powerplay.pp_funcs->force_performance_level) {
- mutex_lock(&adev->pm.mutex);
- if (adev->pm.dpm.thermal_active) {
- mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- ret = amdgpu_dpm_force_performance_level(adev, level);
- if (ret) {
- mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- } else {
- adev->pm.dpm.forced_level = level;
- }
- mutex_unlock(&adev->pm.mutex);
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_num_states(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- int i, buf_len, ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_get_power_num_states(&adev->smu, &data);
-		if (ret) {
-			pm_runtime_mark_last_busy(ddev->dev);
-			pm_runtime_put_autosuspend(ddev->dev);
-			return ret;
-		}
- } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
- amdgpu_dpm_get_pp_num_states(adev, &data);
- } else {
- memset(&data, 0, sizeof(data));
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
- for (i = 0; i < data.nums; i++)
- buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
- (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
- (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
- (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
- (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
-
- return buf_len;
-}
-
-static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- struct smu_context *smu = &adev->smu;
- enum amd_pm_state_type pm = 0;
- int i = 0, ret = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- pm = smu_get_current_power_state(smu);
- ret = smu_get_power_num_states(smu, &data);
-		if (ret) {
-			pm_runtime_mark_last_busy(ddev->dev);
-			pm_runtime_put_autosuspend(ddev->dev);
-			return ret;
-		}
- } else if (adev->powerplay.pp_funcs->get_current_power_state
- && adev->powerplay.pp_funcs->get_pp_num_states) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- amdgpu_dpm_get_pp_num_states(adev, &data);
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
-
- if (i == data.nums)
- i = -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", i);
-}
-
-static ssize_t amdgpu_get_pp_force_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (adev->pp_force_state_enabled)
- return amdgpu_get_pp_cur_state(dev, attr, buf);
- else
- return snprintf(buf, PAGE_SIZE, "\n");
-}
-
-static ssize_t amdgpu_set_pp_force_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_pm_state_type state = 0;
- unsigned long idx;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (strlen(buf) == 1)
- adev->pp_force_state_enabled = false;
- else if (is_support_sw_smu(adev))
- adev->pp_force_state_enabled = false;
- else if (adev->powerplay.pp_funcs->dispatch_tasks &&
- adev->powerplay.pp_funcs->get_pp_num_states) {
- struct pp_states_info data;
-
- ret = kstrtoul(buf, 0, &idx);
- if (ret || idx >= ARRAY_SIZE(data.states))
- return -EINVAL;
-
- idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
-
- amdgpu_dpm_get_pp_num_states(adev, &data);
- state = data.states[idx];
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- /* only set user selected power states */
- if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
- state != POWER_STATE_TYPE_DEFAULT) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_ENABLE_USER_STATE, &state);
- adev->pp_force_state_enabled = true;
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- }
-
- return count;
-}
-
-/**
- * DOC: pp_table
- *
- * The amdgpu driver provides a sysfs API for uploading new powerplay
- * tables. The file pp_table is used for this. Reading the file
- * will dump the current power play table. Writing to the file
- * will attempt to upload a new powerplay table and re-initialize
- * powerplay using that new table.
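- *
- * For example, assuming the GPU is exposed as card0, the current table can
- * be saved and a modified copy uploaded with:
- *
- * .. code-block:: bash
- *
- *    cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table
- *    # ... edit /tmp/pp_table with a suitable tool ...
- *    cat /tmp/pp_table > /sys/class/drm/card0/device/pp_table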
- *
- */
-
-static ssize_t amdgpu_get_pp_table(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- char *table = NULL;
- int size, ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- if (size < 0)
- return size;
- } else if (adev->powerplay.pp_funcs->get_pp_table) {
- size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- if (size < 0)
- return size;
- } else {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return 0;
- }
-
- if (size >= PAGE_SIZE)
- size = PAGE_SIZE - 1;
-
- memcpy(buf, table, size);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_table(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
- } else if (adev->powerplay.pp_funcs->set_pp_table)
- amdgpu_dpm_set_pp_table(adev, buf, count);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-/**
- * DOC: pp_od_clk_voltage
- *
- * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
- * in each power level within a power state. The pp_od_clk_voltage is used for
- * this.
- *
- * Note that the actual memory controller clock rate is exposed, not
- * the effective memory clock of the DRAMs. To translate it, use the
- * following formula:
- *
- * Clock conversion (Mhz):
- *
- * HBM: effective_memory_clock = memory_controller_clock * 1
- *
- * G5: effective_memory_clock = memory_controller_clock * 1
- *
- * G6: effective_memory_clock = memory_controller_clock * 2
- *
- * DRAM data rate (MT/s):
- *
- * HBM: effective_memory_clock * 2 = data_rate
- *
- * G5: effective_memory_clock * 4 = data_rate
- *
- * G6: effective_memory_clock * 8 = data_rate
- *
- * Bandwidth (MB/s):
- *
- * data_rate * vram_bit_width / 8 = memory_bandwidth
- *
- * Some examples:
- *
- * G5 on RX460:
- *
- * memory_controller_clock = 1750 Mhz
- *
- * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz
- *
- * data rate = 1750 * 4 = 7000 MT/s
- *
- * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
- *
- * G6 on RX5700:
- *
- * memory_controller_clock = 875 Mhz
- *
- * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz
- *
- * data rate = 1750 * 8 = 14000 MT/s
- *
- * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
- *
- * < For Vega10 and previous ASICs >
- *
- * Reading the file will display:
- *
- * - a list of engine clock levels and voltages labeled OD_SCLK
- *
- * - a list of memory clock levels and voltages labeled OD_MCLK
- *
- * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
- *
- * To manually adjust these settings, first select manual using
- * power_dpm_force_performance_level. Enter a new value for each
- * level by writing a string that contains "s/m level clock voltage" to
- * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
- * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
- * 810 mV. When you have edited all of the states as needed, write
- * "c" (commit) to the file to commit your changes. If you want to reset to the
- * default power levels, write "r" (reset) to the file to reset them.
- *
- *
- * < For Vega20 and newer ASICs >
- *
- * Reading the file will display:
- *
- * - minimum and maximum engine clock labeled OD_SCLK
- *
- * - maximum memory clock labeled OD_MCLK
- *
- * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
- * They can be used to calibrate the sclk voltage curve.
- *
- * - a list of valid ranges for sclk, mclk, and voltage curve points
- * labeled OD_RANGE
- *
- * To manually adjust these settings:
- *
- * - First select manual using power_dpm_force_performance_level
- *
- * - For clock frequency setting, enter a new value by writing a
- *   string that contains "s/m index clock" to the file. The index
- *   should be 0 to set the minimum clock and 1 to set the maximum
- *   clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz and
- *   "m 1 800" will update the maximum mclk to 800 MHz.
- *
- * - For the sclk voltage curve, enter the new values by writing a
- *   string that contains "vc point clock voltage" to the file. The
- *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
- *   update point1 with the clock set to 300 MHz and the voltage to
- *   600 mV, and "vc 2 1000 1000" will update point3 with the clock
- *   set to 1000 MHz and the voltage to 1000 mV.
- *
- * - When you have edited all of the states as needed, write "c" (commit)
- * to the file to commit your changes
- *
- * - If you want to reset to the default power levels, write "r" (reset)
- * to the file to reset them
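- *
- * Putting it together, a possible Vega20-style session (device path and
- * values purely illustrative) could be:
- *
- * .. code-block:: bash
- *
- *    echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
- *    echo "s 1 2000" > /sys/class/drm/card0/device/pp_od_clk_voltage
- *    echo "vc 2 1900 1050" > /sys/class/drm/card0/device/pp_od_clk_voltage
- *    echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage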
- *
- */
-
-static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t parameter_size = 0;
- long parameter[64];
- char buf_cpy[128];
- char *tmp_str;
- char *sub_str;
- const char delimiter[3] = {' ', '\n', '\0'};
- uint32_t type;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (count > 127)
- return -EINVAL;
-
- if (*buf == 's')
- type = PP_OD_EDIT_SCLK_VDDC_TABLE;
- else if (*buf == 'm')
- type = PP_OD_EDIT_MCLK_VDDC_TABLE;
- else if(*buf == 'r')
- type = PP_OD_RESTORE_DEFAULT_TABLE;
- else if (*buf == 'c')
- type = PP_OD_COMMIT_DPM_TABLE;
- else if (!strncmp(buf, "vc", 2))
- type = PP_OD_EDIT_VDDC_CURVE;
- else
- return -EINVAL;
-
- memcpy(buf_cpy, buf, count+1);
-
- tmp_str = buf_cpy;
-
- if (type == PP_OD_EDIT_VDDC_CURVE)
- tmp_str++;
- while (isspace(*++tmp_str));
-
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
- ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
- if (ret)
- return -EINVAL;
- parameter_size++;
-
- while (isspace(*tmp_str))
- tmp_str++;
- }
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_od_edit_dpm_table(&adev->smu, type,
- parameter, parameter_size);
-
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- } else {
- if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
- ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
- parameter, parameter_size);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
-
- if (type == PP_OD_COMMIT_DPM_TABLE) {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_READJUST_POWER_STATE,
- NULL);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return count;
- } else {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
- } else if (adev->powerplay.pp_funcs->print_clock_levels) {
- size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
- size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
- } else {
- size = snprintf(buf, PAGE_SIZE, "\n");
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-/**
- * DOC: pp_features
- *
- * The amdgpu driver provides a sysfs API for adjusting what powerplay
- * features to be enabled. The file pp_features is used for this. And
- * this is only available for Vega10 and later dGPUs.
- *
- * Reading back the file will show you the following:
- * - Current ppfeature masks
- * - List of all the supported powerplay features with their names,
- *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
- *
- * To manually enable or disable a specific feature, just set or clear
- * the corresponding bit from original ppfeature masks and input the
- * new ppfeature masks.
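- *
- * For instance, assuming the GPU is exposed as card0 (the mask value below
- * is purely illustrative):
- *
- * .. code-block:: bash
- *
- *    cat /sys/class/drm/card0/device/pp_features
- *    echo 0x00000000199099ff > /sys/class/drm/card0/device/pp_features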
- */
-static ssize_t amdgpu_set_pp_features(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint64_t featuremask;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = kstrtou64(buf, 0, &featuremask);
- if (ret)
- return -EINVAL;
-
- pr_debug("featuremask = 0x%llx\n", featuremask);
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
- ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_features(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
- else if (adev->powerplay.pp_funcs->get_ppfeature_status)
- size = amdgpu_dpm_get_ppfeature_status(adev, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-/**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
- *
- * The amdgpu driver provides a sysfs API for adjusting what power levels
- * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
- * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
- * this.
- *
- * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
- * Vega10 and later ASICs.
- * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
- *
- * Reading back the files will show you the available power levels within
- * the power state and the clock information for those levels.
- *
- * To manually adjust these states, first select manual using
- * power_dpm_force_performance_level.
- * Secondly, enter a new value for each level by writing a space-separated
- * list of the desired level indices to the file.
- * E.g.,
- *
- * .. code-block:: bash
- *
- * echo "4 5 6" > pp_dpm_sclk
- *
- * will enable sclk levels 4, 5, and 6.
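- *
- * Reading one of the files back typically lists the levels with the
- * currently active one marked by a trailing asterisk, along the lines of
- * (values illustrative):
- *
- * .. code-block:: bash
- *
- *    $ cat pp_dpm_sclk
- *    0: 300Mhz
- *    1: 600Mhz *
- *    2: 900Mhz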
- *
- * NOTE: changing the dcefclk max dpm level is not supported at the moment
- */
-
-static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-/*
- * Worst case: 32 bits individually specified, in octal at 12 characters
- * per line (+1 for \n).
- */
-#define AMDGPU_MASK_BUF_MAX (32 * 13)
-
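-/*
- * Parse a user supplied, space separated list of level indices (e.g. "4 5 6")
- * into a bitmask of enabled levels; that example input yields a mask of 0x70.
- */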
-static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
-{
- int ret;
- long level;
- char *sub_str = NULL;
- char *tmp;
- char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
- const char delimiter[3] = {' ', '\n', '\0'};
- size_t bytes;
-
- *mask = 0;
-
- bytes = min(count, sizeof(buf_cpy) - 1);
- memcpy(buf_cpy, buf, bytes);
- buf_cpy[bytes] = '\0';
- tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
- if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
- if (ret)
- return -EINVAL;
- *mask |= 1 << level;
- } else
- break;
- }
-
- return 0;
-}
-
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t mask = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t value = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
- else if (adev->powerplay.pp_funcs->get_sclk_od)
- value = amdgpu_dpm_get_sclk_od(adev);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- long int value;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = kstrtol(buf, 0, &value);
-
- if (ret)
- return -EINVAL;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
- } else {
- if (adev->powerplay.pp_funcs->set_sclk_od)
- amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
-
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- } else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t value = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
- else if (adev->powerplay.pp_funcs->get_mclk_od)
- value = amdgpu_dpm_get_mclk_od(adev);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- long int value;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = kstrtol(buf, 0, &value);
-
- if (ret)
- return -EINVAL;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
- } else {
- if (adev->powerplay.pp_funcs->set_mclk_od)
- amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
-
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- } else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-/**
- * DOC: pp_power_profile_mode
- *
- * The amdgpu driver provides a sysfs API for adjusting the heuristics
- * related to switching between power levels in a power state. The file
- * pp_power_profile_mode is used for this.
- *
- * Reading this file outputs a list of all of the predefined power profiles
- * and the relevant heuristics settings for that profile.
- *
- * To select a profile or create a custom profile, first select manual using
- * power_dpm_force_performance_level. Writing the number of a predefined
- * profile to pp_power_profile_mode will enable those heuristics. To
- * create a custom set of heuristics, write a string of numbers to the file
- * starting with the number of the custom profile along with a setting
- * for each heuristic parameter. The heuristic parameters and their meanings
- * differ from ASIC family to ASIC family.
- *
- */
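
As a rough user-space sketch of the flow described above (not part of the driver; the card path and the profile index are illustrative assumptions, read the file first to see which profiles your ASIC exposes):

	#include <fcntl.h>
	#include <unistd.h>

	static void select_power_profile(void)
	{
		int fd;

		/* manual performance level must be selected first (see above) */
		fd = open("/sys/class/drm/card0/device/power_dpm_force_performance_level",
			  O_WRONLY);
		if (fd >= 0) {
			write(fd, "manual", 6);
			close(fd);
		}

		/* pick one of the predefined profiles listed when reading the file */
		fd = open("/sys/class/drm/card0/device/pp_power_profile_mode", O_WRONLY);
		if (fd >= 0) {
			write(fd, "2", 1);
			close(fd);
		}
	}
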
-
-static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_get_power_profile_mode(&adev->smu, buf);
- else if (adev->powerplay.pp_funcs->get_power_profile_mode)
- size = amdgpu_dpm_get_power_profile_mode(adev, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-
-static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- int ret;
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t parameter_size = 0;
- long parameter[64];
- char *sub_str, buf_cpy[128];
- char *tmp_str;
- uint32_t i = 0;
- char tmp[2];
- long int profile_mode = 0;
- const char delimiter[3] = {' ', '\n', '\0'};
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- tmp[0] = *(buf);
- tmp[1] = '\0';
- ret = kstrtol(tmp, 0, &profile_mode);
- if (ret)
- return -EINVAL;
-
- if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (count < 2 || count > 127)
- return -EINVAL;
- while (isspace(*++buf))
- i++;
- memcpy(buf_cpy, buf, count-i);
- tmp_str = buf_cpy;
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
- ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
- if (ret)
- return -EINVAL;
- parameter_size++;
- while (isspace(*tmp_str))
- tmp_str++;
- }
- }
- parameter[parameter_size] = profile_mode;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
- else if (adev->powerplay.pp_funcs->set_power_profile_mode)
- ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (!ret)
- return count;
-
- return -EINVAL;
-}
-
-/**
- * DOC: gpu_busy_percent
- *
- * The amdgpu driver provides a sysfs API for reading how busy the GPU
- * is as a percentage. The file gpu_busy_percent is used for this.
- * The SMU firmware computes a percentage of load based on the
- * aggregate activity level in the IP cores.
- */
-static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int r, value, size = sizeof(value);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return r;
- }
-
- /* read the IP busy sensor */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
- (void *)&value, &size);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-/**
- * DOC: mem_busy_percent
- *
- * The amdgpu driver provides a sysfs API for reading how busy the VRAM
- * is as a percentage. The file mem_busy_percent is used for this.
- * The SMU firmware computes a percentage of load based on the
- * aggregate activity level in the IP cores.
- */
-static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int r, value, size = sizeof(value);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return r;
- }
-
-	/* read the memory load sensor */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
- (void *)&value, &size);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-/**
- * DOC: pcie_bw
- *
- * The amdgpu driver provides a sysfs API for estimating how much data
- * has been received and sent by the GPU in the last second through PCIe.
- * The file pcie_bw is used for this.
- * The Perf counters count the number of received and sent messages and return
- * those values, as well as the maximum payload size of a PCIe packet (mps).
- * Note that it is not possible to easily and quickly obtain the size of each
- * packet transmitted, so we output the max payload size (mps) to allow for
- * quick estimation of the PCIe bandwidth usage.
- */
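
Since only message counts and the max payload size are reported, the file supports an upper-bound estimate rather than an exact measurement. A small sketch of that arithmetic (the inputs are whatever was read from pcie_bw):

	#include <stdint.h>

	/* count0/count1 are the received/sent message counts and mps the max
	 * payload size, all as printed by pcie_bw; each counted message carries
	 * at most mps bytes, so this is a rough upper bound for the last second. */
	static uint64_t pcie_bytes_upper_bound(uint64_t count0, uint64_t count1,
					       unsigned int mps)
	{
		return (count0 + count1) * mps;
	}
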
-static ssize_t amdgpu_get_pcie_bw(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint64_t count0 = 0, count1 = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (adev->flags & AMD_IS_APU)
- return -ENODATA;
-
- if (!adev->asic_funcs->get_pcie_usage)
- return -ENODATA;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
- count0, count1, pcie_get_mps(adev->pdev));
-}
-
-/**
- * DOC: unique_id
- *
- * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
- * The file unique_id is used for this.
- * The ID is unique to the GPU and persists even when the card is moved from
- * machine to machine.
- *
- * NOTE: This will only work for GFX9 and newer. This file will be absent
- * on unsupported ASICs (GFX8 and older).
- */
-static ssize_t amdgpu_get_unique_id(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (adev->unique_id)
- return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
-
- return 0;
-}
-
-/**
- * DOC: thermal_throttling_logging
- *
- * Thermal throttling pulls down the clock frequency and thus the performance.
- * It's a useful mechanism to protect the chip from overheating. Since it
- * impacts performance, the user controls whether it is enabled and if so,
- * the log frequency.
- *
- * Reading back the file shows you the status (enabled or disabled) and
- * the interval (in seconds) between thermal logging events.
- *
- * Writing an integer to the file sets a new logging interval, in seconds.
- * The value should be between 1 and 3600. If the value is less than 1,
- * thermal logging is disabled. Values greater than 3600 are rejected.
- */
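
A minimal user-space sketch of the write side described above (the sysfs path is an illustrative assumption): writing "60" logs throttling events at most once per 60 seconds, while writing "0" disables the logging.

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static void set_throttling_log_interval(const char *val)	/* e.g. "60" or "0" */
	{
		int fd = open("/sys/class/drm/card0/device/thermal_throttling_logging",
			      O_WRONLY);

		if (fd >= 0) {
			write(fd, val, strlen(val));
			close(fd);
		}
	}
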
-static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
-
- return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
- adev->ddev->unique,
- atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
- adev->throttling_logging_rs.interval / HZ + 1);
-}
-
-static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- long throttling_logging_interval;
- unsigned long flags;
- int ret = 0;
-
- ret = kstrtol(buf, 0, &throttling_logging_interval);
- if (ret)
- return ret;
-
- if (throttling_logging_interval > 3600)
- return -EINVAL;
-
- if (throttling_logging_interval > 0) {
- raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
- /*
- * Reset the ratelimit timer internals.
- * This can effectively restart the timer.
- */
- adev->throttling_logging_rs.interval =
- (throttling_logging_interval - 1) * HZ;
- adev->throttling_logging_rs.begin = 0;
- adev->throttling_logging_rs.printed = 0;
- adev->throttling_logging_rs.missed = 0;
- raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
- atomic_set(&adev->throttling_logging_enabled, 1);
- } else {
- atomic_set(&adev->throttling_logging_enabled, 0);
- }
-
- return count;
-}
-
-/**
- * DOC: gpu_metrics
- *
- * The amdgpu driver provides a sysfs API for retrieving current gpu
- * metrics data. The file gpu_metrics is used for this. Reading the
- * file will dump all the current gpu metrics data.
- *
- * The data include temperature, frequency, engine utilization,
- * power consumption, throttler status, fan speed and CPU core statistics
- * (available on APUs only). In other words, it gives a snapshot of all
- * sensors at the same time.
- */
-static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- void *gpu_metrics;
- ssize_t size = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
- else if (adev->powerplay.pp_funcs->get_gpu_metrics)
- size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
-
- if (size <= 0)
- goto out;
-
- if (size >= PAGE_SIZE)
- size = PAGE_SIZE - 1;
-
- memcpy(buf, gpu_metrics, size);
-
-out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static struct amdgpu_device_attr amdgpu_device_attrs[] = {
- AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
-};
-
-static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
- uint32_t mask, enum amdgpu_device_attr_states *states)
-{
- struct device_attribute *dev_attr = &attr->dev_attr;
- const char *attr_name = dev_attr->attr.name;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- enum amd_asic_type asic_type = adev->asic_type;
-
- if (!(attr->flags & mask)) {
- *states = ATTR_STATE_UNSUPPORTED;
- return 0;
- }
-
-#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
-
- if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
- if (asic_type < CHIP_VEGA10)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
- if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
- if (asic_type < CHIP_VEGA20)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
- if (asic_type == CHIP_ARCTURUS)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
- *states = ATTR_STATE_UNSUPPORTED;
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
- *states = ATTR_STATE_SUPPORTED;
- } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
- if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pcie_bw)) {
- /* PCIe Perf counters won't work on APU nodes */
- if (adev->flags & AMD_IS_APU)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(unique_id)) {
- if (asic_type != CHIP_VEGA10 &&
- asic_type != CHIP_VEGA20 &&
- asic_type != CHIP_ARCTURUS)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_features)) {
- if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(gpu_metrics)) {
- if (asic_type < CHIP_VEGA12)
- *states = ATTR_STATE_UNSUPPORTED;
- }
-
- if (asic_type == CHIP_ARCTURUS) {
- /* Arcturus does not support standalone mclk/socclk/fclk level setting */
- if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
- DEVICE_ATTR_IS(pp_dpm_socclk) ||
- DEVICE_ATTR_IS(pp_dpm_fclk)) {
- dev_attr->attr.mode &= ~S_IWUGO;
- dev_attr->store = NULL;
- }
- }
-
-#undef DEVICE_ATTR_IS
-
- return 0;
-}
-
-
-static int amdgpu_device_attr_create(struct amdgpu_device *adev,
- struct amdgpu_device_attr *attr,
- uint32_t mask, struct list_head *attr_list)
-{
- int ret = 0;
- struct device_attribute *dev_attr = &attr->dev_attr;
- const char *name = dev_attr->attr.name;
- enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
- struct amdgpu_device_attr_entry *attr_entry;
-
- int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
- uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
-
- BUG_ON(!attr);
-
-	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
-
- ret = attr_update(adev, attr, mask, &attr_states);
- if (ret) {
- dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
- name, ret);
- return ret;
- }
-
- if (attr_states == ATTR_STATE_UNSUPPORTED)
- return 0;
-
- ret = device_create_file(adev->dev, dev_attr);
- if (ret) {
- dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
- name, ret);
- }
-
- attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
- if (!attr_entry)
- return -ENOMEM;
-
- attr_entry->attr = attr;
- INIT_LIST_HEAD(&attr_entry->entry);
-
- list_add_tail(&attr_entry->entry, attr_list);
-
- return ret;
-}
-
-static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
-{
- struct device_attribute *dev_attr = &attr->dev_attr;
-
- device_remove_file(adev->dev, dev_attr);
-}
-
-static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
- struct list_head *attr_list);
-
-static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
- struct amdgpu_device_attr *attrs,
- uint32_t counts,
- uint32_t mask,
- struct list_head *attr_list)
-{
- int ret = 0;
- uint32_t i = 0;
-
- for (i = 0; i < counts; i++) {
- ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
- if (ret)
- goto failed;
- }
-
- return 0;
-
-failed:
- amdgpu_device_attr_remove_groups(adev, attr_list);
-
- return ret;
-}
-
-static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
- struct list_head *attr_list)
-{
- struct amdgpu_device_attr_entry *entry, *entry_tmp;
-
- if (list_empty(attr_list))
-		return;
-
- list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
- amdgpu_device_attr_remove(adev, entry->attr);
- list_del(&entry->entry);
- kfree(entry);
- }
-}
-
-static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int channel = to_sensor_dev_attr(attr)->index;
- int r, temp = 0, size = sizeof(temp);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (channel >= PP_TEMP_MAX)
- return -EINVAL;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- switch (channel) {
- case PP_TEMP_JUNCTION:
- /* get current junction temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
- (void *)&temp, &size);
- break;
- case PP_TEMP_EDGE:
- /* get current edge temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
- (void *)&temp, &size);
- break;
- case PP_TEMP_MEM:
- /* get current memory temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
- (void *)&temp, &size);
- break;
- default:
- r = -EINVAL;
- break;
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int hyst = to_sensor_dev_attr(attr)->index;
- int temp;
-
- if (hyst)
- temp = adev->pm.dpm.thermal.min_temp;
- else
- temp = adev->pm.dpm.thermal.max_temp;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int hyst = to_sensor_dev_attr(attr)->index;
- int temp;
-
- if (hyst)
- temp = adev->pm.dpm.thermal.min_hotspot_temp;
- else
- temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int hyst = to_sensor_dev_attr(attr)->index;
- int temp;
-
- if (hyst)
- temp = adev->pm.dpm.thermal.min_mem_temp;
- else
- temp = adev->pm.dpm.thermal.max_mem_crit_temp;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int channel = to_sensor_dev_attr(attr)->index;
-
- if (channel >= PP_TEMP_MAX)
- return -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
-}
-
-static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int channel = to_sensor_dev_attr(attr)->index;
- int temp = 0;
-
- if (channel >= PP_TEMP_MAX)
- return -EINVAL;
-
- switch (channel) {
- case PP_TEMP_JUNCTION:
- temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
- break;
- case PP_TEMP_EDGE:
- temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
- break;
- case PP_TEMP_MEM:
- temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
- break;
- }
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 pwm_mode = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- } else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return sprintf(buf, "%i\n", pwm_mode);
-}
-
-static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err, ret;
- int value;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
-
- ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- smu_set_fan_control_mode(&adev->smu, value);
- } else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
-
- amdgpu_dpm_set_fan_control_mode(adev, value);
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 0);
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 255);
-}
-
-static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 value;
- u32 pwm_mode;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- else
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
-
- if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
- pr_info("manual fan speed control should be enabled first\n");
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
-
- err = kstrtou32(buf, 10, &value);
- if (err) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- value = (value * 100) / 255;
-
- if (is_support_sw_smu(adev))
- err = smu_set_fan_speed_percent(&adev->smu, value);
- else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
- err = amdgpu_dpm_set_fan_speed_percent(adev, value);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 speed = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_get_fan_speed_percent(&adev->smu, &speed);
- else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
- err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- speed = (speed * 255) / 100;
-
- return sprintf(buf, "%i\n", speed);
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 speed = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_get_fan_speed_rpm(&adev->smu, &speed);
- else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return sprintf(buf, "%i\n", speed);
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 min_rpm = 0;
- u32 size = sizeof(min_rpm);
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
- (void *)&min_rpm, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 max_rpm = 0;
- u32 size = sizeof(max_rpm);
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
- (void *)&max_rpm, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 rpm = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
- else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return sprintf(buf, "%i\n", rpm);
-}
-
-static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 value;
- u32 pwm_mode;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- else
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
-
- if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -ENODATA;
- }
-
- err = kstrtou32(buf, 10, &value);
- if (err) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_set_fan_speed_rpm(&adev->smu, value);
- else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
- err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 pwm_mode = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- } else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
-}
-
-static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- int value;
- u32 pwm_mode;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
-
- if (value == 0)
- pwm_mode = AMD_FAN_CTRL_AUTO;
- else if (value == 1)
- pwm_mode = AMD_FAN_CTRL_MANUAL;
- else
- return -EINVAL;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev)) {
- smu_set_fan_control_mode(&adev->smu, pwm_mode);
- } else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
- amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 vddgfx;
- int r, size = sizeof(vddgfx);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- /* get the voltage */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
- (void *)&vddgfx, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
-}
-
-static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "vddgfx\n");
-}
-
-static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 vddnb;
- int r, size = sizeof(vddnb);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- /* only APUs have vddnb */
- if (!(adev->flags & AMD_IS_APU))
- return -EINVAL;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- /* get the voltage */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
- (void *)&vddnb, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
-}
-
-static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "vddnb\n");
-}
-
-static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 query = 0;
- int r, size = sizeof(u32);
- unsigned uw;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
-	/* get the power */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
- (void *)&query, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- /* convert to microwatts */
- uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
-
- return snprintf(buf, PAGE_SIZE, "%u\n", uw);
-}
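
A worked instance of the conversion above, using a made-up sensor reading; the upper bits are taken as whole watts and the low byte is scaled by 1000 (i.e. treated as milliwatts by the formula):

	uint32_t query = (42 << 8) | 128;	/* hypothetical sensor value */
	unsigned int uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
	/* uw == 42128000, i.e. power1_average reports ~42.128 W */
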
-
-static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 0);
-}
-
-static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t limit = 0;
- ssize_t size;
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, true);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
- adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else {
- size = snprintf(buf, PAGE_SIZE, "\n");
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t limit = 0;
- ssize_t size;
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, false);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
- adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else {
- size = snprintf(buf, PAGE_SIZE, "\n");
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return size;
-}
-
-
-static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 value;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- err = kstrtou32(buf, 10, &value);
- if (err)
- return err;
-
- value = value / 1000000; /* convert to Watt */
-
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_set_power_limit(&adev->smu, value);
- else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
- err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t sclk;
- int r, size = sizeof(sclk);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- /* get the sclk */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
- (void *)&sclk, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
-}
-
-static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "sclk\n");
-}
-
-static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t mclk;
- int r, size = sizeof(mclk);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
-	/* get the mclk */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
- (void *)&mclk, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
-}
-
-static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "mclk\n");
-}
-
-/**
- * DOC: hwmon
- *
- * The amdgpu driver exposes the following sensor interfaces:
- *
- * - GPU temperature (via the on-die sensor)
- *
- * - GPU voltage
- *
- * - Northbridge voltage (APUs only)
- *
- * - GPU power
- *
- * - GPU fan
- *
- * - GPU gfx/compute engine clock
- *
- * - GPU memory clock (dGPU only)
- *
- * hwmon interfaces for GPU temperature:
- *
- * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
- * - temp2_input and temp3_input are supported on SOC15 dGPUs only
- *
- * - temp[1-3]_label: temperature channel label
- * - temp2_label and temp3_label are supported on SOC15 dGPUs only
- *
- * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
- * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
- *
- * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
- * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
- *
- * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
- * - these are supported on SOC15 dGPUs only
- *
- * hwmon interfaces for GPU voltage:
- *
- * - in0_input: the voltage on the GPU in millivolts
- *
- * - in1_input: the voltage on the Northbridge in millivolts
- *
- * hwmon interfaces for GPU power:
- *
- * - power1_average: average power used by the GPU in microWatts
- *
- * - power1_cap_min: minimum cap supported in microWatts
- *
- * - power1_cap_max: maximum cap supported in microWatts
- *
- * - power1_cap: selected power cap in microWatts
- *
- * hwmon interfaces for GPU fan:
- *
- * - pwm1: pulse width modulation fan level (0-255)
- *
- * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
- *
- * - pwm1_min: pulse width modulation fan control minimum level (0)
- *
- * - pwm1_max: pulse width modulation fan control maximum level (255)
- *
- * - fan1_min: minimum fan speed. Unit: revolutions/min (RPM)
- *
- * - fan1_max: maximum fan speed. Unit: revolutions/min (RPM)
- *
- * - fan1_input: fan speed in RPM
- *
- * - fan[1-\*]_target: desired fan speed. Unit: revolutions/min (RPM)
- *
- * - fan[1-\*]_enable: enable or disable the sensors. 1: enable, 0: disable
- *
- * hwmon interfaces for GPU clocks:
- *
- * - freq1_input: the gfx/compute clock in hertz
- *
- * - freq2_input: the memory clock in hertz
- *
- * You can use hwmon tools like sensors to view this information on your system.
- *
- */
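
A minimal user-space sketch of manual fan control through these files (the hwmon path is an illustrative assumption and varies per system). pwm1 takes the 0-255 scale documented above; the driver converts it to a percentage internally, as in amdgpu_hwmon_set_pwm1 earlier in this file:

	#include <fcntl.h>
	#include <unistd.h>

	static void fan_manual_half_speed(void)
	{
		int fd;

		/* 1 = manual fan speed control (see pwm1_enable above) */
		fd = open("/sys/class/hwmon/hwmon3/pwm1_enable", O_WRONLY);
		if (fd >= 0) {
			write(fd, "1", 1);
			close(fd);
		}

		/* 128 on the 0-255 scale, i.e. roughly 50% duty */
		fd = open("/sys/class/hwmon/hwmon3/pwm1", O_WRONLY);
		if (fd >= 0) {
			write(fd, "128", 3);
			close(fd);
		}
	}
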
-
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
-static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
-static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
-static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
-static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
-static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
-static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
-static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
-static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
-static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
-static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
-
-static struct attribute *hwmon_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_crit.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_emergency.dev_attr.attr,
- &sensor_dev_attr_temp2_emergency.dev_attr.attr,
- &sensor_dev_attr_temp3_emergency.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm1_min.dev_attr.attr,
- &sensor_dev_attr_pwm1_max.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan1_min.dev_attr.attr,
- &sensor_dev_attr_fan1_max.dev_attr.attr,
- &sensor_dev_attr_fan1_target.dev_attr.attr,
- &sensor_dev_attr_fan1_enable.dev_attr.attr,
- &sensor_dev_attr_in0_input.dev_attr.attr,
- &sensor_dev_attr_in0_label.dev_attr.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in1_label.dev_attr.attr,
- &sensor_dev_attr_power1_average.dev_attr.attr,
- &sensor_dev_attr_power1_cap_max.dev_attr.attr,
- &sensor_dev_attr_power1_cap_min.dev_attr.attr,
- &sensor_dev_attr_power1_cap.dev_attr.attr,
- &sensor_dev_attr_freq1_input.dev_attr.attr,
- &sensor_dev_attr_freq1_label.dev_attr.attr,
- &sensor_dev_attr_freq2_input.dev_attr.attr,
- &sensor_dev_attr_freq2_label.dev_attr.attr,
- NULL
-};
-
-static umode_t hwmon_attributes_visible(struct kobject *kobj,
- struct attribute *attr, int index)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- umode_t effective_mode = attr->mode;
-
- /* under multi-vf mode, the hwmon attributes are all not supported */
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
- /* there is no fan under pp one vf mode */
- if (amdgpu_sriov_is_pp_one_vf(adev) &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
-
- /* Skip fan attributes if fan is not present */
- if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
-
- /* Skip fan attributes on APU */
- if ((adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
-
- /* Skip crit temp on APU */
- if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
- (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
- return 0;
-
- /* Skip limit attributes if DPM is not enabled */
- if (!adev->pm.dpm_enabled &&
- (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
-
- if (!is_support_sw_smu(adev)) {
- /* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->powerplay.pp_funcs->get_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
- effective_mode &= ~S_IRUGO;
-
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->powerplay.pp_funcs->set_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
- effective_mode &= ~S_IWUSR;
- }
-
- if (((adev->flags & AMD_IS_APU) ||
- adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
- adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
- (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
- attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
- return 0;
-
- if (!is_support_sw_smu(adev)) {
- /* hide max/min values if we can't both query and manage the fan */
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
- (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
-
- if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
- return 0;
- }
-
- if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
- adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
- (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
- attr == &sensor_dev_attr_in0_label.dev_attr.attr))
- return 0;
-
- /* only APUs have vddnb */
- if (!(adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_in1_label.dev_attr.attr))
- return 0;
-
- /* no mclk on APUs */
- if ((adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
- attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
- return 0;
-
- /* only SOC15 dGPUs support hotspot and mem temperatures */
- if (((adev->flags & AMD_IS_APU) ||
- adev->asic_type < CHIP_VEGA10) &&
- (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
- return 0;
-
- return effective_mode;
-}
-
-static const struct attribute_group hwmon_attrgroup = {
- .attrs = hwmon_attributes,
- .is_visible = hwmon_attributes_visible,
-};
-
-static const struct attribute_group *hwmon_groups[] = {
- &hwmon_attrgroup,
- NULL
-};
-
-int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
-{
- int ret;
- uint32_t mask = 0;
-
- if (adev->pm.sysfs_initialized)
- return 0;
-
- if (adev->pm.dpm_enabled == 0)
- return 0;
-
- INIT_LIST_HEAD(&adev->pm.pm_attr_list);
-
- adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
- DRIVER_NAME, adev,
- hwmon_groups);
- if (IS_ERR(adev->pm.int_hwmon_dev)) {
- ret = PTR_ERR(adev->pm.int_hwmon_dev);
- dev_err(adev->dev,
- "Unable to register hwmon device: %d\n", ret);
- return ret;
- }
-
- switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
- case SRIOV_VF_MODE_ONE_VF:
- mask = ATTR_FLAG_ONEVF;
- break;
- case SRIOV_VF_MODE_MULTI_VF:
- mask = 0;
- break;
- case SRIOV_VF_MODE_BARE_METAL:
- default:
- mask = ATTR_FLAG_MASK_ALL;
- break;
- }
-
- ret = amdgpu_device_attr_create_groups(adev,
- amdgpu_device_attrs,
- ARRAY_SIZE(amdgpu_device_attrs),
- mask,
- &adev->pm.pm_attr_list);
- if (ret)
- return ret;
-
- adev->pm.sysfs_initialized = true;
-
- return 0;
-}
-
-void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm_enabled == 0)
- return;
-
- if (adev->pm.int_hwmon_dev)
- hwmon_device_unregister(adev->pm.int_hwmon_dev);
-
- amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
-}
-
-/*
- * Debugfs info
- */
-#if defined(CONFIG_DEBUG_FS)
-
-static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
-{
- uint32_t value;
- uint64_t value64;
- uint32_t query = 0;
- int size;
-
- /* GPU Clocks */
- size = sizeof(value);
- seq_printf(m, "GFX Clocks and Power:\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
- seq_printf(m, "\t%u mV (VDDGFX)\n", value);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
- seq_printf(m, "\t%u mV (VDDNB)\n", value);
- size = sizeof(uint32_t);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
- size = sizeof(value);
- seq_printf(m, "\n");
-
- /* GPU Temp */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
- seq_printf(m, "GPU Temperature: %u C\n", value/1000);
-
- /* GPU Load */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
- seq_printf(m, "GPU Load: %u %%\n", value);
- /* MEM Load */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
- seq_printf(m, "MEM Load: %u %%\n", value);
-
- seq_printf(m, "\n");
-
- /* SMC feature mask */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
- seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
-
- if (adev->asic_type > CHIP_VEGA20) {
- /* VCN clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "VCN: Disabled\n");
- } else {
- seq_printf(m, "VCN: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
- }
- }
- seq_printf(m, "\n");
- } else {
- /* UVD clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "UVD: Disabled\n");
- } else {
- seq_printf(m, "UVD: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
- }
- }
- seq_printf(m, "\n");
-
- /* VCE clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "VCE: Disabled\n");
- } else {
- seq_printf(m, "VCE: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
- }
- }
- }
-
- return 0;
-}
-
-static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
-{
- int i;
-
- for (i = 0; clocks[i].flag; i++)
- seq_printf(m, "\t%s: %s\n", clocks[i].name,
- (flags & clocks[i].flag) ? "On" : "Off");
-}
-
-static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- u32 flags = 0;
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(dev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(dev->dev);
- return r;
- }
-
- if (!adev->pm.dpm_enabled) {
- seq_printf(m, "dpm not enabled\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
- return 0;
- }
-
- if (!is_support_sw_smu(adev) &&
- adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
- mutex_lock(&adev->pm.mutex);
- if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
- adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
- else
- seq_printf(m, "Debugfs support not implemented for this asic\n");
- mutex_unlock(&adev->pm.mutex);
- r = 0;
- } else {
- r = amdgpu_debugfs_pm_info_pp(m, adev);
- }
- if (r)
- goto out;
-
- amdgpu_device_ip_get_clockgating_state(adev, &flags);
-
- seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
- amdgpu_parse_cg_state(m, flags);
- seq_printf(m, "\n");
-
-out:
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
-
- return r;
-}
-
-static const struct drm_info_list amdgpu_pm_info_list[] = {
- {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
-};
-#endif
-
-int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
-#else
- return 0;
-#endif
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
deleted file mode 100644
index 45a22e101d15..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __AMDGPU_PM_H__
-#define __AMDGPU_PM_H__
-
-struct cg_flag_name
-{
- u32 flag;
- const char *name;
-};
-
-enum amdgpu_device_attr_flags {
- ATTR_FLAG_BASIC = (1 << 0),
- ATTR_FLAG_ONEVF = (1 << 16),
-};
-
-#define ATTR_FLAG_TYPE_MASK (0x0000ffff)
-#define ATTR_FLAG_MODE_MASK (0xffff0000)
-#define ATTR_FLAG_MASK_ALL (0xffffffff)
-
-enum amdgpu_device_attr_states {
- ATTR_STATE_UNSUPPORTED = 0,
- ATTR_STATE_SUPPORTED,
-};
-
-struct amdgpu_device_attr {
- struct device_attribute dev_attr;
- enum amdgpu_device_attr_flags flags;
- int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
- uint32_t mask, enum amdgpu_device_attr_states *states);
-
-};
-
-struct amdgpu_device_attr_entry {
- struct list_head entry;
- struct amdgpu_device_attr *attr;
-};
-
-#define to_amdgpu_device_attr(_dev_attr) \
- container_of(_dev_attr, struct amdgpu_device_attr, dev_attr)
-
-#define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \
- { .dev_attr = __ATTR(_name, _mode, _show, _store), \
- .flags = _flags, \
- ##__VA_ARGS__, }
-
-#define AMDGPU_DEVICE_ATTR(_name, _mode, _flags, ...) \
- __AMDGPU_DEVICE_ATTR(_name, _mode, \
- amdgpu_get_##_name, amdgpu_set_##_name, \
- _flags, ##__VA_ARGS__)
-
-#define AMDGPU_DEVICE_ATTR_RW(_name, _flags, ...) \
- AMDGPU_DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
- _flags, ##__VA_ARGS__)
-
-#define AMDGPU_DEVICE_ATTR_RO(_name, _flags, ...) \
- __AMDGPU_DEVICE_ATTR(_name, S_IRUGO, \
- amdgpu_get_##_name, NULL, \
- _flags, ##__VA_ARGS__)
-
-int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
-int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev);
-void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
-void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev);
-
-int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
deleted file mode 100644
index 2fcc4b60153c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __CIK_DPM_H__
-#define __CIK_DPM_H__
-
-extern const struct amdgpu_ip_block_version kv_smu_ip_block;
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
deleted file mode 100644
index 4b3faaccecb9..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ /dev/null
@@ -1,3382 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "cikd.h"
-#include "atom.h"
-#include "amdgpu_atombios.h"
-#include "amdgpu_dpm.h"
-#include "kv_dpm.h"
-#include "gfx_v7_0.h"
-#include <linux/seq_file.h>
-
-#include "smu/smu_7_0_0_d.h"
-#include "smu/smu_7_0_0_sh_mask.h"
-
-#include "gca/gfx_7_2_d.h"
-#include "gca/gfx_7_2_sh_mask.h"
-
-#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define KV_MINIMUM_ENGINE_CLOCK 800
-#define SMC_RAM_END 0x40000
-
-static const struct amd_pm_funcs kv_dpm_funcs;
-
-static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
-static int kv_enable_nb_dpm(struct amdgpu_device *adev,
- bool enable);
-static void kv_init_graphics_levels(struct amdgpu_device *adev);
-static int kv_calculate_ds_divider(struct amdgpu_device *adev);
-static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
-static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
-static void kv_enable_new_levels(struct amdgpu_device *adev);
-static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps);
-static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
-static int kv_set_enabled_levels(struct amdgpu_device *adev);
-static int kv_force_dpm_highest(struct amdgpu_device *adev);
-static int kv_force_dpm_lowest(struct amdgpu_device *adev);
-static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps,
- struct amdgpu_ps *old_rps);
-static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
- int min_temp, int max_temp);
-static int kv_init_fps_limits(struct amdgpu_device *adev);
-
-static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
-static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
-
-
-static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
- struct sumo_vid_mapping_table *vid_mapping_table,
- u32 vid_2bit)
-{
- struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 i;
-
- if (vddc_sclk_table && vddc_sclk_table->count) {
- if (vid_2bit < vddc_sclk_table->count)
- return vddc_sclk_table->entries[vid_2bit].v;
- else
- return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
- } else {
- for (i = 0; i < vid_mapping_table->num_entries; i++) {
- if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
- return vid_mapping_table->entries[i].vid_7bit;
- }
- return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
- }
-}
-
-static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
- struct sumo_vid_mapping_table *vid_mapping_table,
- u32 vid_7bit)
-{
- struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 i;
-
- if (vddc_sclk_table && vddc_sclk_table->count) {
- for (i = 0; i < vddc_sclk_table->count; i++) {
- if (vddc_sclk_table->entries[i].v == vid_7bit)
- return i;
- }
- return vddc_sclk_table->count - 1;
- } else {
- for (i = 0; i < vid_mapping_table->num_entries; i++) {
- if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
- return vid_mapping_table->entries[i].vid_2bit;
- }
-
- return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
- }
-}
-
-static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
-{
-/* This bit selects who handles display phy powergating.
- * Clear the bit to let atom handle it.
- * Set it to let the driver handle it.
- * For now we just let atom handle it.
- */
-#if 0
- u32 v = RREG32(mmDOUT_SCRATCH3);
-
- if (enable)
- v |= 0x4;
- else
- v &= 0xFFFFFFFB;
-
- WREG32(mmDOUT_SCRATCH3, v);
-#endif
-}
-
-static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
- struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
- ATOM_AVAILABLE_SCLK_LIST *table)
-{
- u32 i;
- u32 n = 0;
- u32 prev_sclk = 0;
-
- for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
- if (table[i].ulSupportedSCLK > prev_sclk) {
- sclk_voltage_mapping_table->entries[n].sclk_frequency =
- table[i].ulSupportedSCLK;
- sclk_voltage_mapping_table->entries[n].vid_2bit =
- table[i].usVoltageIndex;
- prev_sclk = table[i].ulSupportedSCLK;
- n++;
- }
- }
-
- sclk_voltage_mapping_table->num_max_dpm_entries = n;
-}
-
-static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
- struct sumo_vid_mapping_table *vid_mapping_table,
- ATOM_AVAILABLE_SCLK_LIST *table)
-{
- u32 i, j;
-
- for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
- if (table[i].ulSupportedSCLK != 0) {
- vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
- table[i].usVoltageID;
- vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
- table[i].usVoltageIndex;
- }
- }
-
- for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
- if (vid_mapping_table->entries[i].vid_7bit == 0) {
- for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
- if (vid_mapping_table->entries[j].vid_7bit != 0) {
- vid_mapping_table->entries[i] =
- vid_mapping_table->entries[j];
- vid_mapping_table->entries[j].vid_7bit = 0;
- break;
- }
- }
-
- if (j == SUMO_MAX_NUMBER_VOLTAGES)
- break;
- }
- }
-
- vid_mapping_table->num_entries = i;
-}
-
-#if 0
-static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 1, 4, 1 },
- { 2, 5, 1 },
- { 3, 4, 2 },
- { 4, 1, 1 },
- { 5, 5, 2 },
- { 6, 6, 1 },
- { 7, 9, 2 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 1, 4, 1 },
- { 2, 5, 1 },
- { 3, 4, 1 },
- { 4, 1, 1 },
- { 5, 5, 1 },
- { 6, 6, 1 },
- { 7, 9, 1 },
- { 8, 4, 1 },
- { 9, 2, 1 },
- { 10, 3, 1 },
- { 11, 6, 1 },
- { 12, 8, 2 },
- { 13, 1, 1 },
- { 14, 2, 1 },
- { 15, 3, 1 },
- { 16, 1, 1 },
- { 17, 4, 1 },
- { 18, 3, 1 },
- { 19, 1, 1 },
- { 20, 8, 1 },
- { 21, 5, 1 },
- { 22, 1, 1 },
- { 23, 1, 1 },
- { 24, 4, 1 },
- { 27, 6, 1 },
- { 28, 1, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
-{
- { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
-{
- { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
-{
- { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
-{
- { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
-{
- { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
-{
- { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-#endif
-
-static const struct kv_pt_config_reg didt_config_kv[] =
-{
- { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
- { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
- { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
- { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
- { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
- { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
- { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
- { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
- { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
- { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
- { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
- { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
- { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0xFFFFFFFF }
-};
-
-static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
-{
- struct kv_ps *ps = rps->ps_priv;
-
- return ps;
-}
-
-static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-#if 0
-static void kv_program_local_cac_table(struct amdgpu_device *adev,
- const struct kv_lcac_config_values *local_cac_table,
- const struct kv_lcac_config_reg *local_cac_reg)
-{
- u32 i, count, data;
- const struct kv_lcac_config_values *values = local_cac_table;
-
- while (values->block_id != 0xffffffff) {
- count = values->signal_id;
- for (i = 0; i < count; i++) {
- data = ((values->block_id << local_cac_reg->block_shift) &
- local_cac_reg->block_mask);
- data |= ((i << local_cac_reg->signal_shift) &
- local_cac_reg->signal_mask);
- data |= ((values->t << local_cac_reg->t_shift) &
- local_cac_reg->t_mask);
- data |= ((1 << local_cac_reg->enable_shift) &
- local_cac_reg->enable_mask);
- WREG32_SMC(local_cac_reg->cntl, data);
- }
- values++;
- }
-}
-#endif
-
-static int kv_program_pt_config_registers(struct amdgpu_device *adev,
- const struct kv_pt_config_reg *cac_config_regs)
-{
- const struct kv_pt_config_reg *config_regs = cac_config_regs;
- u32 data;
- u32 cache = 0;
-
- if (config_regs == NULL)
- return -EINVAL;
-
- while (config_regs->offset != 0xFFFFFFFF) {
- if (config_regs->type == KV_CONFIGREG_CACHE) {
- cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- } else {
- switch (config_regs->type) {
- case KV_CONFIGREG_SMC_IND:
- data = RREG32_SMC(config_regs->offset);
- break;
- case KV_CONFIGREG_DIDT_IND:
- data = RREG32_DIDT(config_regs->offset);
- break;
- default:
- data = RREG32(config_regs->offset);
- break;
- }
-
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- data |= cache;
- cache = 0;
-
- switch (config_regs->type) {
- case KV_CONFIGREG_SMC_IND:
- WREG32_SMC(config_regs->offset, data);
- break;
- case KV_CONFIGREG_DIDT_IND:
- WREG32_DIDT(config_regs->offset, data);
- break;
- default:
- WREG32(config_regs->offset, data);
- break;
- }
- }
- config_regs++;
- }
-
- return 0;
-}
-
-static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 data;
-
- if (pi->caps_sq_ramping) {
- data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
- if (enable)
- data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
- }
-
- if (pi->caps_db_ramping) {
- data = RREG32_DIDT(ixDIDT_DB_CTRL0);
- if (enable)
- data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_DB_CTRL0, data);
- }
-
- if (pi->caps_td_ramping) {
- data = RREG32_DIDT(ixDIDT_TD_CTRL0);
- if (enable)
- data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_TD_CTRL0, data);
- }
-
- if (pi->caps_tcp_ramping) {
- data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
- if (enable)
- data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
- }
-}
-
-static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- if (pi->caps_sq_ramping ||
- pi->caps_db_ramping ||
- pi->caps_td_ramping ||
- pi->caps_tcp_ramping) {
- amdgpu_gfx_rlc_enter_safe_mode(adev);
-
- if (enable) {
- ret = kv_program_pt_config_registers(adev, didt_config_kv);
- if (ret) {
- amdgpu_gfx_rlc_exit_safe_mode(adev);
- return ret;
- }
- }
-
- kv_do_enable_didt(adev, enable);
-
- amdgpu_gfx_rlc_exit_safe_mode(adev);
- }
-
- return 0;
-}
-
-#if 0
-static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->caps_cac) {
- WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
- kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
-
- WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
- kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
-
- WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
- kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
-
- WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
- kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
-
- WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
- kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
-
- WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
- kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
- }
-}
-#endif
-
-static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret = 0;
-
- if (pi->caps_cac) {
- if (enable) {
- ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
- if (ret)
- pi->cac_enabled = false;
- else
- pi->cac_enabled = true;
- } else if (pi->cac_enabled) {
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
- pi->cac_enabled = false;
- }
- }
-
- return ret;
-}
-
-static int kv_process_firmware_header(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, DpmTable),
- &tmp, pi->sram_end);
-
- if (ret == 0)
- pi->dpm_table_start = tmp;
-
- ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, SoftRegisters),
- &tmp, pi->sram_end);
-
- if (ret == 0)
- pi->soft_regs_start = tmp;
-
- return ret;
-}
-
-static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->graphics_voltage_change_enable = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
- &pi->graphics_voltage_change_enable,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static int kv_set_dpm_interval(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->graphics_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
- &pi->graphics_interval,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
- &pi->graphics_boot_level,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static void kv_program_vc(struct amdgpu_device *adev)
-{
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
-}
-
-static void kv_clear_vc(struct amdgpu_device *adev)
-{
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
-}
-
-static int kv_set_divider_value(struct amdgpu_device *adev,
- u32 index, u32 sclk)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct atom_clock_dividers dividers;
- int ret;
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- sclk, false, &dividers);
- if (ret)
- return ret;
-
- pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
- pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
-
- return 0;
-}
-
-static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
- u16 voltage)
-{
- return 6200 - (voltage * 25);
-}
-
-static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
- u32 vid_2bit)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
- &pi->sys_info.vid_mapping_table,
- vid_2bit);
-
- return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
-}
-
-
-static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
- pi->graphics_level[index].MinVddNb =
- cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));
-
- return 0;
-}
-
-static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->graphics_level[index].AT = cpu_to_be16((u16)at);
-
- return 0;
-}
-
-static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
- u32 index, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
-}
-
-static void kv_start_dpm(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
-
- tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-
- amdgpu_kv_smc_dpm_enable(adev, true);
-}
-
-static void kv_stop_dpm(struct amdgpu_device *adev)
-{
- amdgpu_kv_smc_dpm_enable(adev, false);
-}
-
-static void kv_start_am(struct amdgpu_device *adev)
-{
- u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
-
- sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
- SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
- sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
-
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
-}
-
-static void kv_reset_am(struct amdgpu_device *adev)
-{
- u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
-
- sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
- SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
-
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
-}
-
-static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
-{
- return amdgpu_kv_notify_message_to_smu(adev, freeze ?
- PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
-}
-
-static int kv_force_lowest_valid(struct amdgpu_device *adev)
-{
- return kv_force_dpm_lowest(adev);
-}
-
-static int kv_unforce_levels(struct amdgpu_device *adev)
-{
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
- else
- return kv_set_enabled_levels(adev);
-}
-
-static int kv_update_sclk_t(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 low_sclk_interrupt_t = 0;
- int ret = 0;
-
- if (pi->caps_sclk_throttle_low_notification) {
- low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
- (u8 *)&low_sclk_interrupt_t,
- sizeof(u32), pi->sram_end);
- }
- return ret;
-}
-
-static int kv_program_bootup_state(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- if (table && table->count) {
- for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
- if (table->entries[i].clk == pi->boot_pl.sclk)
- break;
- }
-
- pi->graphics_boot_level = (u8)i;
- kv_dpm_power_level_enable(adev, i, true);
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- if (table->num_max_dpm_entries == 0)
- return -EINVAL;
-
- for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
- if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
- break;
- }
-
- pi->graphics_boot_level = (u8)i;
- kv_dpm_power_level_enable(adev, i, true);
- }
- return 0;
-}
-
-static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->graphics_therm_throttle_enable = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
- &pi->graphics_therm_throttle_enable,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static int kv_upload_dpm_settings(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
- (u8 *)&pi->graphics_level,
- sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
- pi->sram_end);
-
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
- &pi->graphics_dpm_level_count,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static u32 kv_get_clock_difference(u32 a, u32 b)
-{
- return (a >= b) ? a - b : b - a;
-}
-
-static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 value;
-
- if (pi->caps_enable_dfs_bypass) {
- if (kv_get_clock_difference(clk, 40000) < 200)
- value = 3;
- else if (kv_get_clock_difference(clk, 30000) < 200)
- value = 2;
- else if (kv_get_clock_difference(clk, 20000) < 200)
- value = 7;
- else if (kv_get_clock_difference(clk, 15000) < 200)
- value = 6;
- else if (kv_get_clock_difference(clk, 10000) < 200)
- value = 8;
- else
- value = 0;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static int kv_populate_uvd_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_uvd_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- struct atom_clock_dividers dividers;
- int ret;
- u32 i;
-
- if (table == NULL || table->count == 0)
- return 0;
-
- pi->uvd_level_count = 0;
- for (i = 0; i < table->count; i++) {
- if (pi->high_voltage_t &&
- (pi->high_voltage_t < table->entries[i].v))
- break;
-
- pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
- pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
- pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
-
- pi->uvd_level[i].VClkBypassCntl =
- (u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
- pi->uvd_level[i].DClkBypassCntl =
- (u8)kv_get_clk_bypass(adev, table->entries[i].dclk);
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].vclk, false, &dividers);
- if (ret)
- return ret;
- pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].dclk, false, &dividers);
- if (ret)
- return ret;
- pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
-
- pi->uvd_level_count++;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
- (u8 *)&pi->uvd_level_count,
- sizeof(u8), pi->sram_end);
- if (ret)
- return ret;
-
- pi->uvd_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, UVDInterval),
- &pi->uvd_interval,
- sizeof(u8), pi->sram_end);
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, UvdLevel),
- (u8 *)&pi->uvd_level,
- sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
- pi->sram_end);
-
- return ret;
-
-}
-
-static int kv_populate_vce_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
- u32 i;
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- struct atom_clock_dividers dividers;
-
- if (table == NULL || table->count == 0)
- return 0;
-
- pi->vce_level_count = 0;
- for (i = 0; i < table->count; i++) {
- if (pi->high_voltage_t &&
- pi->high_voltage_t < table->entries[i].v)
- break;
-
- pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
- pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
-
- pi->vce_level[i].ClkBypassCntl =
- (u8)kv_get_clk_bypass(adev, table->entries[i].evclk);
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].evclk, false, &dividers);
- if (ret)
- return ret;
- pi->vce_level[i].Divider = (u8)dividers.post_div;
-
- pi->vce_level_count++;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
- (u8 *)&pi->vce_level_count,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- pi->vce_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, VCEInterval),
- (u8 *)&pi->vce_interval,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, VceLevel),
- (u8 *)&pi->vce_level,
- sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
- pi->sram_end);
-
- return ret;
-}
-
-static int kv_populate_samu_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
- struct atom_clock_dividers dividers;
- int ret;
- u32 i;
-
- if (table == NULL || table->count == 0)
- return 0;
-
- pi->samu_level_count = 0;
- for (i = 0; i < table->count; i++) {
- if (pi->high_voltage_t &&
- pi->high_voltage_t < table->entries[i].v)
- break;
-
- pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
- pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
-
- pi->samu_level[i].ClkBypassCntl =
- (u8)kv_get_clk_bypass(adev, table->entries[i].clk);
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].clk, false, &dividers);
- if (ret)
- return ret;
- pi->samu_level[i].Divider = (u8)dividers.post_div;
-
- pi->samu_level_count++;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
- (u8 *)&pi->samu_level_count,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- pi->samu_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
- (u8 *)&pi->samu_interval,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, SamuLevel),
- (u8 *)&pi->samu_level,
- sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
- pi->sram_end);
- if (ret)
- return ret;
-
- return ret;
-}
-
-
-static int kv_populate_acp_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
- struct atom_clock_dividers dividers;
- int ret;
- u32 i;
-
- if (table == NULL || table->count == 0)
- return 0;
-
- pi->acp_level_count = 0;
- for (i = 0; i < table->count; i++) {
- pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
- pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].clk, false, &dividers);
- if (ret)
- return ret;
- pi->acp_level[i].Divider = (u8)dividers.post_div;
-
- pi->acp_level_count++;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
- (u8 *)&pi->acp_level_count,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- pi->acp_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, ACPInterval),
- (u8 *)&pi->acp_interval,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, AcpLevel),
- (u8 *)&pi->acp_level,
- sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
- pi->sram_end);
- if (ret)
- return ret;
-
- return ret;
-}
-
-static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- if (table && table->count) {
- for (i = 0; i < pi->graphics_dpm_level_count; i++) {
- if (pi->caps_enable_dfs_bypass) {
- if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 3;
- else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 2;
- else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
- pi->graphics_level[i].ClkBypassCntl = 7;
- else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 6;
- else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 8;
- else
- pi->graphics_level[i].ClkBypassCntl = 0;
- } else {
- pi->graphics_level[i].ClkBypassCntl = 0;
- }
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
- for (i = 0; i < pi->graphics_dpm_level_count; i++) {
- if (pi->caps_enable_dfs_bypass) {
- if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 3;
- else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 2;
- else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
- pi->graphics_level[i].ClkBypassCntl = 7;
- else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 6;
- else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 8;
- else
- pi->graphics_level[i].ClkBypassCntl = 0;
- } else {
- pi->graphics_level[i].ClkBypassCntl = 0;
- }
- }
- }
-}
-
-static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
-}
-
-static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->acp_boot_level = 0xff;
-}
-
-static void kv_update_current_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct kv_ps *new_ps = kv_get_ps(rps);
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->current_rps = *rps;
- pi->current_ps = *new_ps;
- pi->current_rps.ps_priv = &pi->current_ps;
- adev->pm.dpm.current_ps = &pi->current_rps;
-}
-
-static void kv_update_requested_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct kv_ps *new_ps = kv_get_ps(rps);
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->requested_rps = *rps;
- pi->requested_ps = *new_ps;
- pi->requested_rps.ps_priv = &pi->requested_ps;
- adev->pm.dpm.requested_ps = &pi->requested_rps;
-}
-
-static void kv_dpm_enable_bapm(void *handle, bool enable)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- if (pi->bapm_enable) {
- ret = amdgpu_kv_smc_bapm_enable(adev, enable);
- if (ret)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
- }
-}
-
-static int kv_dpm_enable(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- ret = kv_process_firmware_header(adev);
- if (ret) {
- DRM_ERROR("kv_process_firmware_header failed\n");
- return ret;
- }
- kv_init_fps_limits(adev);
- kv_init_graphics_levels(adev);
- ret = kv_program_bootup_state(adev);
- if (ret) {
- DRM_ERROR("kv_program_bootup_state failed\n");
- return ret;
- }
- kv_calculate_dfs_bypass_settings(adev);
- ret = kv_upload_dpm_settings(adev);
- if (ret) {
- DRM_ERROR("kv_upload_dpm_settings failed\n");
- return ret;
- }
- ret = kv_populate_uvd_table(adev);
- if (ret) {
- DRM_ERROR("kv_populate_uvd_table failed\n");
- return ret;
- }
- ret = kv_populate_vce_table(adev);
- if (ret) {
- DRM_ERROR("kv_populate_vce_table failed\n");
- return ret;
- }
- ret = kv_populate_samu_table(adev);
- if (ret) {
- DRM_ERROR("kv_populate_samu_table failed\n");
- return ret;
- }
- ret = kv_populate_acp_table(adev);
- if (ret) {
- DRM_ERROR("kv_populate_acp_table failed\n");
- return ret;
- }
- kv_program_vc(adev);
-#if 0
- kv_initialize_hardware_cac_manager(adev);
-#endif
- kv_start_am(adev);
- if (pi->enable_auto_thermal_throttling) {
- ret = kv_enable_auto_thermal_throttling(adev);
- if (ret) {
- DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
- return ret;
- }
- }
- ret = kv_enable_dpm_voltage_scaling(adev);
- if (ret) {
- DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
- return ret;
- }
- ret = kv_set_dpm_interval(adev);
- if (ret) {
- DRM_ERROR("kv_set_dpm_interval failed\n");
- return ret;
- }
- ret = kv_set_dpm_boot_state(adev);
- if (ret) {
- DRM_ERROR("kv_set_dpm_boot_state failed\n");
- return ret;
- }
- ret = kv_enable_ulv(adev, true);
- if (ret) {
- DRM_ERROR("kv_enable_ulv failed\n");
- return ret;
- }
- kv_start_dpm(adev);
- ret = kv_enable_didt(adev, true);
- if (ret) {
- DRM_ERROR("kv_enable_didt failed\n");
- return ret;
- }
- ret = kv_enable_smc_cac(adev, true);
- if (ret) {
- DRM_ERROR("kv_enable_smc_cac failed\n");
- return ret;
- }
-
- kv_reset_acp_boot_level(adev);
-
- ret = amdgpu_kv_smc_bapm_enable(adev, false);
- if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
- return ret;
- }
-
- if (adev->irq.installed &&
- amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
- ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
- if (ret) {
- DRM_ERROR("kv_set_thermal_temperature_range failed\n");
- return ret;
- }
- amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- }
-
- return ret;
-}
-
-static void kv_dpm_disable(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
-
- amdgpu_kv_smc_bapm_enable(adev, false);
-
- if (adev->asic_type == CHIP_MULLINS)
- kv_enable_nb_dpm(adev, false);
-
- /* powerup blocks */
- kv_dpm_powergate_acp(adev, false);
- kv_dpm_powergate_samu(adev, false);
- if (pi->caps_vce_pg) /* power on the VCE block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
- if (pi->caps_uvd_pg) /* power on the UVD block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
-
- kv_enable_smc_cac(adev, false);
- kv_enable_didt(adev, false);
- kv_clear_vc(adev);
- kv_stop_dpm(adev);
- kv_enable_ulv(adev, false);
- kv_reset_am(adev);
-
- kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-}
-
-#if 0
-static int kv_write_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 value)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
- (u8 *)&value, sizeof(u16), pi->sram_end);
-}
-
-static int kv_read_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 *value)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
- value, pi->sram_end);
-}
-#endif
-
-static void kv_init_sclk_t(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->low_sclk_interrupt_t = 0;
-}
-
-static int kv_init_fps_limits(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret = 0;
-
- if (pi->caps_fps) {
- u16 tmp;
-
- tmp = 45;
- pi->fps_high_t = cpu_to_be16(tmp);
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, FpsHighT),
- (u8 *)&pi->fps_high_t,
- sizeof(u16), pi->sram_end);
-
- tmp = 30;
- pi->fps_low_t = cpu_to_be16(tmp);
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, FpsLowT),
- (u8 *)&pi->fps_low_t,
- sizeof(u16), pi->sram_end);
-
- }
- return ret;
-}
-
-static void kv_init_powergate_state(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->uvd_power_gated = false;
- pi->vce_power_gated = false;
- pi->samu_power_gated = false;
- pi->acp_power_gated = false;
-
-}
-
-static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
-}
-
-static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
-}
-
-static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
-}
-
-static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
-}
-
-static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_uvd_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- int ret;
- u32 mask;
-
- if (!gate) {
- if (table->count)
- pi->uvd_boot_level = table->count - 1;
- else
- pi->uvd_boot_level = 0;
-
- if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
- mask = 1 << pi->uvd_boot_level;
- } else {
- mask = 0x1f;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
- (uint8_t *)&pi->uvd_boot_level,
- sizeof(u8), pi->sram_end);
- if (ret)
- return ret;
-
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- mask);
- }
-
- return kv_enable_uvd_dpm(adev, !gate);
-}
-
-static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
-{
- u8 i;
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].evclk >= evclk)
- break;
- }
-
- return i;
-}
-
-static int kv_update_vce_dpm(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- int ret;
-
- if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
- if (pi->caps_stable_p_state)
- pi->vce_boot_level = table->count - 1;
- else
- pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
- (u8 *)&pi->vce_boot_level,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- if (pi->caps_stable_p_state)
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (1 << pi->vce_boot_level));
- kv_enable_vce_dpm(adev, true);
- } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
- kv_enable_vce_dpm(adev, false);
- }
-
- return 0;
-}
-
-static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
- int ret;
-
- if (!gate) {
- if (pi->caps_stable_p_state)
- pi->samu_boot_level = table->count - 1;
- else
- pi->samu_boot_level = 0;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
- (u8 *)&pi->samu_boot_level,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- if (pi->caps_stable_p_state)
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (1 << pi->samu_boot_level));
- }
-
- return kv_enable_samu_dpm(adev, !gate);
-}
-
-static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
-{
- u8 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].clk >= 0) /* XXX */
- break;
- }
-
- if (i >= table->count)
- i = table->count - 1;
-
- return i;
-}
-
-static void kv_update_acp_boot_level(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u8 acp_boot_level;
-
- if (!pi->caps_stable_p_state) {
- acp_boot_level = kv_get_acp_boot_level(adev);
- if (acp_boot_level != pi->acp_boot_level) {
- pi->acp_boot_level = acp_boot_level;
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ACPDPM_SetEnabledMask,
- (1 << pi->acp_boot_level));
- }
- }
-}
-
-static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
- int ret;
-
- if (!gate) {
- if (pi->caps_stable_p_state)
- pi->acp_boot_level = table->count - 1;
- else
- pi->acp_boot_level = kv_get_acp_boot_level(adev);
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
- (u8 *)&pi->acp_boot_level,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- if (pi->caps_stable_p_state)
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ACPDPM_SetEnabledMask,
- (1 << pi->acp_boot_level));
- }
-
- return kv_enable_acp_dpm(adev, !gate);
-}
-
-static void kv_dpm_powergate_uvd(void *handle, bool gate)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->uvd_power_gated = gate;
-
- if (gate) {
- /* stop the UVD block */
- ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- kv_update_uvd_dpm(adev, gate);
- if (pi->caps_uvd_pg)
- /* power off the UVD block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
- } else {
- if (pi->caps_uvd_pg)
- /* power on the UVD block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
- /* re-init the UVD block */
- kv_update_uvd_dpm(adev, gate);
-
- ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
- }
-}
-
-static void kv_dpm_powergate_vce(void *handle, bool gate)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->vce_power_gated = gate;
-
- if (gate) {
- /* stop the VCE block */
- ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- kv_enable_vce_dpm(adev, false);
- if (pi->caps_vce_pg) /* power off the VCE block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
- } else {
- if (pi->caps_vce_pg) /* power on the VCE block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
- kv_enable_vce_dpm(adev, true);
- /* re-init the VCE block */
- ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- }
-}
-
-
-static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->samu_power_gated == gate)
- return;
-
- pi->samu_power_gated = gate;
-
- if (gate) {
- kv_update_samu_dpm(adev, true);
- if (pi->caps_samu_pg)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
- } else {
- if (pi->caps_samu_pg)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
- kv_update_samu_dpm(adev, false);
- }
-}
-
-static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->acp_power_gated == gate)
- return;
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return;
-
- pi->acp_power_gated = gate;
-
- if (gate) {
- kv_update_acp_dpm(adev, true);
- if (pi->caps_acp_pg)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
- } else {
- if (pi->caps_acp_pg)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
- kv_update_acp_dpm(adev, false);
- }
-}
-
-static void kv_set_valid_clock_range(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps)
-{
- struct kv_ps *new_ps = kv_get_ps(new_rps);
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- if (table && table->count) {
- for (i = 0; i < pi->graphics_dpm_level_count; i++) {
- if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
- (i == (pi->graphics_dpm_level_count - 1))) {
- pi->lowest_valid = i;
- break;
- }
- }
-
- for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
- if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
- break;
- }
- pi->highest_valid = i;
-
- if (pi->lowest_valid > pi->highest_valid) {
- if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
- (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
- pi->highest_valid = pi->lowest_valid;
- else
- pi->lowest_valid = pi->highest_valid;
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
- if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
- i == (int)(pi->graphics_dpm_level_count - 1)) {
- pi->lowest_valid = i;
- break;
- }
- }
-
- for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
- if (table->entries[i].sclk_frequency <=
- new_ps->levels[new_ps->num_levels - 1].sclk)
- break;
- }
- pi->highest_valid = i;
-
- if (pi->lowest_valid > pi->highest_valid) {
- if ((new_ps->levels[0].sclk -
- table->entries[pi->highest_valid].sclk_frequency) >
- (table->entries[pi->lowest_valid].sclk_frequency -
- new_ps->levels[new_ps->num_levels -1].sclk))
- pi->highest_valid = pi->lowest_valid;
- else
- pi->lowest_valid = pi->highest_valid;
- }
- }
-}
-
-static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps)
-{
- struct kv_ps *new_ps = kv_get_ps(new_rps);
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret = 0;
- u8 clk_bypass_cntl;
-
- if (pi->caps_enable_dfs_bypass) {
- clk_bypass_cntl = new_ps->need_dfs_bypass ?
- pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- (pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
- (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
- offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
- &clk_bypass_cntl,
- sizeof(u8), pi->sram_end);
- }
-
- return ret;
-}
-
-static int kv_enable_nb_dpm(struct amdgpu_device *adev,
- bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret = 0;
-
- if (enable) {
- if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
- ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
- if (ret == 0)
- pi->nb_dpm_enabled = true;
- }
- } else {
- if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
- ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
- if (ret == 0)
- pi->nb_dpm_enabled = false;
- }
- }
-
- return ret;
-}
-
-static int kv_dpm_force_performance_level(void *handle,
- enum amd_dpm_forced_level level)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
- ret = kv_force_dpm_highest(adev);
- if (ret)
- return ret;
- } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
- ret = kv_force_dpm_lowest(adev);
- if (ret)
- return ret;
- } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
- ret = kv_unforce_levels(adev);
- if (ret)
- return ret;
- }
-
- adev->pm.dpm.forced_level = level;
-
- return 0;
-}
-
-static int kv_dpm_pre_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
- struct amdgpu_ps *new_ps = &requested_ps;
-
- kv_update_requested_ps(adev, new_ps);
-
- kv_apply_state_adjust_rules(adev,
- &pi->requested_rps,
- &pi->current_rps);
-
- return 0;
-}
-
-static int kv_dpm_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_ps *new_ps = &pi->requested_rps;
- struct amdgpu_ps *old_ps = &pi->current_rps;
- int ret;
-
- if (pi->bapm_enable) {
- ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
- if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
- return ret;
- }
- }
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
- if (pi->enable_dpm) {
- kv_set_valid_clock_range(adev, new_ps);
- kv_update_dfs_bypass_settings(adev, new_ps);
- ret = kv_calculate_ds_divider(adev);
- if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
- return ret;
- }
- kv_calculate_nbps_level_settings(adev);
- kv_calculate_dpm_settings(adev);
- kv_force_lowest_valid(adev);
- kv_enable_new_levels(adev);
- kv_upload_dpm_settings(adev);
- kv_program_nbps_index_settings(adev, new_ps);
- kv_unforce_levels(adev);
- kv_set_enabled_levels(adev);
- kv_force_lowest_valid(adev);
- kv_unforce_levels(adev);
-
- ret = kv_update_vce_dpm(adev, new_ps, old_ps);
- if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
- return ret;
- }
- kv_update_sclk_t(adev);
- if (adev->asic_type == CHIP_MULLINS)
- kv_enable_nb_dpm(adev, true);
- }
- } else {
- if (pi->enable_dpm) {
- kv_set_valid_clock_range(adev, new_ps);
- kv_update_dfs_bypass_settings(adev, new_ps);
- ret = kv_calculate_ds_divider(adev);
- if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
- return ret;
- }
- kv_calculate_nbps_level_settings(adev);
- kv_calculate_dpm_settings(adev);
- kv_freeze_sclk_dpm(adev, true);
- kv_upload_dpm_settings(adev);
- kv_program_nbps_index_settings(adev, new_ps);
- kv_freeze_sclk_dpm(adev, false);
- kv_set_enabled_levels(adev);
- ret = kv_update_vce_dpm(adev, new_ps, old_ps);
- if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
- return ret;
- }
- kv_update_acp_boot_level(adev);
- kv_update_sclk_t(adev);
- kv_enable_nb_dpm(adev, true);
- }
- }
-
- return 0;
-}
-
-static void kv_dpm_post_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_ps *new_ps = &pi->requested_rps;
-
- kv_update_current_ps(adev, new_ps);
-}
-
-static void kv_dpm_setup_asic(struct amdgpu_device *adev)
-{
- sumo_take_smu_control(adev, true);
- kv_init_powergate_state(adev);
- kv_init_sclk_t(adev);
-}
-
-#if 0
-static void kv_dpm_reset_asic(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
- kv_force_lowest_valid(adev);
- kv_init_graphics_levels(adev);
- kv_program_bootup_state(adev);
- kv_upload_dpm_settings(adev);
- kv_force_lowest_valid(adev);
- kv_unforce_levels(adev);
- } else {
- kv_init_graphics_levels(adev);
- kv_program_bootup_state(adev);
- kv_freeze_sclk_dpm(adev, true);
- kv_upload_dpm_settings(adev);
- kv_freeze_sclk_dpm(adev, false);
- kv_set_enabled_level(adev, pi->graphics_boot_level);
- }
-}
-#endif
-
-static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
- struct amdgpu_clock_and_voltage_limits *table)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
- int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
- table->sclk =
- pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
- table->vddc =
- kv_convert_2bit_index_to_voltage(adev,
- pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
- }
-
- table->mclk = pi->sys_info.nbp_memory_clock[0];
-}
-
-static void kv_patch_voltage_values(struct amdgpu_device *adev)
-{
- int i;
- struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table *samu_table =
- &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table *acp_table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
-
- if (uvd_table->count) {
- for (i = 0; i < uvd_table->count; i++)
- uvd_table->entries[i].v =
- kv_convert_8bit_index_to_voltage(adev,
- uvd_table->entries[i].v);
- }
-
- if (vce_table->count) {
- for (i = 0; i < vce_table->count; i++)
- vce_table->entries[i].v =
- kv_convert_8bit_index_to_voltage(adev,
- vce_table->entries[i].v);
- }
-
- if (samu_table->count) {
- for (i = 0; i < samu_table->count; i++)
- samu_table->entries[i].v =
- kv_convert_8bit_index_to_voltage(adev,
- samu_table->entries[i].v);
- }
-
- if (acp_table->count) {
- for (i = 0; i < acp_table->count; i++)
- acp_table->entries[i].v =
- kv_convert_8bit_index_to_voltage(adev,
- acp_table->entries[i].v);
- }
-
-}
-
-static void kv_construct_boot_state(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
- pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
- pi->boot_pl.ds_divider_index = 0;
- pi->boot_pl.ss_divider_index = 0;
- pi->boot_pl.allow_gnb_slow = 1;
- pi->boot_pl.force_nbp_state = 0;
- pi->boot_pl.display_wm = 0;
- pi->boot_pl.vce_wm = 0;
-}
-
-static int kv_force_dpm_highest(struct amdgpu_device *adev)
-{
- int ret;
- u32 enable_mask, i;
-
- ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
- if (ret)
- return ret;
-
- for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
- if (enable_mask & (1 << i))
- break;
- }
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
- else
- return kv_set_enabled_level(adev, i);
-}
-
-static int kv_force_dpm_lowest(struct amdgpu_device *adev)
-{
- int ret;
- u32 enable_mask, i;
-
- ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
- if (ret)
- return ret;
-
- for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
- if (enable_mask & (1 << i))
- break;
- }
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
- else
- return kv_set_enabled_level(adev, i);
-}
-
-static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
- u32 sclk, u32 min_sclk_in_sr)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- u32 temp;
- u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
-
- if (sclk < min)
- return 0;
-
- if (!pi->caps_sclk_ds)
- return 0;
-
- for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
- temp = sclk >> i;
- if (temp >= min)
- break;
- }
-
- return (u8)i;
-}
-
-static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- int i;
-
- if (table && table->count) {
- for (i = table->count - 1; i >= 0; i--) {
- if (pi->high_voltage_t &&
- (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
- pi->high_voltage_t)) {
- *limit = i;
- return 0;
- }
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
- if (pi->high_voltage_t &&
- (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
- pi->high_voltage_t)) {
- *limit = i;
- return 0;
- }
- }
- }
-
- *limit = 0;
- return 0;
-}
-
-static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps,
- struct amdgpu_ps *old_rps)
-{
- struct kv_ps *ps = kv_get_ps(new_rps);
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 min_sclk = 10000; /* ??? */
- u32 sclk, mclk = 0;
- int i, limit;
- bool force_high;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 stable_p_state_sclk = 0;
- struct amdgpu_clock_and_voltage_limits *max_limits =
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
-
- if (new_rps->vce_active) {
- new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
- new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
- } else {
- new_rps->evclk = 0;
- new_rps->ecclk = 0;
- }
-
- mclk = max_limits->mclk;
- sclk = min_sclk;
-
- if (pi->caps_stable_p_state) {
- stable_p_state_sclk = (max_limits->sclk * 75) / 100;
-
- for (i = table->count - 1; i >= 0; i--) {
- if (stable_p_state_sclk >= table->entries[i].clk) {
- stable_p_state_sclk = table->entries[i].clk;
- break;
- }
- }
-
- if (i > 0)
- stable_p_state_sclk = table->entries[0].clk;
-
- sclk = stable_p_state_sclk;
- }
-
- if (new_rps->vce_active) {
- if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
- sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
- }
-
- ps->need_dfs_bypass = true;
-
- for (i = 0; i < ps->num_levels; i++) {
- if (ps->levels[i].sclk < sclk)
- ps->levels[i].sclk = sclk;
- }
-
- if (table && table->count) {
- for (i = 0; i < ps->num_levels; i++) {
- if (pi->high_voltage_t &&
- (pi->high_voltage_t <
- kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
- kv_get_high_voltage_limit(adev, &limit);
- ps->levels[i].sclk = table->entries[limit].clk;
- }
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- for (i = 0; i < ps->num_levels; i++) {
- if (pi->high_voltage_t &&
- (pi->high_voltage_t <
- kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
- kv_get_high_voltage_limit(adev, &limit);
- ps->levels[i].sclk = table->entries[limit].sclk_frequency;
- }
- }
- }
-
- if (pi->caps_stable_p_state) {
- for (i = 0; i < ps->num_levels; i++) {
- ps->levels[i].sclk = stable_p_state_sclk;
- }
- }
-
- pi->video_start = new_rps->dclk || new_rps->vclk ||
- new_rps->evclk || new_rps->ecclk;
-
- if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
- ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
- pi->battery_state = true;
- else
- pi->battery_state = false;
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
- ps->dpm0_pg_nb_ps_lo = 0x1;
- ps->dpm0_pg_nb_ps_hi = 0x0;
- ps->dpmx_nb_ps_lo = 0x1;
- ps->dpmx_nb_ps_hi = 0x0;
- } else {
- ps->dpm0_pg_nb_ps_lo = 0x3;
- ps->dpm0_pg_nb_ps_hi = 0x0;
- ps->dpmx_nb_ps_lo = 0x3;
- ps->dpmx_nb_ps_hi = 0x0;
-
- if (pi->sys_info.nb_dpm_enable) {
- force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
- pi->disable_nb_ps3_in_battery;
- ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
- ps->dpm0_pg_nb_ps_hi = 0x2;
- ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
- ps->dpmx_nb_ps_hi = 0x2;
- }
- }
-}
-
-static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
- u32 index, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
-}
-
-static int kv_calculate_ds_divider(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 sclk_in_sr = 10000; /* ??? */
- u32 i;
-
- if (pi->lowest_valid > pi->highest_valid)
- return -EINVAL;
-
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
- pi->graphics_level[i].DeepSleepDivId =
- kv_get_sleep_divider_id_from_clock(adev,
- be32_to_cpu(pi->graphics_level[i].SclkFrequency),
- sclk_in_sr);
- }
- return 0;
-}
-
-static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- bool force_high;
- struct amdgpu_clock_and_voltage_limits *max_limits =
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- u32 mclk = max_limits->mclk;
-
- if (pi->lowest_valid > pi->highest_valid)
- return -EINVAL;
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
- pi->graphics_level[i].GnbSlow = 1;
- pi->graphics_level[i].ForceNbPs1 = 0;
- pi->graphics_level[i].UpH = 0;
- }
-
- if (!pi->sys_info.nb_dpm_enable)
- return 0;
-
- force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
-
- if (force_high) {
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
- pi->graphics_level[i].GnbSlow = 0;
- } else {
- if (pi->battery_state)
- pi->graphics_level[0].ForceNbPs1 = 1;
-
- pi->graphics_level[1].GnbSlow = 0;
- pi->graphics_level[2].GnbSlow = 0;
- pi->graphics_level[3].GnbSlow = 0;
- pi->graphics_level[4].GnbSlow = 0;
- }
- } else {
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
- pi->graphics_level[i].GnbSlow = 1;
- pi->graphics_level[i].ForceNbPs1 = 0;
- pi->graphics_level[i].UpH = 0;
- }
-
- if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
- pi->graphics_level[pi->lowest_valid].UpH = 0x28;
- pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
- if (pi->lowest_valid != pi->highest_valid)
- pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
- }
- }
- return 0;
-}
-
-static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
-
- if (pi->lowest_valid > pi->highest_valid)
- return -EINVAL;
-
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
- pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
-
- return 0;
-}
-
-static void kv_init_graphics_levels(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- if (table && table->count) {
- u32 vid_2bit;
-
- pi->graphics_dpm_level_count = 0;
- for (i = 0; i < table->count; i++) {
- if (pi->high_voltage_t &&
- (pi->high_voltage_t <
- kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
- break;
-
- kv_set_divider_value(adev, i, table->entries[i].clk);
- vid_2bit = kv_convert_vid7_to_vid2(adev,
- &pi->sys_info.vid_mapping_table,
- table->entries[i].v);
- kv_set_vid(adev, i, vid_2bit);
- kv_set_at(adev, i, pi->at[i]);
- kv_dpm_power_level_enabled_for_throttle(adev, i, true);
- pi->graphics_dpm_level_count++;
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- pi->graphics_dpm_level_count = 0;
- for (i = 0; i < table->num_max_dpm_entries; i++) {
- if (pi->high_voltage_t &&
- pi->high_voltage_t <
- kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
- break;
-
- kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
- kv_set_vid(adev, i, table->entries[i].vid_2bit);
- kv_set_at(adev, i, pi->at[i]);
- kv_dpm_power_level_enabled_for_throttle(adev, i, true);
- pi->graphics_dpm_level_count++;
- }
- }
-
- for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
- kv_dpm_power_level_enable(adev, i, false);
-}
-
-static void kv_enable_new_levels(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
-
- for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
- if (i >= pi->lowest_valid && i <= pi->highest_valid)
- kv_dpm_power_level_enable(adev, i, true);
- }
-}
-
-static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
-{
- u32 new_mask = (1 << level);
-
- return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- new_mask);
-}
-
-static int kv_set_enabled_levels(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i, new_mask = 0;
-
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
- new_mask |= (1 << i);
-
- return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- new_mask);
-}
-
-static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps)
-{
- struct kv_ps *new_ps = kv_get_ps(new_rps);
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 nbdpmconfig1;
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return;
-
- if (pi->sys_info.nb_dpm_enable) {
- nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
- nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
- NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
- NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
- NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
- nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
- (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
- (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
- (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
- WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
- }
-}
-
-static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
- int min_temp, int max_temp)
-{
- int low_temp = 0 * 1000;
- int high_temp = 255 * 1000;
- u32 tmp;
-
- if (low_temp < min_temp)
- low_temp = min_temp;
- if (high_temp > max_temp)
- high_temp = max_temp;
- if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
- return -EINVAL;
- }
-
- tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
- CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
- tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
- ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);
-
- adev->pm.dpm.thermal.min_temp = low_temp;
- adev->pm.dpm.thermal.max_temp = high_temp;
-
- return 0;
-}
-
-union igp_info {
- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
-static int kv_parse_sys_info_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
- union igp_info *igp_info;
- u8 frev, crev;
- u16 data_offset;
- int i;
-
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset)) {
- igp_info = (union igp_info *)(mode_info->atom_context->bios +
- data_offset);
-
- if (crev != 8) {
- DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
- return -EINVAL;
- }
- pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
- pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
- pi->sys_info.bootup_nb_voltage_index =
- le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
- if (igp_info->info_8.ucHtcTmpLmt == 0)
- pi->sys_info.htc_tmp_lmt = 203;
- else
- pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
- if (igp_info->info_8.ucHtcHystLmt == 0)
- pi->sys_info.htc_hyst_lmt = 5;
- else
- pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
- if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
- DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
- }
-
- if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
- pi->sys_info.nb_dpm_enable = true;
- else
- pi->sys_info.nb_dpm_enable = false;
-
- for (i = 0; i < KV_NUM_NBPSTATES; i++) {
- pi->sys_info.nbp_memory_clock[i] =
- le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
- pi->sys_info.nbp_n_clock[i] =
- le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
- }
- if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
- pi->caps_enable_dfs_bypass = true;
-
- sumo_construct_sclk_voltage_mapping_table(adev,
- &pi->sys_info.sclk_voltage_mapping_table,
- igp_info->info_8.sAvail_SCLK);
-
- sumo_construct_vid_mapping_table(adev,
- &pi->sys_info.vid_mapping_table,
- igp_info->info_8.sAvail_SCLK);
-
- kv_construct_max_power_limits_table(adev,
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
- }
- return 0;
-}
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
-};
-
-union pplib_clock_info {
- struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
- struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
- struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
-};
-
-union pplib_power_state {
- struct _ATOM_PPLIB_STATE v1;
- struct _ATOM_PPLIB_STATE_V2 v2;
-};
-
-static void kv_patch_boot_state(struct amdgpu_device *adev,
- struct kv_ps *ps)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- ps->num_levels = 1;
- ps->levels[0] = pi->boot_pl;
-}
-
-static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps,
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
- u8 table_rev)
-{
- struct kv_ps *ps = kv_get_ps(rps);
-
- rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- rps->class = le16_to_cpu(non_clock_info->usClassification);
- rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
-
- if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
- rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
- rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else {
- rps->vclk = 0;
- rps->dclk = 0;
- }
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- adev->pm.dpm.boot_ps = rps;
- kv_patch_boot_state(adev, ps);
- }
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- adev->pm.dpm.uvd_ps = rps;
-}
-
-static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps, int index,
- union pplib_clock_info *clock_info)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct kv_ps *ps = kv_get_ps(rps);
- struct kv_pl *pl = &ps->levels[index];
- u32 sclk;
-
- sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
- sclk |= clock_info->sumo.ucEngineClockHigh << 16;
- pl->sclk = sclk;
- pl->vddc_index = clock_info->sumo.vddcIndex;
-
- ps->num_levels = index + 1;
-
- if (pi->caps_sclk_ds) {
- pl->ds_divider_index = 5;
- pl->ss_divider_index = 5;
- }
-}
-
-static int kv_parse_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- union pplib_power_state *power_state;
- int i, j, k, non_clock_array_index, clock_array_index;
- union pplib_clock_info *clock_info;
- struct _StateArray *state_array;
- struct _ClockInfoArray *clock_info_array;
- struct _NonClockInfoArray *non_clock_info_array;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- u8 *power_state_offset;
- struct kv_ps *ps;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- amdgpu_add_thermal_controller(adev);
-
- state_array = (struct _StateArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usStateArrayOffset));
- clock_info_array = (struct _ClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
- non_clock_info_array = (struct _NonClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
-
- adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
- sizeof(struct amdgpu_ps),
- GFP_KERNEL);
- if (!adev->pm.dpm.ps)
- return -ENOMEM;
- power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
- u8 *idx;
- power_state = (union pplib_power_state *)power_state_offset;
- non_clock_array_index = power_state->v2.nonClockInfoIndex;
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- &non_clock_info_array->nonClockInfo[non_clock_array_index];
- ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
- return -ENOMEM;
- }
- adev->pm.dpm.ps[i].ps_priv = ps;
- k = 0;
- idx = (u8 *)&power_state->v2.clockInfoIndex[0];
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = idx[j];
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
- break;
- clock_info = (union pplib_clock_info *)
- ((u8 *)&clock_info_array->clockInfo[0] +
- (clock_array_index * clock_info_array->ucEntrySize));
- kv_parse_pplib_clock_info(adev,
- &adev->pm.dpm.ps[i], k,
- clock_info);
- k++;
- }
- kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
- non_clock_info,
- non_clock_info_array->ucEntrySize);
- power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
- }
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
-
- /* fill in the vce power states */
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- u32 sclk;
- clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
- sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
- sclk |= clock_info->sumo.ucEngineClockHigh << 16;
- adev->pm.dpm.vce_states[i].sclk = sclk;
- adev->pm.dpm.vce_states[i].mclk = 0;
- }
-
- return 0;
-}
-
-static int kv_dpm_init(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi;
- int ret, i;
-
- pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
- if (pi == NULL)
- return -ENOMEM;
- adev->pm.dpm.priv = pi;
-
- ret = amdgpu_get_platform_caps(adev);
- if (ret)
- return ret;
-
- ret = amdgpu_parse_extended_power_table(adev);
- if (ret)
- return ret;
-
- for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
- pi->at[i] = TRINITY_AT_DFLT;
-
- pi->sram_end = SMC_RAM_END;
-
- pi->enable_nb_dpm = true;
-
- pi->caps_power_containment = true;
- pi->caps_cac = true;
- pi->enable_didt = false;
- if (pi->enable_didt) {
- pi->caps_sq_ramping = true;
- pi->caps_db_ramping = true;
- pi->caps_td_ramping = true;
- pi->caps_tcp_ramping = true;
- }
-
- if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- pi->caps_sclk_ds = true;
- else
- pi->caps_sclk_ds = false;
-
- pi->enable_auto_thermal_throttling = true;
- pi->disable_nb_ps3_in_battery = false;
- if (amdgpu_bapm == 0)
- pi->bapm_enable = false;
- else
- pi->bapm_enable = true;
- pi->voltage_drop_t = 0;
- pi->caps_sclk_throttle_low_notification = false;
- pi->caps_fps = false; /* true? */
- pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
- pi->caps_uvd_dpm = true;
- pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
- pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
- pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
- pi->caps_stable_p_state = false;
-
- ret = kv_parse_sys_info_table(adev);
- if (ret)
- return ret;
-
- kv_patch_voltage_values(adev);
- kv_construct_boot_state(adev);
-
- ret = kv_parse_power_table(adev);
- if (ret)
- return ret;
-
- pi->enable_dpm = true;
-
- return 0;
-}
-
-static void
-kv_dpm_debugfs_print_current_performance_level(void *handle,
- struct seq_file *m)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 current_index =
- (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- u32 sclk, tmp;
- u16 vddc;
-
- if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
- seq_printf(m, "invalid dpm profile %d\n", current_index);
- } else {
- sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
- tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
- vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
- seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
- seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
- seq_printf(m, "power level %d sclk: %u vddc: %u\n",
- current_index, sclk, vddc);
- }
-}
-
-static void
-kv_dpm_print_power_state(void *handle, void *request_ps)
-{
- int i;
- struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
- struct kv_ps *ps = kv_get_ps(rps);
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- for (i = 0; i < ps->num_levels; i++) {
- struct kv_pl *pl = &ps->levels[i];
- printk("\t\tpower level %d sclk: %u vddc: %u\n",
- i, pl->sclk,
- kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
- }
- amdgpu_dpm_print_ps_status(adev, rps);
-}
-
-static void kv_dpm_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- kfree(adev->pm.dpm.ps[i].ps_priv);
- }
- kfree(adev->pm.dpm.ps);
- kfree(adev->pm.dpm.priv);
- amdgpu_free_extended_power_table(adev);
-}
-
-static void kv_dpm_display_configuration_changed(void *handle)
-{
-
-}
-
-static u32 kv_dpm_get_sclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
-
- if (low)
- return requested_state->levels[0].sclk;
- else
- return requested_state->levels[requested_state->num_levels - 1].sclk;
-}
-
-static u32 kv_dpm_get_mclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
-
- return pi->sys_info.bootup_uma_clk;
-}
-
-/* get temperature in millidegrees */
-static int kv_dpm_get_temp(void *handle)
-{
- u32 temp;
- int actual_temp = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- temp = RREG32_SMC(0xC0300E0C);
-
- if (temp)
- actual_temp = (temp / 8) - 49;
- else
- actual_temp = 0;
-
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
-}
-
-static int kv_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->powerplay.pp_funcs = &kv_dpm_funcs;
- adev->powerplay.pp_handle = adev;
- kv_dpm_set_irq_funcs(adev);
-
- return 0;
-}
-
-static int kv_dpm_late_init(void *handle)
-{
- /* powerdown unused blocks for now */
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->pm.dpm_enabled)
- return 0;
-
- kv_dpm_powergate_acp(adev, true);
- kv_dpm_powergate_samu(adev, true);
-
- return 0;
-}
-
-static int kv_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
- &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
- &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- /* default to balanced state */
- adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
- adev->pm.default_sclk = adev->clock.default_sclk;
- adev->pm.default_mclk = adev->clock.default_mclk;
- adev->pm.current_sclk = adev->clock.default_sclk;
- adev->pm.current_mclk = adev->clock.default_mclk;
- adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
-
- if (amdgpu_dpm == 0)
- return 0;
-
- INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
- ret = kv_dpm_init(adev);
- if (ret)
- goto dpm_failed;
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- if (amdgpu_dpm == 1)
- amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_INFO("amdgpu: dpm initialized\n");
-
- return 0;
-
-dpm_failed:
- kv_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
- return ret;
-}
-
-static int kv_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- flush_work(&adev->pm.dpm.thermal.work);
-
- mutex_lock(&adev->pm.mutex);
- kv_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
-
- return 0;
-}
-
-static int kv_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!amdgpu_dpm)
- return 0;
-
- mutex_lock(&adev->pm.mutex);
- kv_dpm_setup_asic(adev);
- ret = kv_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
- return ret;
-}
-
-static int kv_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- kv_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- }
-
- return 0;
-}
-
-static int kv_dpm_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- /* disable dpm */
- kv_dpm_disable(adev);
- /* reset the power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- mutex_unlock(&adev->pm.mutex);
- }
- return 0;
-}
-
-static int kv_dpm_resume(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- /* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
- kv_dpm_setup_asic(adev);
- ret = kv_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
- }
- return 0;
-}
-
-static bool kv_dpm_is_idle(void *handle)
-{
- return true;
-}
-
-static int kv_dpm_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-
-static int kv_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 cg_thermal_int;
-
- switch (type) {
- case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-
-static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- bool queue_thermal = false;
-
- if (entry == NULL)
- return -EINVAL;
-
- switch (entry->src_id) {
- case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
- adev->pm.dpm.thermal.high_to_low = false;
- queue_thermal = true;
- break;
- case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
- adev->pm.dpm.thermal.high_to_low = true;
- queue_thermal = true;
- break;
- default:
- break;
- }
-
- if (queue_thermal)
- schedule_work(&adev->pm.dpm.thermal.work);
-
- return 0;
-}
-
-static int kv_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int kv_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
- const struct kv_pl *kv_cpl2)
-{
- return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
- (kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
- (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
- (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
-}
-
-static int kv_check_state_equal(void *handle,
- void *current_ps,
- void *request_ps,
- bool *equal)
-{
- struct kv_ps *kv_cps;
- struct kv_ps *kv_rps;
- int i;
- struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
- struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
- return -EINVAL;
-
- kv_cps = kv_get_ps(cps);
- kv_rps = kv_get_ps(rps);
-
- if (kv_cps == NULL) {
- *equal = false;
- return 0;
- }
-
- if (kv_cps->num_levels != kv_rps->num_levels) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < kv_cps->num_levels; i++) {
- if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
- &(kv_rps->levels[i]))) {
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
- *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
-
- return 0;
-}
-
-static int kv_dpm_read_sensor(void *handle, int idx,
- void *value, int *size)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- uint32_t sclk;
- u32 pl_index =
- (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
-
- /* size must be at least 4 bytes for all sensors */
- if (*size < 4)
- return -EINVAL;
-
- switch (idx) {
- case AMDGPU_PP_SENSOR_GFX_SCLK:
- if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
- sclk = be32_to_cpu(
- pi->graphics_level[pl_index].SclkFrequency);
- *((uint32_t *)value) = sclk;
- *size = 4;
- return 0;
- }
- return -EINVAL;
- case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = kv_dpm_get_temp(adev);
- *size = 4;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int kv_set_powergating_by_smu(void *handle,
- uint32_t block_type, bool gate)
-{
- switch (block_type) {
- case AMD_IP_BLOCK_TYPE_UVD:
- kv_dpm_powergate_uvd(handle, gate);
- break;
- case AMD_IP_BLOCK_TYPE_VCE:
- kv_dpm_powergate_vce(handle, gate);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static const struct amd_ip_funcs kv_dpm_ip_funcs = {
- .name = "kv_dpm",
- .early_init = kv_dpm_early_init,
- .late_init = kv_dpm_late_init,
- .sw_init = kv_dpm_sw_init,
- .sw_fini = kv_dpm_sw_fini,
- .hw_init = kv_dpm_hw_init,
- .hw_fini = kv_dpm_hw_fini,
- .suspend = kv_dpm_suspend,
- .resume = kv_dpm_resume,
- .is_idle = kv_dpm_is_idle,
- .wait_for_idle = kv_dpm_wait_for_idle,
- .soft_reset = kv_dpm_soft_reset,
- .set_clockgating_state = kv_dpm_set_clockgating_state,
- .set_powergating_state = kv_dpm_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version kv_smu_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &kv_dpm_ip_funcs,
-};
-
-static const struct amd_pm_funcs kv_dpm_funcs = {
- .pre_set_power_state = &kv_dpm_pre_set_power_state,
- .set_power_state = &kv_dpm_set_power_state,
- .post_set_power_state = &kv_dpm_post_set_power_state,
- .display_configuration_changed = &kv_dpm_display_configuration_changed,
- .get_sclk = &kv_dpm_get_sclk,
- .get_mclk = &kv_dpm_get_mclk,
- .print_power_state = &kv_dpm_print_power_state,
- .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &kv_dpm_force_performance_level,
- .set_powergating_by_smu = kv_set_powergating_by_smu,
- .enable_bapm = &kv_dpm_enable_bapm,
- .get_vce_clock_state = amdgpu_get_vce_clock_state,
- .check_state_equal = kv_check_state_equal,
- .read_sensor = &kv_dpm_read_sensor,
-};
-
-static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
- .set = kv_dpm_set_interrupt_state,
- .process = kv_dpm_process_interrupt,
-};
-
-static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
-{
- adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
- adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h b/drivers/gpu/drm/amd/amdgpu/kv_dpm.h
deleted file mode 100644
index 6df0ed41317c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __KV_DPM_H__
-#define __KV_DPM_H__
-
-#define SMU__NUM_SCLK_DPM_STATE 8
-#define SMU__NUM_MCLK_DPM_LEVELS 4
-#define SMU__NUM_LCLK_DPM_LEVELS 8
-#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */
-#include "smu7_fusion.h"
-#include "ppsmc.h"
-
-#define SUMO_MAX_HARDWARE_POWERLEVELS 5
-
-#define SUMO_MAX_NUMBER_VOLTAGES 4
-
-struct sumo_vid_mapping_entry {
- u16 vid_2bit;
- u16 vid_7bit;
-};
-
-struct sumo_vid_mapping_table {
- u32 num_entries;
- struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES];
-};
-
-struct sumo_sclk_voltage_mapping_entry {
- u32 sclk_frequency;
- u16 vid_2bit;
- u16 rsv;
-};
-
-struct sumo_sclk_voltage_mapping_table {
- u32 num_max_dpm_entries;
- struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS];
-};
-
-#define TRINITY_AT_DFLT 30
-
-#define KV_NUM_NBPSTATES 4
-
-enum kv_pt_config_reg_type {
- KV_CONFIGREG_MMR = 0,
- KV_CONFIGREG_SMC_IND,
- KV_CONFIGREG_DIDT_IND,
- KV_CONFIGREG_CACHE,
- KV_CONFIGREG_MAX
-};
-
-struct kv_pt_config_reg {
- u32 offset;
- u32 mask;
- u32 shift;
- u32 value;
- enum kv_pt_config_reg_type type;
-};
-
-struct kv_lcac_config_values {
- u32 block_id;
- u32 signal_id;
- u32 t;
-};
-
-struct kv_lcac_config_reg {
- u32 cntl;
- u32 block_mask;
- u32 block_shift;
- u32 signal_mask;
- u32 signal_shift;
- u32 t_mask;
- u32 t_shift;
- u32 enable_mask;
- u32 enable_shift;
-};
-
-struct kv_pl {
- u32 sclk;
- u8 vddc_index;
- u8 ds_divider_index;
- u8 ss_divider_index;
- u8 allow_gnb_slow;
- u8 force_nbp_state;
- u8 display_wm;
- u8 vce_wm;
-};
-
-struct kv_ps {
- struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
- u32 num_levels;
- bool need_dfs_bypass;
- u8 dpm0_pg_nb_ps_lo;
- u8 dpm0_pg_nb_ps_hi;
- u8 dpmx_nb_ps_lo;
- u8 dpmx_nb_ps_hi;
-};
-
-struct kv_sys_info {
- u32 bootup_uma_clk;
- u32 bootup_sclk;
- u32 dentist_vco_freq;
- u32 nb_dpm_enable;
- u32 nbp_memory_clock[KV_NUM_NBPSTATES];
- u32 nbp_n_clock[KV_NUM_NBPSTATES];
- u16 bootup_nb_voltage_index;
- u8 htc_tmp_lmt;
- u8 htc_hyst_lmt;
- struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
- struct sumo_vid_mapping_table vid_mapping_table;
- u32 uma_channel_number;
-};
-
-struct kv_power_info {
- u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
- u32 voltage_drop_t;
- struct kv_sys_info sys_info;
- struct kv_pl boot_pl;
- bool enable_nb_ps_policy;
- bool disable_nb_ps3_in_battery;
- bool video_start;
- bool battery_state;
- u32 lowest_valid;
- u32 highest_valid;
- u16 high_voltage_t;
- bool cac_enabled;
- bool bapm_enable;
- /* smc offsets */
- u32 sram_end;
- u32 dpm_table_start;
- u32 soft_regs_start;
- /* dpm SMU tables */
- u8 graphics_dpm_level_count;
- u8 uvd_level_count;
- u8 vce_level_count;
- u8 acp_level_count;
- u8 samu_level_count;
- u16 fps_high_t;
- SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE];
- SMU7_Fusion_ACPILevel acpi_level;
- SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD];
- SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE];
- SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP];
- SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU];
- u8 uvd_boot_level;
- u8 vce_boot_level;
- u8 acp_boot_level;
- u8 samu_boot_level;
- u8 uvd_interval;
- u8 vce_interval;
- u8 acp_interval;
- u8 samu_interval;
- u8 graphics_boot_level;
- u8 graphics_interval;
- u8 graphics_therm_throttle_enable;
- u8 graphics_voltage_change_enable;
- u8 graphics_clk_slow_enable;
- u8 graphics_clk_slow_divider;
- u8 fps_low_t;
- u32 low_sclk_interrupt_t;
- bool uvd_power_gated;
- bool vce_power_gated;
- bool acp_power_gated;
- bool samu_power_gated;
- bool nb_dpm_enabled;
- /* flags */
- bool enable_didt;
- bool enable_dpm;
- bool enable_auto_thermal_throttling;
- bool enable_nb_dpm;
- /* caps */
- bool caps_cac;
- bool caps_power_containment;
- bool caps_sq_ramping;
- bool caps_db_ramping;
- bool caps_td_ramping;
- bool caps_tcp_ramping;
- bool caps_sclk_throttle_low_notification;
- bool caps_fps;
- bool caps_uvd_dpm;
- bool caps_uvd_pg;
- bool caps_vce_pg;
- bool caps_samu_pg;
- bool caps_acp_pg;
- bool caps_stable_p_state;
- bool caps_enable_dfs_bypass;
- bool caps_sclk_ds;
- struct amdgpu_ps current_rps;
- struct kv_ps current_ps;
- struct amdgpu_ps requested_rps;
- struct kv_ps requested_ps;
-};
-
-/* XXX are these ok? */
-#define KV_TEMP_RANGE_MIN (90 * 1000)
-#define KV_TEMP_RANGE_MAX (120 * 1000)
-
-/* kv_smc.c */
-int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id);
-int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask);
-int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter);
-int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 *value, u32 limit);
-int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable);
-int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable);
-int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit);
-
-#endif
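
[Editorial note] The VID mapping structures above pair each 2-bit hardware voltage index with the 7-bit VID the regulator actually understands. A minimal user-space sketch of how such a table would be consumed; the lookup helper and the sample values are illustrative and not part of the deleted header:

#include <stdint.h>
#include <stdio.h>

#define SUMO_MAX_NUMBER_VOLTAGES 4

struct sumo_vid_mapping_entry {
	uint16_t vid_2bit;
	uint16_t vid_7bit;
};

struct sumo_vid_mapping_table {
	uint32_t num_entries;
	struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES];
};

/* Hypothetical helper: translate a 2-bit VID into the 7-bit VID the
 * voltage regulator expects, falling back to the last valid entry when
 * the index is not found. */
static uint16_t vid_2bit_to_7bit(const struct sumo_vid_mapping_table *t,
				 uint16_t vid_2bit)
{
	uint32_t i;

	for (i = 0; i < t->num_entries; i++)
		if (t->entries[i].vid_2bit == vid_2bit)
			return t->entries[i].vid_7bit;

	return t->entries[t->num_entries - 1].vid_7bit;
}

int main(void)
{
	struct sumo_vid_mapping_table t = {
		.num_entries = 2,
		.entries = { { .vid_2bit = 0, .vid_7bit = 0x40 },
			     { .vid_2bit = 1, .vid_7bit = 0x48 } },
	};

	printf("vid_7bit = 0x%02x\n", vid_2bit_to_7bit(&t, 1));
	return 0;
}
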
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_smc.c b/drivers/gpu/drm/amd/amdgpu/kv_smc.c
deleted file mode 100644
index 2d9ab6b8be66..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/kv_smc.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- */
-
-#include "amdgpu.h"
-#include "cikd.h"
-#include "kv_dpm.h"
-
-#include "smu/smu_7_0_0_d.h"
-#include "smu/smu_7_0_0_sh_mask.h"
-
-int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id)
-{
- u32 i;
- u32 tmp = 0;
-
- WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0)
- break;
- udelay(1);
- }
- tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK;
-
- if (tmp != 1) {
- if (tmp == 0xFF)
- return -EINVAL;
- else if (tmp == 0xFE)
- return -EINVAL;
- }
-
- return 0;
-}
-
-int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask)
-{
- int ret;
-
- ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
-
- if (ret == 0)
- *enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0);
-
- return ret;
-}
-
-int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter)
-{
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return amdgpu_kv_notify_message_to_smu(adev, msg);
-}
-
-static int kv_set_smc_sram_address(struct amdgpu_device *adev,
- u32 smc_address, u32 limit)
-{
- if (smc_address & 3)
- return -EINVAL;
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
- WREG32_P(mmSMC_IND_ACCESS_CNTL, 0,
- ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
-
- return 0;
-}
-
-int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 *value, u32 limit)
-{
- int ret;
-
- ret = kv_set_smc_sram_address(adev, smc_address, limit);
- if (ret)
- return ret;
-
- *value = RREG32(mmSMC_IND_DATA_0);
- return 0;
-}
-
-int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable)
-{
- if (enable)
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable);
- else
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable);
-}
-
-int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable)
-{
- if (enable)
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM);
- else
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM);
-}
-
-int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit)
-{
- int ret;
- u32 data, original_data, addr, extra_shift, t_byte, count, mask;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
- t_byte = addr & 3;
-
- /* RMW for the initial bytes */
- if (t_byte != 0) {
- addr -= t_byte;
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- original_data = RREG32(mmSMC_IND_DATA_0);
-
- data = 0;
- mask = 0;
- count = 4;
- while (count > 0) {
- if (t_byte > 0) {
- mask = (mask << 8) | 0xff;
- t_byte--;
- } else if (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- mask <<= 8;
- } else {
- data <<= 8;
- mask = (mask << 8) | 0xff;
- }
- count--;
- }
-
- data |= original_data & mask;
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- addr += 4;
- }
-
- while (byte_count >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- /* RMW for the final bytes */
- if (byte_count > 0) {
- data = 0;
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- original_data = RREG32(mmSMC_IND_DATA_0);
-
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- /* SMC address space is BE */
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
-
- data |= (original_data & ~((~0UL) << extra_shift));
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
- return 0;
-}
-
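
[Editorial note] The SMC SRAM window behind the indirect index/data registers is 32 bits wide and big-endian, which is why amdgpu_kv_copy_bytes_to_smc() above packs source bytes MSB-first and uses read-modify-write for any unaligned head or tail bytes. A compact standalone sketch of the same packing, with register access replaced by a plain array; the helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the SMC SRAM behind the indirect index/data registers. */
static uint32_t sram[16];

/* Pack four source bytes into one big-endian 32-bit word, as the aligned
 * middle loop of amdgpu_kv_copy_bytes_to_smc() does. */
static uint32_t pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8) | src[3];
}

/* Merge 'count' trailing bytes into an existing word (the tail RMW case). */
static uint32_t merge_tail(uint32_t original, const uint8_t *src, uint32_t count)
{
	uint32_t data = 0, shift = 8 * (4 - count), i;

	for (i = 0; i < count; i++)
		data = (data << 8) | src[i];
	data <<= shift;

	return data | (original & ~(~0UL << shift));
}

int main(void)
{
	const uint8_t payload[] = { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };

	sram[0] = pack_be32(payload);                     /* aligned word   */
	sram[1] = merge_tail(0x11223344, payload + 4, 2); /* 2-byte tail    */

	printf("%08x %08x\n", sram[0], sram[1]);          /* aabbccdd eeff3344 */
	return 0;
}
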
diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
deleted file mode 100644
index 8463245f424f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef PP_SMC_H
-#define PP_SMC_H
-
-#pragma pack(push, 1)
-
-#define PPSMC_SWSTATE_FLAG_DC 0x01
-#define PPSMC_SWSTATE_FLAG_UVD 0x02
-#define PPSMC_SWSTATE_FLAG_VCE 0x04
-#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08
-
-#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
-#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
-#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
-
-#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
-#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
-#define PPSMC_SYSTEMFLAG_GDDR5 0x04
-#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
-#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
-#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
-#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO 0x40
-
-#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
-#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
-#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
-#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
-#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x02
-
-#define PPSMC_DISPLAY_WATERMARK_LOW 0
-#define PPSMC_DISPLAY_WATERMARK_HIGH 1
-
-#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
-#define PPSMC_STATEFLAG_POWERBOOST 0x02
-#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
-#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
-
-#define FDO_MODE_HARDWARE 0
-#define FDO_MODE_PIECE_WISE_LINEAR 1
-
-enum FAN_CONTROL {
- FAN_CONTROL_FUZZY,
- FAN_CONTROL_TABLE
-};
-
-#define PPSMC_Result_OK ((uint8_t)0x01)
-#define PPSMC_Result_Failed ((uint8_t)0xFF)
-
-typedef uint8_t PPSMC_Result;
-
-#define PPSMC_MSG_Halt ((uint8_t)0x10)
-#define PPSMC_MSG_Resume ((uint8_t)0x11)
-#define PPSMC_MSG_ZeroLevelsDisabled ((uint8_t)0x13)
-#define PPSMC_MSG_OneLevelsDisabled ((uint8_t)0x14)
-#define PPSMC_MSG_TwoLevelsDisabled ((uint8_t)0x15)
-#define PPSMC_MSG_EnableThermalInterrupt ((uint8_t)0x16)
-#define PPSMC_MSG_RunningOnAC ((uint8_t)0x17)
-#define PPSMC_MSG_SwitchToSwState ((uint8_t)0x20)
-#define PPSMC_MSG_SwitchToInitialState ((uint8_t)0x40)
-#define PPSMC_MSG_NoForcedLevel ((uint8_t)0x41)
-#define PPSMC_MSG_ForceHigh ((uint8_t)0x42)
-#define PPSMC_MSG_ForceMediumOrHigh ((uint8_t)0x43)
-#define PPSMC_MSG_SwitchToMinimumPower ((uint8_t)0x51)
-#define PPSMC_MSG_ResumeFromMinimumPower ((uint8_t)0x52)
-#define PPSMC_MSG_EnableCac ((uint8_t)0x53)
-#define PPSMC_MSG_DisableCac ((uint8_t)0x54)
-#define PPSMC_TDPClampingActive ((uint8_t)0x59)
-#define PPSMC_TDPClampingInactive ((uint8_t)0x5A)
-#define PPSMC_StartFanControl ((uint8_t)0x5B)
-#define PPSMC_StopFanControl ((uint8_t)0x5C)
-#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
-#define PPSMC_NoDisplay ((uint8_t)0x5D)
-#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
-#define PPSMC_HasDisplay ((uint8_t)0x5E)
-#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
-#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61)
-#define PPSMC_MSG_EnableULV ((uint8_t)0x62)
-#define PPSMC_MSG_DisableULV ((uint8_t)0x63)
-#define PPSMC_MSG_EnterULV ((uint8_t)0x64)
-#define PPSMC_MSG_ExitULV ((uint8_t)0x65)
-#define PPSMC_CACLongTermAvgEnable ((uint8_t)0x6E)
-#define PPSMC_CACLongTermAvgDisable ((uint8_t)0x6F)
-#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint8_t)0x7A)
-#define PPSMC_FlushDataCache ((uint8_t)0x80)
-#define PPSMC_MSG_SetEnabledLevels ((uint8_t)0x82)
-#define PPSMC_MSG_SetForcedLevels ((uint8_t)0x83)
-#define PPSMC_MSG_ResetToDefaults ((uint8_t)0x84)
-#define PPSMC_MSG_EnableDTE ((uint8_t)0x87)
-#define PPSMC_MSG_DisableDTE ((uint8_t)0x88)
-#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
-#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
-#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
-
-/* CI/KV/KB */
-#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
-#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
-#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
-#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
-#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
-#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
-#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
-#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
-#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
-#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
-#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
-#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
-#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
-#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
-#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
-#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
-#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
-#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
-#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
-#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
-#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
-#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
-#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
-#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
-#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
-#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
-#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
-#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
-#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
-#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
-#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
-#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
-#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
-#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
-#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
-#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
-#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
-#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
-#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
-#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
-#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
-#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
-#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
-#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
-#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
-#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
-#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
-#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
-#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
-#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
-#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
-#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
-
-#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
-#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
-
-#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
-#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
-
-/* TN */
-#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
-#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
-#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
-#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
-#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a)
-#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
-#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
-#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
-#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
-#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
-#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
-#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121)
-#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
-
-#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
-#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
-#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
-#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
-#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
-
-typedef uint16_t PPSMC_Msg;
-
-#pragma pack(pop)
-
-#endif
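
[Editorial note] These IDs are posted through the SMC message/argument registers and acknowledged with a PPSMC_Result; kv_smc.c above is one consumer of that handshake. A toy model of the mailbox protocol, assuming only what the deleted sources show; the wrapper and the always-acking firmware stub are illustrative:

#include <stdint.h>
#include <stdio.h>

#define PPSMC_Result_OK     ((uint8_t)0x01)
#define PPSMC_Result_Failed ((uint8_t)0xFF)

typedef uint16_t PPSMC_Msg;

/* Toy model of the SMC mailbox: message and argument registers plus a
 * response latch.  The "firmware" here simply acknowledges everything. */
static struct {
	uint16_t msg;
	uint32_t arg;
	uint8_t  resp;
} smc;

static void smc_write_msg(uint16_t id)  { smc.msg = id; smc.resp = PPSMC_Result_OK; }
static void smc_write_arg(uint32_t arg) { smc.arg = arg; }
static uint8_t smc_read_resp(void)      { return smc.resp; }

/* Illustrative wrapper mirroring amdgpu_kv_send_msg_to_smc_with_parameter():
 * stage the argument, post the message, then check the response. */
static int send_msg_with_parameter(PPSMC_Msg msg, uint32_t parameter)
{
	smc_write_arg(parameter);
	smc_write_msg(msg);

	return smc_read_resp() == PPSMC_Result_OK ? 0 : -1;
}

int main(void)
{
	/* e.g. PPSMC_MSG_SCLKDPM_SetEnabledMask (0x145) with a level mask */
	if (send_msg_with_parameter((PPSMC_Msg)0x145, 0x7) == 0)
		printf("SMC accepted the message\n");
	return 0;
}
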
diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
deleted file mode 100644
index 055321f61ca7..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __R600_DPM_H__
-#define __R600_DPM_H__
-
-#define R600_ASI_DFLT 10000
-#define R600_BSP_DFLT 0x41EB
-#define R600_BSU_DFLT 0x2
-#define R600_AH_DFLT 5
-#define R600_RLP_DFLT 25
-#define R600_RMP_DFLT 65
-#define R600_LHP_DFLT 40
-#define R600_LMP_DFLT 15
-#define R600_TD_DFLT 0
-#define R600_UTC_DFLT_00 0x24
-#define R600_UTC_DFLT_01 0x22
-#define R600_UTC_DFLT_02 0x22
-#define R600_UTC_DFLT_03 0x22
-#define R600_UTC_DFLT_04 0x22
-#define R600_UTC_DFLT_05 0x22
-#define R600_UTC_DFLT_06 0x22
-#define R600_UTC_DFLT_07 0x22
-#define R600_UTC_DFLT_08 0x22
-#define R600_UTC_DFLT_09 0x22
-#define R600_UTC_DFLT_10 0x22
-#define R600_UTC_DFLT_11 0x22
-#define R600_UTC_DFLT_12 0x22
-#define R600_UTC_DFLT_13 0x22
-#define R600_UTC_DFLT_14 0x22
-#define R600_DTC_DFLT_00 0x24
-#define R600_DTC_DFLT_01 0x22
-#define R600_DTC_DFLT_02 0x22
-#define R600_DTC_DFLT_03 0x22
-#define R600_DTC_DFLT_04 0x22
-#define R600_DTC_DFLT_05 0x22
-#define R600_DTC_DFLT_06 0x22
-#define R600_DTC_DFLT_07 0x22
-#define R600_DTC_DFLT_08 0x22
-#define R600_DTC_DFLT_09 0x22
-#define R600_DTC_DFLT_10 0x22
-#define R600_DTC_DFLT_11 0x22
-#define R600_DTC_DFLT_12 0x22
-#define R600_DTC_DFLT_13 0x22
-#define R600_DTC_DFLT_14 0x22
-#define R600_VRC_DFLT 0x0000C003
-#define R600_VOLTAGERESPONSETIME_DFLT 1000
-#define R600_BACKBIASRESPONSETIME_DFLT 1000
-#define R600_VRU_DFLT 0x3
-#define R600_SPLLSTEPTIME_DFLT 0x1000
-#define R600_SPLLSTEPUNIT_DFLT 0x3
-#define R600_TPU_DFLT 0
-#define R600_TPC_DFLT 0x200
-#define R600_SSTU_DFLT 0
-#define R600_SST_DFLT 0x00C8
-#define R600_GICST_DFLT 0x200
-#define R600_FCT_DFLT 0x0400
-#define R600_FCTU_DFLT 0
-#define R600_CTXCGTT3DRPHC_DFLT 0x20
-#define R600_CTXCGTT3DRSDC_DFLT 0x40
-#define R600_VDDC3DOORPHC_DFLT 0x100
-#define R600_VDDC3DOORSDC_DFLT 0x7
-#define R600_VDDC3DOORSU_DFLT 0
-#define R600_MPLLLOCKTIME_DFLT 100
-#define R600_MPLLRESETTIME_DFLT 150
-#define R600_VCOSTEPPCT_DFLT 20
-#define R600_ENDINGVCOSTEPPCT_DFLT 5
-#define R600_REFERENCEDIVIDER_DFLT 4
-
-#define R600_PM_NUMBER_OF_TC 15
-#define R600_PM_NUMBER_OF_SCLKS 20
-#define R600_PM_NUMBER_OF_MCLKS 4
-#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
-#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
-
-/* XXX are these ok? */
-#define R600_TEMP_RANGE_MIN (90 * 1000)
-#define R600_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-enum r600_power_level {
- R600_POWER_LEVEL_LOW = 0,
- R600_POWER_LEVEL_MEDIUM = 1,
- R600_POWER_LEVEL_HIGH = 2,
- R600_POWER_LEVEL_CTXSW = 3,
-};
-
-enum r600_td {
- R600_TD_AUTO,
- R600_TD_UP,
- R600_TD_DOWN,
-};
-
-enum r600_display_watermark {
- R600_DISPLAY_WATERMARK_LOW = 0,
- R600_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum r600_display_gap
-{
- R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- R600_PM_DISPLAY_GAP_VBLANK = 1,
- R600_PM_DISPLAY_GAP_WATERMARK = 2,
- R600_PM_DISPLAY_GAP_IGNORE = 3,
-};
-#endif
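
[Editorial note] As in kv_dpm.h, the temperature range here is expressed in millidegrees Celsius (90 * 1000 .. 120 * 1000). A small sketch of clamping a requested thermal window to that advertised range; the helper is hypothetical and shown only to make the units explicit:

#include <stdio.h>

#define R600_TEMP_RANGE_MIN (90 * 1000)   /* millidegrees Celsius */
#define R600_TEMP_RANGE_MAX (120 * 1000)

/* Illustrative clamp: constrain a requested thermal window to the range
 * the legacy code advertises, in the same millidegree units. */
static void clamp_temp_range(int *low_mdeg, int *high_mdeg)
{
	if (*low_mdeg < R600_TEMP_RANGE_MIN)
		*low_mdeg = R600_TEMP_RANGE_MIN;
	if (*high_mdeg > R600_TEMP_RANGE_MAX)
		*high_mdeg = R600_TEMP_RANGE_MAX;
}

int main(void)
{
	int low = 85 * 1000, high = 125 * 1000;

	clamp_temp_range(&low, &high);
	printf("thermal window: %d..%d mC (%d..%d C)\n",
	       low, high, low / 1000, high / 1000);
	return 0;
}
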
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
deleted file mode 100644
index ea914b256ebd..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ /dev/null
@@ -1,8079 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_dpm.h"
-#include "amdgpu_atombios.h"
-#include "amd_pcie.h"
-#include "sid.h"
-#include "r600_dpm.h"
-#include "si_dpm.h"
-#include "atom.h"
-#include "../include/pptable.h"
-#include <linux/math64.h>
-#include <linux/seq_file.h>
-#include <linux/firmware.h>
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-#define SMC_RAM_END 0x20000
-
-#define SCLK_MIN_DEEPSLEEP_FREQ 1350
-
-
-/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
-
-#define BIOS_SCRATCH_4 0x5cd
-
-MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
-MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
-MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/verde_smc.bin");
-MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/oland_smc.bin");
-MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
-MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
-
-static const struct amd_pm_funcs si_dpm_funcs;
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
- struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
- struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
-};
-
-union fan_info {
- struct _ATOM_PPLIB_FANTABLE fan;
- struct _ATOM_PPLIB_FANTABLE2 fan2;
- struct _ATOM_PPLIB_FANTABLE3 fan3;
-};
-
-union pplib_clock_info {
- struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
- struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
- struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
- struct _ATOM_PPLIB_SI_CLOCK_INFO si;
-};
-
-static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
-{
- R600_UTC_DFLT_00,
- R600_UTC_DFLT_01,
- R600_UTC_DFLT_02,
- R600_UTC_DFLT_03,
- R600_UTC_DFLT_04,
- R600_UTC_DFLT_05,
- R600_UTC_DFLT_06,
- R600_UTC_DFLT_07,
- R600_UTC_DFLT_08,
- R600_UTC_DFLT_09,
- R600_UTC_DFLT_10,
- R600_UTC_DFLT_11,
- R600_UTC_DFLT_12,
- R600_UTC_DFLT_13,
- R600_UTC_DFLT_14,
-};
-
-static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
-{
- R600_DTC_DFLT_00,
- R600_DTC_DFLT_01,
- R600_DTC_DFLT_02,
- R600_DTC_DFLT_03,
- R600_DTC_DFLT_04,
- R600_DTC_DFLT_05,
- R600_DTC_DFLT_06,
- R600_DTC_DFLT_07,
- R600_DTC_DFLT_08,
- R600_DTC_DFLT_09,
- R600_DTC_DFLT_10,
- R600_DTC_DFLT_11,
- R600_DTC_DFLT_12,
- R600_DTC_DFLT_13,
- R600_DTC_DFLT_14,
-};
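
[Editorial note] The r600_utc[] and r600_dtc[] arrays above carry one up/down threshold default per slot, both sized by R600_PM_NUMBER_OF_TC from the deleted r600_dpm.h. A minimal bounds-checked lookup over such a pair; the helper is illustrative and not taken from the original file:

#include <stdint.h>
#include <stdio.h>

#define R600_PM_NUMBER_OF_TC 15
#define R600_UTC_DFLT_00 0x24
#define R600_DTC_DFLT_00 0x24

/* Illustrative: both default tables are indexed by the same slot, so a
 * single bounds check covers the pair. */
static int get_tc_defaults(const uint32_t *utc, const uint32_t *dtc,
			   unsigned int slot, uint32_t *up, uint32_t *down)
{
	if (slot >= R600_PM_NUMBER_OF_TC)
		return -1;

	*up = utc[slot];
	*down = dtc[slot];
	return 0;
}

int main(void)
{
	uint32_t utc[R600_PM_NUMBER_OF_TC] = { R600_UTC_DFLT_00 };
	uint32_t dtc[R600_PM_NUMBER_OF_TC] = { R600_DTC_DFLT_00 };
	uint32_t up, down;

	if (get_tc_defaults(utc, dtc, 0, &up, &down) == 0)
		printf("slot 0: utc=0x%x dtc=0x%x\n", up, down);
	return 0;
}
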
-
-static const struct si_cac_config_reg cac_weights_tahiti[] =
-{
- { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_tahiti[] =
-{
- { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-
-};
-
-static const struct si_cac_config_reg cac_override_tahiti[] =
-{
- { 0xFFFFFFFF }
-};
-
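
[Editorial note] Each si_cac_config_reg table above is a sentinel-terminated list of { offset, mask, shift, value, type } tuples, ending at the 0xFFFFFFFF offset. Programming one is a walk that read-modify-writes each field into place. A standalone sketch of that walker under those assumptions, with the register file modeled as an array and the indirection type dropped for brevity:

#include <stdint.h>
#include <stdio.h>

struct cac_config_reg {
	uint32_t offset;
	uint32_t mask;
	uint32_t shift;
	uint32_t value;
};

static uint32_t regs[0x200];

/* Illustrative table walker: for each entry, clear the masked field and
 * OR in the new value at its shift, stopping at the 0xFFFFFFFF sentinel. */
static void program_cac_table(const struct cac_config_reg *tbl)
{
	for (; tbl->offset != 0xFFFFFFFF; tbl++) {
		uint32_t v = regs[tbl->offset];

		v &= ~tbl->mask;
		v |= (tbl->value << tbl->shift) & tbl->mask;
		regs[tbl->offset] = v;
	}
}

int main(void)
{
	/* First few tuples mirror cac_weights_tahiti above, minus the type. */
	static const struct cac_config_reg demo[] = {
		{ 0x0, 0x0000ffff,  0, 0xc },
		{ 0x0, 0xffff0000, 16, 0x0 },
		{ 0x1, 0x0000ffff,  0, 0x101 },
		{ 0xFFFFFFFF }
	};

	program_cac_table(demo);
	printf("reg0=0x%08x reg1=0x%08x\n", regs[0x0], regs[0x1]);
	return 0;
}
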
-static const struct si_powertune_data powertune_data_tahiti =
-{
- ((1 << 16) | 27027),
- 6,
- 0,
- 4,
- 95,
- {
- 0UL,
- 0UL,
- 4521550UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 40
- },
- 595000000UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_dte_data dte_data_tahiti =
-{
- { 1159409, 0, 0, 0, 0 },
- { 777, 0, 0, 0, 0 },
- 2,
- 54000,
- 127000,
- 25,
- 2,
- 10,
- 13,
- { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
- { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
- { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
- 85,
- false
-};
-
-static const struct si_dte_data dte_data_tahiti_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_new_zealand =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
- { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
- 0x5,
- 0xAFC8,
- 0x69,
- 0x32,
- 1,
- 0,
- 0x10,
- { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
- 85,
- true
-};
-
-static const struct si_dte_data dte_data_aruba_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_malta =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_cac_config_reg cac_weights_pitcairn[] =
-{
- { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_pitcairn[] =
-{
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_override_pitcairn[] =
-{
- { 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_pitcairn =
-{
- ((1 << 16) | 27027),
- 5,
- 0,
- 6,
- 100,
- {
- 51600000UL,
- 1800000UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_dte_data dte_data_pitcairn =
-{
- { 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 },
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- 0,
- false
-};
-
-static const struct si_dte_data dte_data_curacao_xt =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_curacao_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_neptune_xt =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_heathrow[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_cape_verde[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_cape_verde[] =
-{
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_override_cape_verde[] =
-{
- { 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_cape_verde =
-{
- ((1 << 16) | 0x6993),
- 5,
- 0,
- 7,
- 105,
- {
- 0UL,
- 0UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_dte_data dte_data_cape_verde =
-{
- { 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 },
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- 0,
- false
-};
-
-static const struct si_dte_data dte_data_venus_xtx =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
- 5,
- 55000,
- 0x69,
- 0xA,
- 1,
- 0,
- 0x3,
- { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_venus_xt =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
- 5,
- 55000,
- 0x69,
- 0xA,
- 1,
- 0,
- 0x3,
- { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_venus_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
- 5,
- 55000,
- 0x69,
- 0xA,
- 1,
- 0,
- 0x3,
- { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_cac_config_reg cac_weights_oland[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_mars_pro[] =
-{
- { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_mars_xt[] =
-{
- { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_oland_pro[] =
-{
- { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_oland_xt[] =
-{
- { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_oland[] =
-{
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_mars_pro[] =
-{
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_override_oland[] =
-{
- { 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_oland =
-{
- ((1 << 16) | 0x6993),
- 5,
- 0,
- 7,
- 105,
- {
- 0UL,
- 0UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_powertune_data powertune_data_mars_pro =
-{
- ((1 << 16) | 0x6993),
- 5,
- 0,
- 7,
- 105,
- {
- 0UL,
- 0UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_dte_data dte_data_oland =
-{
- { 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 },
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- 0,
- false
-};
-
-static const struct si_dte_data dte_data_mars_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 55000,
- 105,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_sun_xt =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 55000,
- 105,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-
-static const struct si_cac_config_reg cac_weights_hainan[] =
-{
- { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_hainan =
-{
- ((1 << 16) | 0x6993),
- 5,
- 0,
- 9,
- 105,
- {
- 0UL,
- 0UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
-static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
-static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
-static struct si_ps *si_get_ps(struct amdgpu_ps *rps);
-
-static int si_populate_voltage_value(struct amdgpu_device *adev,
- const struct atom_voltage_table *table,
- u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
-static int si_get_std_voltage_value(struct amdgpu_device *adev,
- SISLANDS_SMC_VOLTAGE_VALUE *voltage,
- u16 *std_voltage);
-static int si_write_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 value);
-static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
- struct rv7xx_pl *pl,
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
-static int si_calculate_sclk_params(struct amdgpu_device *adev,
- u32 engine_clock,
- SISLANDS_SMC_SCLK_VALUE *sclk);
-
-static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
-static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
-static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
-
-static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
-{
- struct si_power_info *pi = adev->pm.dpm.priv;
- return pi;
-}
-
-static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
- u16 v, s32 t, u32 ileakage, u32 *leakage)
-{
- s64 kt, kv, leakage_w, i_leakage, vddc;
- s64 temperature, t_slope, t_intercept, av, bv, t_ref;
- s64 tmp;
-
- i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
- vddc = div64_s64(drm_int2fixp(v), 1000);
- temperature = div64_s64(drm_int2fixp(t), 1000);
-
- t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
- t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
- av = div64_s64(drm_int2fixp(coeff->av), 100000000);
- bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
- t_ref = drm_int2fixp(coeff->t_ref);
-
- tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
- kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
- kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
- kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
-
- leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
-
- *leakage = drm_fixp2int(leakage_w * 1000);
-}
-
-static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
- const struct ni_leakage_coeffients *coeff,
- u16 v,
- s32 t,
- u32 i_leakage,
- u32 *leakage)
-{
- si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
-}
-
-static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
- const u32 fixed_kt, u16 v,
- u32 ileakage, u32 *leakage)
-{
- s64 kt, kv, leakage_w, i_leakage, vddc;
-
- i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
- vddc = div64_s64(drm_int2fixp(v), 1000);
-
- kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
- kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
- drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
-
- leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
-
- *leakage = drm_fixp2int(leakage_w * 1000);
-}
-
-static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
- const struct ni_leakage_coeffients *coeff,
- const u32 fixed_kt,
- u16 v,
- u32 i_leakage,
- u32 *leakage)
-{
- si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
-}
-
-
-static void si_update_dte_from_pl2(struct amdgpu_device *adev,
- struct si_dte_data *dte_data)
-{
- u32 p_limit1 = adev->pm.dpm.tdp_limit;
- u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
- u32 k = dte_data->k;
- u32 t_max = dte_data->max_t;
- u32 t_split[5] = { 10, 15, 20, 25, 30 };
- u32 t_0 = dte_data->t0;
- u32 i;
-
- if (p_limit2 != 0 && p_limit2 <= p_limit1) {
- dte_data->tdep_count = 3;
-
- for (i = 0; i < k; i++) {
- dte_data->r[i] =
- (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
- (p_limit2 * (u32)100);
- }
-
- dte_data->tdep_r[1] = dte_data->r[4] * 2;
-
- for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
- dte_data->tdep_r[i] = dte_data->r[4];
- }
- } else {
- DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
- }
-}
-
-static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
-{
- struct ni_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
-{
- struct si_ps *ps = aps->ps_priv;
-
- return ps;
-}
-
-static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- bool update_dte_from_pl2 = false;
-
- if (adev->asic_type == CHIP_TAHITI) {
- si_pi->cac_weights = cac_weights_tahiti;
- si_pi->lcac_config = lcac_tahiti;
- si_pi->cac_override = cac_override_tahiti;
- si_pi->powertune_data = &powertune_data_tahiti;
- si_pi->dte_data = dte_data_tahiti;
-
- switch (adev->pdev->device) {
- case 0x6798:
- si_pi->dte_data.enable_dte_by_default = true;
- break;
- case 0x6799:
- si_pi->dte_data = dte_data_new_zealand;
- break;
- case 0x6790:
- case 0x6791:
- case 0x6792:
- case 0x679E:
- si_pi->dte_data = dte_data_aruba_pro;
- update_dte_from_pl2 = true;
- break;
- case 0x679B:
- si_pi->dte_data = dte_data_malta;
- update_dte_from_pl2 = true;
- break;
- case 0x679A:
- si_pi->dte_data = dte_data_tahiti_pro;
- update_dte_from_pl2 = true;
- break;
- default:
- if (si_pi->dte_data.enable_dte_by_default == true)
- DRM_ERROR("DTE is not enabled!\n");
- break;
- }
- } else if (adev->asic_type == CHIP_PITCAIRN) {
- si_pi->cac_weights = cac_weights_pitcairn;
- si_pi->lcac_config = lcac_pitcairn;
- si_pi->cac_override = cac_override_pitcairn;
- si_pi->powertune_data = &powertune_data_pitcairn;
-
- switch (adev->pdev->device) {
- case 0x6810:
- case 0x6818:
- si_pi->dte_data = dte_data_curacao_xt;
- update_dte_from_pl2 = true;
- break;
- case 0x6819:
- case 0x6811:
- si_pi->dte_data = dte_data_curacao_pro;
- update_dte_from_pl2 = true;
- break;
- case 0x6800:
- case 0x6806:
- si_pi->dte_data = dte_data_neptune_xt;
- update_dte_from_pl2 = true;
- break;
- default:
- si_pi->dte_data = dte_data_pitcairn;
- break;
- }
- } else if (adev->asic_type == CHIP_VERDE) {
- si_pi->lcac_config = lcac_cape_verde;
- si_pi->cac_override = cac_override_cape_verde;
- si_pi->powertune_data = &powertune_data_cape_verde;
-
- switch (adev->pdev->device) {
- case 0x683B:
- case 0x683F:
- case 0x6829:
- case 0x6835:
- si_pi->cac_weights = cac_weights_cape_verde_pro;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- case 0x682C:
- si_pi->cac_weights = cac_weights_cape_verde_pro;
- si_pi->dte_data = dte_data_sun_xt;
- update_dte_from_pl2 = true;
- break;
- case 0x6825:
- case 0x6827:
- si_pi->cac_weights = cac_weights_heathrow;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- case 0x6824:
- case 0x682D:
- si_pi->cac_weights = cac_weights_chelsea_xt;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- case 0x682F:
- si_pi->cac_weights = cac_weights_chelsea_pro;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- case 0x6820:
- si_pi->cac_weights = cac_weights_heathrow;
- si_pi->dte_data = dte_data_venus_xtx;
- break;
- case 0x6821:
- si_pi->cac_weights = cac_weights_heathrow;
- si_pi->dte_data = dte_data_venus_xt;
- break;
- case 0x6823:
- case 0x682B:
- case 0x6822:
- case 0x682A:
- si_pi->cac_weights = cac_weights_chelsea_pro;
- si_pi->dte_data = dte_data_venus_pro;
- break;
- default:
- si_pi->cac_weights = cac_weights_cape_verde;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- }
- } else if (adev->asic_type == CHIP_OLAND) {
- si_pi->lcac_config = lcac_mars_pro;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_mars_pro;
- si_pi->dte_data = dte_data_mars_pro;
-
- switch (adev->pdev->device) {
- case 0x6601:
- case 0x6621:
- case 0x6603:
- case 0x6605:
- si_pi->cac_weights = cac_weights_mars_pro;
- update_dte_from_pl2 = true;
- break;
- case 0x6600:
- case 0x6606:
- case 0x6620:
- case 0x6604:
- si_pi->cac_weights = cac_weights_mars_xt;
- update_dte_from_pl2 = true;
- break;
- case 0x6611:
- case 0x6613:
- case 0x6608:
- si_pi->cac_weights = cac_weights_oland_pro;
- update_dte_from_pl2 = true;
- break;
- case 0x6610:
- si_pi->cac_weights = cac_weights_oland_xt;
- update_dte_from_pl2 = true;
- break;
- default:
- si_pi->cac_weights = cac_weights_oland;
- si_pi->lcac_config = lcac_oland;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_oland;
- si_pi->dte_data = dte_data_oland;
- break;
- }
- } else if (adev->asic_type == CHIP_HAINAN) {
- si_pi->cac_weights = cac_weights_hainan;
- si_pi->lcac_config = lcac_oland;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_hainan;
- si_pi->dte_data = dte_data_sun_xt;
- update_dte_from_pl2 = true;
- } else {
- DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
- return;
- }
-
- ni_pi->enable_power_containment = false;
- ni_pi->enable_cac = false;
- ni_pi->enable_sq_ramping = false;
- si_pi->enable_dte = false;
-
- if (si_pi->powertune_data->enable_powertune_by_default) {
- ni_pi->enable_power_containment = true;
- ni_pi->enable_cac = true;
- if (si_pi->dte_data.enable_dte_by_default) {
- si_pi->enable_dte = true;
- if (update_dte_from_pl2)
- si_update_dte_from_pl2(adev, &si_pi->dte_data);
-
- }
- ni_pi->enable_sq_ramping = true;
- }
-
- ni_pi->driver_calculate_cac_leakage = true;
- ni_pi->cac_configuration_required = true;
-
- if (ni_pi->cac_configuration_required) {
- ni_pi->support_cac_long_term_average = true;
- si_pi->dyn_powertune_data.l2_lta_window_size =
- si_pi->powertune_data->l2_lta_window_size_default;
- si_pi->dyn_powertune_data.lts_truncate =
- si_pi->powertune_data->lts_truncate_default;
- } else {
- ni_pi->support_cac_long_term_average = false;
- si_pi->dyn_powertune_data.l2_lta_window_size = 0;
- si_pi->dyn_powertune_data.lts_truncate = 0;
- }
-
- si_pi->dyn_powertune_data.disable_uvd_powertune = false;
-}
-
-static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
-{
- return 1;
-}
-
-static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
-{
- u32 xclk;
- u32 wintime;
- u32 cac_window;
- u32 cac_window_size;
-
- xclk = amdgpu_asic_get_xclk(adev);
-
- if (xclk == 0)
- return 0;
-
- cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
- cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
-
- wintime = (cac_window_size * 100) / xclk;
-
- return wintime;
-}
-
-static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
-{
- return power_in_watts;
-}
-
-static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
- bool adjust_polarity,
- u32 tdp_adjustment,
- u32 *tdp_limit,
- u32 *near_tdp_limit)
-{
- u32 adjustment_delta, max_tdp_limit;
-
- if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
- return -EINVAL;
-
- max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
-
- if (adjust_polarity) {
- *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
- *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
- } else {
- *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
- adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit;
- if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
- *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
- else
- *near_tdp_limit = 0;
- }
-
- if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
- return -EINVAL;
- if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
- return -EINVAL;
-
- return 0;
-}
-
-static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (ni_pi->enable_power_containment) {
- SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
- PP_SIslands_PAPMParameters *papm_parm;
- struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
- u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
- u32 tdp_limit;
- u32 near_tdp_limit;
- int ret;
-
- if (scaling_factor == 0)
- return -EINVAL;
-
- memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
-
- ret = si_calculate_adjusted_tdp_limits(adev,
- false, /* ??? */
- adev->pm.dpm.tdp_adjustment,
- &tdp_limit,
- &near_tdp_limit);
- if (ret)
- return ret;
-
- smc_table->dpm2Params.TDPLimit =
- cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
- smc_table->dpm2Params.NearTDPLimit =
- cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
- smc_table->dpm2Params.SafePowerLimit =
- cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
-
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
- offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
- (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
- sizeof(u32) * 3,
- si_pi->sram_end);
- if (ret)
- return ret;
-
- if (si_pi->enable_ppm) {
- papm_parm = &si_pi->papm_parm;
- memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
- papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
- papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
- papm_parm->dGPU_T_Warning = cpu_to_be32(95);
- papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
- papm_parm->PlatformPowerLimit = 0xffffffff;
- papm_parm->NearTDPLimitPAPM = 0xffffffff;
-
- ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
- (u8 *)papm_parm,
- sizeof(PP_SIslands_PAPMParameters),
- si_pi->sram_end);
- if (ret)
- return ret;
- }
- }
- return 0;
-}
-
-static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (ni_pi->enable_power_containment) {
- SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
- u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
- int ret;
-
- memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
-
- smc_table->dpm2Params.NearTDPLimit =
- cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
- smc_table->dpm2Params.SafePowerLimit =
- cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
-
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- (si_pi->state_table_start +
- offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
- offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
- (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
- sizeof(u32) * 2,
- si_pi->sram_end);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
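-/*
- * Power scales roughly with voltage squared, so the returned ratio is
- * 1024 * curr_vddc^2 * (1000 + margin) / (1000 * prev_vddc^2), or 0 if
- * either voltage is zero or the result does not fit in 16 bits.
- */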
-static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
- const u16 prev_std_vddc,
- const u16 curr_std_vddc)
-{
- u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
- u64 prev_vddc = (u64)prev_std_vddc;
- u64 curr_vddc = (u64)curr_std_vddc;
- u64 pwr_efficiency_ratio, n, d;
-
- if ((prev_vddc == 0) || (curr_vddc == 0))
- return 0;
-
- n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
- d = prev_vddc * prev_vddc;
- pwr_efficiency_ratio = div64_u64(n, d);
-
- if (pwr_efficiency_ratio > (u64)0xFFFF)
- return 0;
-
- return (u16)pwr_efficiency_ratio;
-}
-
-static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
- amdgpu_state->vclk && amdgpu_state->dclk)
- return true;
-
- return false;
-}
-
-struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
-{
- struct evergreen_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-static int si_populate_power_containment_values(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- SISLANDS_SMC_VOLTAGE_VALUE vddc;
- u32 prev_sclk;
- u32 max_sclk;
- u32 min_sclk;
- u16 prev_std_vddc;
- u16 curr_std_vddc;
- int i;
- u16 pwr_efficiency_ratio;
- u8 max_ps_percent;
- bool disable_uvd_power_tune;
- int ret;
-
- if (ni_pi->enable_power_containment == false)
- return 0;
-
- if (state->performance_level_count == 0)
- return -EINVAL;
-
- if (smc_state->levelCount != state->performance_level_count)
- return -EINVAL;
-
- disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
-
- smc_state->levels[0].dpm2.MaxPS = 0;
- smc_state->levels[0].dpm2.NearTDPDec = 0;
- smc_state->levels[0].dpm2.AboveSafeInc = 0;
- smc_state->levels[0].dpm2.BelowSafeInc = 0;
- smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
-
- for (i = 1; i < state->performance_level_count; i++) {
- prev_sclk = state->performance_levels[i-1].sclk;
- max_sclk = state->performance_levels[i].sclk;
- if (i == 1)
- max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
- else
- max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
-
- if (prev_sclk > max_sclk)
- return -EINVAL;
-
- if ((max_ps_percent == 0) ||
- (prev_sclk == max_sclk) ||
- disable_uvd_power_tune)
- min_sclk = max_sclk;
- else if (i == 1)
- min_sclk = prev_sclk;
- else
- min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
-
- if (min_sclk < state->performance_levels[0].sclk)
- min_sclk = state->performance_levels[0].sclk;
-
- if (min_sclk == 0)
- return -EINVAL;
-
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- state->performance_levels[i-1].vddc, &vddc);
- if (ret)
- return ret;
-
- ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
- if (ret)
- return ret;
-
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- state->performance_levels[i].vddc, &vddc);
- if (ret)
- return ret;
-
- ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
- if (ret)
- return ret;
-
- pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
- prev_std_vddc, curr_std_vddc);
-
- smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
- smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
- smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
- smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
- smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
- }
-
- return 0;
-}
-
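-/*
- * Fill the per-level SQ power throttle fields.  Ramping values are only
- * applied when every SISLANDS_DPM2_SQ_RAMP_* constant fits in its register
- * field and the level's sclk is at or above adev->pm.dpm.sq_ramping_threshold;
- * otherwise the MAX/MIN power and delta fields are written fully set.
- */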
-static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- u32 sq_power_throttle, sq_power_throttle2;
- bool enable_sq_ramping = ni_pi->enable_sq_ramping;
- int i;
-
- if (state->performance_level_count == 0)
- return -EINVAL;
-
- if (smc_state->levelCount != state->performance_level_count)
- return -EINVAL;
-
- if (adev->pm.dpm.sq_ramping_threshold == 0)
- return -EINVAL;
-
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
- enable_sq_ramping = false;
-
- if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
- enable_sq_ramping = false;
-
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
- enable_sq_ramping = false;
-
- if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
- enable_sq_ramping = false;
-
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
- enable_sq_ramping = false;
-
- for (i = 0; i < state->performance_level_count; i++) {
- sq_power_throttle = 0;
- sq_power_throttle2 = 0;
-
- if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
- enable_sq_ramping) {
- sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
- sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
- sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
- sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
- sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
- } else {
- sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
- sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- }
-
- smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
- smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
- }
-
- return 0;
-}
-
-static int si_enable_power_containment(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- bool enable)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- PPSMC_Result smc_result;
- int ret = 0;
-
- if (ni_pi->enable_power_containment) {
- if (enable) {
- if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
- if (smc_result != PPSMC_Result_OK) {
- ret = -EINVAL;
- ni_pi->pc_enabled = false;
- } else {
- ni_pi->pc_enabled = true;
- }
- }
- } else {
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
- if (smc_result != PPSMC_Result_OK)
- ret = -EINVAL;
- ni_pi->pc_enabled = false;
- }
- }
-
- return ret;
-}
-
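-/*
- * Build the DTE configuration (filter stages plus the temperature dependent
- * tau/R pairs and limits) from si_pi->dte_data, clamp the table sizes to
- * what the SMC supports, and upload the result to SMC SRAM.
- */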
-static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- int ret = 0;
- struct si_dte_data *dte_data = &si_pi->dte_data;
- Smc_SIslands_DTE_Configuration *dte_tables = NULL;
- u32 table_size;
- u8 tdep_count;
- u32 i;
-
- if (dte_data == NULL)
- si_pi->enable_dte = false;
-
- if (si_pi->enable_dte == false)
- return 0;
-
- if (dte_data->k <= 0)
- return -EINVAL;
-
- dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
- if (dte_tables == NULL) {
- si_pi->enable_dte = false;
- return -ENOMEM;
- }
-
- table_size = dte_data->k;
-
- if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
- table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
-
- tdep_count = dte_data->tdep_count;
- if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
- tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
-
- dte_tables->K = cpu_to_be32(table_size);
- dte_tables->T0 = cpu_to_be32(dte_data->t0);
- dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
- dte_tables->WindowSize = dte_data->window_size;
- dte_tables->temp_select = dte_data->temp_select;
- dte_tables->DTE_mode = dte_data->dte_mode;
- dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
-
- if (tdep_count > 0)
- table_size--;
-
- for (i = 0; i < table_size; i++) {
- dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
- dte_tables->R[i] = cpu_to_be32(dte_data->r[i]);
- }
-
- dte_tables->Tdep_count = tdep_count;
-
- for (i = 0; i < (u32)tdep_count; i++) {
- dte_tables->T_limits[i] = dte_data->t_limits[i];
- dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
- dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
- }
-
- ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
- (u8 *)dte_tables,
- sizeof(Smc_SIslands_DTE_Configuration),
- si_pi->sram_end);
- kfree(dte_tables);
-
- return ret;
-}
-
-static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
- u16 *max, u16 *min)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct amdgpu_cac_leakage_table *table =
- &adev->pm.dpm.dyn_state.cac_leakage_table;
- u32 i;
- u32 v0_loadline;
-
- if (table == NULL)
- return -EINVAL;
-
- *max = 0;
- *min = 0xFFFF;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].vddc > *max)
- *max = table->entries[i].vddc;
- if (table->entries[i].vddc < *min)
- *min = table->entries[i].vddc;
- }
-
- if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
- return -EINVAL;
-
- v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
-
- if (v0_loadline > 0xFFFFUL)
- return -EINVAL;
-
- *min = (u16)v0_loadline;
-
- if ((*min > *max) || (*max == 0) || (*min == 0))
- return -EINVAL;
-
- return 0;
-}
-
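-/* Voltage step that spreads [min, max] across the leakage LUT entries, rounded up. */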
-static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
-{
- return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
- SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
-}
-
-static int si_init_dte_leakage_table(struct amdgpu_device *adev,
- PP_SIslands_CacConfig *cac_tables,
- u16 vddc_max, u16 vddc_min, u16 vddc_step,
- u16 t0, u16 t_step)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 leakage;
- unsigned int i, j;
- s32 t;
- u32 smc_leakage;
- u32 scaling_factor;
- u16 voltage;
-
- scaling_factor = si_get_smc_power_scaling_factor(adev);
-
-	for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
- t = (1000 * (i * t_step + t0));
-
- for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
- voltage = vddc_max - (vddc_step * j);
-
- si_calculate_leakage_for_v_and_t(adev,
- &si_pi->powertune_data->leakage_coefficients,
- voltage,
- t,
- si_pi->dyn_powertune_data.cac_leakage,
- &leakage);
-
- smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
-
- if (smc_leakage > 0xFFFF)
- smc_leakage = 0xFFFF;
-
- cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
- cpu_to_be16((u16)smc_leakage);
- }
- }
- return 0;
-}
-
-static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
- PP_SIslands_CacConfig *cac_tables,
- u16 vddc_max, u16 vddc_min, u16 vddc_step)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 leakage;
- unsigned int i, j;
- u32 smc_leakage;
- u32 scaling_factor;
- u16 voltage;
-
- scaling_factor = si_get_smc_power_scaling_factor(adev);
-
- for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
- voltage = vddc_max - (vddc_step * j);
-
- si_calculate_leakage_for_v(adev,
- &si_pi->powertune_data->leakage_coefficients,
- si_pi->powertune_data->fixed_kt,
- voltage,
- si_pi->dyn_powertune_data.cac_leakage,
- &leakage);
-
- smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
-
- if (smc_leakage > 0xFFFF)
- smc_leakage = 0xFFFF;
-
-		for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
- cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
- cpu_to_be16((u16)smc_leakage);
- }
- return 0;
-}
-
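-/*
- * Program the CAC window, build the leakage lookup table (either the
- * temperature dependent DTE variant or the simplified fixed-temperature one)
- * and upload the resulting CAC configuration to SMC SRAM.  On any failure,
- * CAC and power containment are disabled.
- */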
-static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- PP_SIslands_CacConfig *cac_tables = NULL;
- u16 vddc_max, vddc_min, vddc_step;
- u16 t0, t_step;
- u32 load_line_slope, reg;
- int ret = 0;
- u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
-
- if (ni_pi->enable_cac == false)
- return 0;
-
- cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
- if (!cac_tables)
- return -ENOMEM;
-
- reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
- reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
- WREG32(CG_CAC_CTRL, reg);
-
- si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
- si_pi->dyn_powertune_data.dc_pwr_value =
- si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
- si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
- si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
-
- si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
-
- ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
- if (ret)
- goto done_free;
-
- vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
- vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
- t_step = 4;
- t0 = 60;
-
- if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
- ret = si_init_dte_leakage_table(adev, cac_tables,
- vddc_max, vddc_min, vddc_step,
- t0, t_step);
- else
- ret = si_init_simplified_leakage_table(adev, cac_tables,
- vddc_max, vddc_min, vddc_step);
- if (ret)
- goto done_free;
-
- load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
-
- cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
- cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
- cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
- cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
- cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
- cac_tables->R_LL = cpu_to_be32(load_line_slope);
- cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
- cac_tables->calculation_repeats = cpu_to_be32(2);
- cac_tables->dc_cac = cpu_to_be32(0);
- cac_tables->log2_PG_LKG_SCALE = 12;
- cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
- cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
- cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
-
- ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
- (u8 *)cac_tables,
- sizeof(PP_SIslands_CacConfig),
- si_pi->sram_end);
-
- if (ret)
- goto done_free;
-
- ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
-
-done_free:
- if (ret) {
- ni_pi->enable_cac = false;
- ni_pi->enable_power_containment = false;
- }
-
- kfree(cac_tables);
-
- return ret;
-}
-
-static int si_program_cac_config_registers(struct amdgpu_device *adev,
- const struct si_cac_config_reg *cac_config_regs)
-{
- const struct si_cac_config_reg *config_regs = cac_config_regs;
- u32 data = 0, offset;
-
- if (!config_regs)
- return -EINVAL;
-
- while (config_regs->offset != 0xFFFFFFFF) {
- switch (config_regs->type) {
- case SISLANDS_CACCONFIG_CGIND:
- offset = SMC_CG_IND_START + config_regs->offset;
- if (offset < SMC_CG_IND_END)
- data = RREG32_SMC(offset);
- break;
- default:
- data = RREG32(config_regs->offset);
- break;
- }
-
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
-
- switch (config_regs->type) {
- case SISLANDS_CACCONFIG_CGIND:
- offset = SMC_CG_IND_START + config_regs->offset;
- if (offset < SMC_CG_IND_END)
- WREG32_SMC(offset, data);
- break;
- default:
- WREG32(config_regs->offset, data);
- break;
- }
- config_regs++;
- }
- return 0;
-}
-
-static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- int ret;
-
- if ((ni_pi->enable_cac == false) ||
- (ni_pi->cac_configuration_required == false))
- return 0;
-
- ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
- if (ret)
- return ret;
- ret = si_program_cac_config_registers(adev, si_pi->cac_override);
- if (ret)
- return ret;
- ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int si_enable_smc_cac(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- bool enable)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- PPSMC_Result smc_result;
- int ret = 0;
-
- if (ni_pi->enable_cac) {
- if (enable) {
- if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
- if (ni_pi->support_cac_long_term_average) {
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
- if (smc_result != PPSMC_Result_OK)
- ni_pi->support_cac_long_term_average = false;
- }
-
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
- if (smc_result != PPSMC_Result_OK) {
- ret = -EINVAL;
- ni_pi->cac_enabled = false;
- } else {
- ni_pi->cac_enabled = true;
- }
-
- if (si_pi->enable_dte) {
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
- if (smc_result != PPSMC_Result_OK)
- ret = -EINVAL;
- }
- }
- } else if (ni_pi->cac_enabled) {
- if (si_pi->enable_dte)
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
-
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
-
- ni_pi->cac_enabled = false;
-
- if (ni_pi->support_cac_long_term_average)
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
- }
- }
- return ret;
-}
-
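-/*
- * Precompute a 256-entry SPLL divider table for the SMC: for each sclk step
- * (in increments of 512) extract the post/feedback dividers and spread
- * spectrum parameters, pack them into the table format, and copy the table
- * to SMC SRAM.
- */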
-static int si_init_smc_spll_table(struct amdgpu_device *adev)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
- SISLANDS_SMC_SCLK_VALUE sclk_params;
- u32 fb_div, p_div;
- u32 clk_s, clk_v;
- u32 sclk = 0;
- int ret = 0;
- u32 tmp;
- int i;
-
- if (si_pi->spll_table_start == 0)
- return -EINVAL;
-
- spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
- if (spll_table == NULL)
- return -ENOMEM;
-
- for (i = 0; i < 256; i++) {
- ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
- if (ret)
- break;
- p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
- fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
- clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
- clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
-
- fb_div &= ~0x00001FFF;
- fb_div >>= 1;
- clk_v >>= 6;
-
- if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
- ret = -EINVAL;
- if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
- ret = -EINVAL;
- if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
- ret = -EINVAL;
- if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
- ret = -EINVAL;
-
- if (ret)
- break;
-
- tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
- ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
- spll_table->freq[i] = cpu_to_be32(tmp);
-
- tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
- ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
- spll_table->ss[i] = cpu_to_be32(tmp);
-
- sclk += 512;
- }
-
- if (!ret)
- ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
- (u8 *)spll_table,
- sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
- si_pi->sram_end);
-
- if (ret)
- ni_pi->enable_power_containment = false;
-
- kfree(spll_table);
-
- return ret;
-}
-
-static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
- u16 vce_voltage)
-{
- u16 highest_leakage = 0;
- struct si_power_info *si_pi = si_get_pi(adev);
- int i;
-
-	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
- if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
- highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
- }
-
- if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
- return highest_leakage;
-
- return vce_voltage;
-}
-
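-/*
- * Look up the minimum vddc required for the given VCE clocks from the
- * dependency table, falling back to the table's highest entry, then cap the
- * result at the highest known leakage voltage.
- */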
-static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
- u32 evclk, u32 ecclk, u16 *voltage)
-{
- u32 i;
- int ret = -EINVAL;
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
-
- if (((evclk == 0) && (ecclk == 0)) ||
- (table && (table->count == 0))) {
- *voltage = 0;
- return 0;
- }
-
- for (i = 0; i < table->count; i++) {
- if ((evclk <= table->entries[i].evclk) &&
- (ecclk <= table->entries[i].ecclk)) {
- *voltage = table->entries[i].v;
- ret = 0;
- break;
- }
- }
-
- /* if no match return the highest voltage */
- if (ret)
- *voltage = table->entries[table->count - 1].v;
-
- *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
-
- return ret;
-}
-
-static bool si_dpm_vblank_too_short(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we never hit the non-gddr5 limit so disable it */
- u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
-
-	return vblank_time < switch_limit;
-}
-
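-/*
- * Copy the MC arbiter DRAM timing registers and burst time from one
- * register set (F0-F3) to another, then request the switch to the
- * destination set through MC_ARB_CG.
- */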
-static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
- u32 arb_freq_src, u32 arb_freq_dest)
-{
- u32 mc_arb_dram_timing;
- u32 mc_arb_dram_timing2;
- u32 burst_time;
- u32 mc_cg_config;
-
- switch (arb_freq_src) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
- burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
- break;
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
- burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
- break;
- case MC_CG_ARB_FREQ_F2:
- mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
- mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
- burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
- break;
- case MC_CG_ARB_FREQ_F3:
- mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
- mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
- burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
- break;
- default:
- return -EINVAL;
- }
-
- switch (arb_freq_dest) {
- case MC_CG_ARB_FREQ_F0:
- WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
- break;
- case MC_CG_ARB_FREQ_F1:
- WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
- break;
- case MC_CG_ARB_FREQ_F2:
- WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
- WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
- WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
- break;
- case MC_CG_ARB_FREQ_F3:
- WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
- WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
- WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
- break;
- default:
- return -EINVAL;
- }
-
- mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
- WREG32(MC_CG_CONFIG, mc_cg_config);
- WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
-
- return 0;
-}
-
-static void ni_update_current_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct si_ps *new_ps = si_get_ps(rps);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
-
- eg_pi->current_rps = *rps;
- ni_pi->current_ps = *new_ps;
- eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
- adev->pm.dpm.current_ps = &eg_pi->current_rps;
-}
-
-static void ni_update_requested_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct si_ps *new_ps = si_get_ps(rps);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
-
- eg_pi->requested_rps = *rps;
- ni_pi->requested_ps = *new_ps;
- eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
- adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
-}
-
-static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
- struct amdgpu_ps *new_ps,
- struct amdgpu_ps *old_ps)
-{
- struct si_ps *new_state = si_get_ps(new_ps);
- struct si_ps *current_state = si_get_ps(old_ps);
-
- if ((new_ps->vclk == old_ps->vclk) &&
- (new_ps->dclk == old_ps->dclk))
- return;
-
- if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
- current_state->performance_levels[current_state->performance_level_count - 1].sclk)
- return;
-
- amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
-}
-
-static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
- struct amdgpu_ps *new_ps,
- struct amdgpu_ps *old_ps)
-{
- struct si_ps *new_state = si_get_ps(new_ps);
- struct si_ps *current_state = si_get_ps(old_ps);
-
- if ((new_ps->vclk == old_ps->vclk) &&
- (new_ps->dclk == old_ps->dclk))
- return;
-
- if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
- current_state->performance_levels[current_state->performance_level_count - 1].sclk)
- return;
-
- amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
-}
-
-static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
-{
- unsigned int i;
-
- for (i = 0; i < table->count; i++)
- if (voltage <= table->entries[i].value)
- return table->entries[i].value;
-
- return table->entries[table->count - 1].value;
-}
-
-static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
- u32 max_clock, u32 requested_clock)
-{
- unsigned int i;
-
- if ((clocks == NULL) || (clocks->count == 0))
- return (requested_clock < max_clock) ? requested_clock : max_clock;
-
- for (i = 0; i < clocks->count; i++) {
- if (clocks->values[i] >= requested_clock)
- return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
- }
-
- return (clocks->values[clocks->count - 1] < max_clock) ?
- clocks->values[clocks->count - 1] : max_clock;
-}
-
-static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
- u32 max_mclk, u32 requested_mclk)
-{
- return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
- max_mclk, requested_mclk);
-}
-
-static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
- u32 max_sclk, u32 requested_sclk)
-{
- return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
- max_sclk, requested_sclk);
-}
-
-static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
- u32 *max_clock)
-{
- u32 i, clock = 0;
-
- if ((table == NULL) || (table->count == 0)) {
- *max_clock = clock;
- return;
- }
-
- for (i = 0; i < table->count; i++) {
- if (clock < table->entries[i].clk)
- clock = table->entries[i].clk;
- }
- *max_clock = clock;
-}
-
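-/*
- * Raise *voltage to the table entry for the first clock level that covers
- * the requested clock, clamped to max_voltage; if no entry covers it, raise
- * *voltage to at least max_voltage.
- */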
-static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
- u32 clock, u16 max_voltage, u16 *voltage)
-{
- u32 i;
-
- if ((table == NULL) || (table->count == 0))
- return;
-
-	for (i = 0; i < table->count; i++) {
- if (clock <= table->entries[i].clk) {
- if (*voltage < table->entries[i].v)
- *voltage = (u16)((table->entries[i].v < max_voltage) ?
- table->entries[i].v : max_voltage);
- return;
- }
- }
-
- *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
-}
-
-static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
- const struct amdgpu_clock_and_voltage_limits *max_limits,
- struct rv7xx_pl *pl)
-{
-
- if ((pl->mclk == 0) || (pl->sclk == 0))
- return;
-
- if (pl->mclk == pl->sclk)
- return;
-
- if (pl->mclk > pl->sclk) {
- if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
- pl->sclk = btc_get_valid_sclk(adev,
- max_limits->sclk,
- (pl->mclk +
- (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
- adev->pm.dpm.dyn_state.mclk_sclk_ratio);
- } else {
- if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
- pl->mclk = btc_get_valid_mclk(adev,
- max_limits->mclk,
- pl->sclk -
- adev->pm.dpm.dyn_state.sclk_mclk_delta);
- }
-}
-
-static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
- u16 max_vddc, u16 max_vddci,
- u16 *vddc, u16 *vddci)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- u16 new_voltage;
-
- if ((0 == *vddc) || (0 == *vddci))
- return;
-
- if (*vddc > *vddci) {
- if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
- new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
- (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
- *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
- }
- } else {
- if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
- new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
- (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
- *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
- }
- }
-}
-
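-/*
- * Split the scaled value i_c = (i * r_c) / 100 into the u/p pair used by
- * the hardware: u is roughly half the bit length of (i_c >> p_b) and
- * p is i_c divided by 4^u.
- */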
-static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
- u32 *p, u32 *u)
-{
- u32 b_c = 0;
- u32 i_c;
- u32 tmp;
-
- i_c = (i * r_c) / 100;
- tmp = i_c >> p_b;
-
- while (tmp) {
- b_c++;
- tmp >>= 1;
- }
-
- *u = (b_c + 1) / 2;
- *p = i_c / (1 << (2 * (*u)));
-}
-
-static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
-{
- u32 k, a, ah, al;
- u32 t1;
-
- if ((fl == 0) || (fh == 0) || (fl > fh))
- return -EINVAL;
-
- k = (100 * fh) / fl;
- t1 = (t * (k - 100));
- a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
- a = (a + 5) / 10;
- ah = ((a * t) + 5000) / 10000;
- al = a - ah;
-
- *th = t - ah;
- *tl = t + al;
-
- return 0;
-}
-
-static bool r600_is_uvd_state(u32 class, u32 class2)
-{
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return true;
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return true;
- return false;
-}
-
-static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
-{
- return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
-}
-
-static void rv770_get_max_vddc(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- u16 vddc;
-
- if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
- pi->max_vddc = 0;
- else
- pi->max_vddc = vddc;
-}
-
-static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct amdgpu_atom_ss ss;
-
- pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_ENGINE_SS, 0);
- pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_MEMORY_SS, 0);
-
- if (pi->sclk_ss || pi->mclk_ss)
- pi->dynamic_ss = true;
- else
- pi->dynamic_ss = false;
-}
-
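-/*
- * Clamp the requested power state to what the platform can sustain: apply
- * the board-specific sclk/mclk caps, the AC/DC clock and voltage limits,
- * the voltage dependency tables and the VCE voltage requirement, and
- * disable sclk/mclk switching when it would be unsafe (multiple active
- * crtcs, short vblank, or active UVD).
- */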
-static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct si_ps *ps = si_get_ps(rps);
- struct amdgpu_clock_and_voltage_limits *max_limits;
- bool disable_mclk_switching = false;
- bool disable_sclk_switching = false;
- u32 mclk, sclk;
- u16 vddc, vddci, min_vce_voltage = 0;
- u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
- u32 max_sclk = 0, max_mclk = 0;
- int i;
-
- if (adev->asic_type == CHIP_HAINAN) {
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0xC3) ||
- (adev->pdev->device == 0x6664) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)) {
- max_sclk = 75000;
- }
- if ((adev->pdev->revision == 0xC3) ||
- (adev->pdev->device == 0x6665)) {
- max_sclk = 60000;
- max_mclk = 80000;
- }
- } else if (adev->asic_type == CHIP_OLAND) {
- if ((adev->pdev->revision == 0xC7) ||
- (adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605)) {
- max_sclk = 75000;
- }
- }
-
- if (rps->vce_active) {
- rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
- rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
- si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
- &min_vce_voltage);
- } else {
- rps->evclk = 0;
- rps->ecclk = 0;
- }
-
- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
- si_dpm_vblank_too_short(adev))
- disable_mclk_switching = true;
-
- if (rps->vclk || rps->dclk) {
- disable_mclk_switching = true;
- disable_sclk_switching = true;
- }
-
- if (adev->pm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- for (i = ps->performance_level_count - 2; i >= 0; i--) {
- if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
- ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
- }
- if (adev->pm.ac_power == false) {
- for (i = 0; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].mclk > max_limits->mclk)
- ps->performance_levels[i].mclk = max_limits->mclk;
- if (ps->performance_levels[i].sclk > max_limits->sclk)
- ps->performance_levels[i].sclk = max_limits->sclk;
- if (ps->performance_levels[i].vddc > max_limits->vddc)
- ps->performance_levels[i].vddc = max_limits->vddc;
- if (ps->performance_levels[i].vddci > max_limits->vddci)
- ps->performance_levels[i].vddci = max_limits->vddci;
- }
- }
-
- /* limit clocks to max supported clocks based on voltage dependency tables */
- btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- &max_sclk_vddc);
- btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- &max_mclk_vddci);
- btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- &max_mclk_vddc);
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (max_sclk_vddc) {
- if (ps->performance_levels[i].sclk > max_sclk_vddc)
- ps->performance_levels[i].sclk = max_sclk_vddc;
- }
- if (max_mclk_vddci) {
- if (ps->performance_levels[i].mclk > max_mclk_vddci)
- ps->performance_levels[i].mclk = max_mclk_vddci;
- }
- if (max_mclk_vddc) {
- if (ps->performance_levels[i].mclk > max_mclk_vddc)
- ps->performance_levels[i].mclk = max_mclk_vddc;
- }
- if (max_mclk) {
- if (ps->performance_levels[i].mclk > max_mclk)
- ps->performance_levels[i].mclk = max_mclk;
- }
- if (max_sclk) {
- if (ps->performance_levels[i].sclk > max_sclk)
- ps->performance_levels[i].sclk = max_sclk;
- }
- }
-
- /* XXX validate the min clocks required for display */
-
- if (disable_mclk_switching) {
- mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
- } else {
- mclk = ps->performance_levels[0].mclk;
- vddci = ps->performance_levels[0].vddci;
- }
-
- if (disable_sclk_switching) {
- sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
- vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
- } else {
- sclk = ps->performance_levels[0].sclk;
- vddc = ps->performance_levels[0].vddc;
- }
-
- if (rps->vce_active) {
- if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
- sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
- if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
- mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
- }
-
- /* adjusted low state */
- ps->performance_levels[0].sclk = sclk;
- ps->performance_levels[0].mclk = mclk;
- ps->performance_levels[0].vddc = vddc;
- ps->performance_levels[0].vddci = vddci;
-
- if (disable_sclk_switching) {
- sclk = ps->performance_levels[0].sclk;
- for (i = 1; i < ps->performance_level_count; i++) {
- if (sclk < ps->performance_levels[i].sclk)
- sclk = ps->performance_levels[i].sclk;
- }
- for (i = 0; i < ps->performance_level_count; i++) {
- ps->performance_levels[i].sclk = sclk;
- ps->performance_levels[i].vddc = vddc;
- }
- } else {
- for (i = 1; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
- ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
- if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
- ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
- }
- }
-
- if (disable_mclk_switching) {
- mclk = ps->performance_levels[0].mclk;
- for (i = 1; i < ps->performance_level_count; i++) {
- if (mclk < ps->performance_levels[i].mclk)
- mclk = ps->performance_levels[i].mclk;
- }
- for (i = 0; i < ps->performance_level_count; i++) {
- ps->performance_levels[i].mclk = mclk;
- ps->performance_levels[i].vddci = vddci;
- }
- } else {
- for (i = 1; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
- ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
- if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
- ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
- }
- }
-
- for (i = 0; i < ps->performance_level_count; i++)
- btc_adjust_clock_combinations(adev, max_limits,
- &ps->performance_levels[i]);
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].vddc < min_vce_voltage)
- ps->performance_levels[i].vddc = min_vce_voltage;
- btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- ps->performance_levels[i].sclk,
- max_limits->vddc, &ps->performance_levels[i].vddc);
- btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- ps->performance_levels[i].mclk,
- max_limits->vddci, &ps->performance_levels[i].vddci);
- btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- ps->performance_levels[i].mclk,
- max_limits->vddc, &ps->performance_levels[i].vddc);
- btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
- adev->clock.current_dispclk,
- max_limits->vddc, &ps->performance_levels[i].vddc);
- }
-
- for (i = 0; i < ps->performance_level_count; i++) {
- btc_apply_voltage_delta_rules(adev,
- max_limits->vddc, max_limits->vddci,
- &ps->performance_levels[i].vddc,
- &ps->performance_levels[i].vddci);
- }
-
- ps->dc_compatible = true;
- for (i = 0; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
- ps->dc_compatible = false;
- }
-}
-
-#if 0
-static int si_read_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 *value)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- return amdgpu_si_read_smc_sram_dword(adev,
- si_pi->soft_regs_start + reg_offset, value,
- si_pi->sram_end);
-}
-#endif
-
-static int si_write_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 value)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- return amdgpu_si_write_smc_sram_dword(adev,
- si_pi->soft_regs_start + reg_offset,
- value, si_pi->sram_end);
-}
-
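-/*
- * Detect one specific 1GB GDDR5 board (device 0x6819 with a particular
- * memory vendor/revision ID) by reading the MC sequencer and RAMCFG
- * registers and computing the memory density.
- */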
-static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
-{
- bool ret = false;
- u32 tmp, width, row, column, bank, density;
- bool is_memory_gddr5, is_special;
-
- tmp = RREG32(MC_SEQ_MISC0);
- is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
- is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
-		&& (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
-
- WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
- width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
-
- tmp = RREG32(MC_ARB_RAMCFG);
- row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
- column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
- bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
-
- density = (1 << (row + column - 20 + bank)) * width;
-
- if ((adev->pdev->device == 0x6819) &&
- is_memory_gddr5 && is_special && (density == 0x400))
- ret = true;
-
- return ret;
-}
-
-static void si_get_leakage_vddc(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u16 vddc, count = 0;
- int i, ret;
-
- for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
- ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
-
- if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
- si_pi->leakage_voltage.entries[count].voltage = vddc;
- si_pi->leakage_voltage.entries[count].leakage_index =
- SISLANDS_LEAKAGE_INDEX0 + i;
- count++;
- }
- }
- si_pi->leakage_voltage.count = count;
-}
-
-static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
- u32 index, u16 *leakage_voltage)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- int i;
-
- if (leakage_voltage == NULL)
- return -EINVAL;
-
- if ((index & 0xff00) != 0xff00)
- return -EINVAL;
-
- if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
- return -EINVAL;
-
- if (index < SISLANDS_LEAKAGE_INDEX0)
- return -EINVAL;
-
- for (i = 0; i < si_pi->leakage_voltage.count; i++) {
- if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
- *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
- return 0;
- }
- }
- return -EAGAIN;
-}
-
-static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- bool want_thermal_protection;
- enum amdgpu_dpm_event_src dpm_event_src;
-
- switch (sources) {
- case 0:
- default:
- want_thermal_protection = false;
- break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
- break;
- case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
- (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
- break;
- }
-
- if (want_thermal_protection) {
- WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
- if (pi->thermal_protection)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
- } else {
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
- }
-}
-
-static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
- enum amdgpu_dpm_auto_throttle_src source,
- bool enable)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
-
- if (enable) {
- if (!(pi->active_auto_throttle_sources & (1 << source))) {
- pi->active_auto_throttle_sources |= 1 << source;
- si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
- }
- } else {
- if (pi->active_auto_throttle_sources & (1 << source)) {
- pi->active_auto_throttle_sources &= ~(1 << source);
- si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
- }
- }
-}
-
-static void si_start_dpm(struct amdgpu_device *adev)
-{
- WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
-}
-
-static void si_stop_dpm(struct amdgpu_device *adev)
-{
- WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
-}
-
-static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
-{
- if (enable)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
- else
- WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
-}
-
-#if 0
-static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
- u32 thermal_level)
-{
- PPSMC_Result ret;
-
- if (thermal_level == 0) {
- ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
- if (ret == PPSMC_Result_OK)
- return 0;
- else
- return -EINVAL;
- }
- return 0;
-}
-
-static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
-{
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
-}
-#endif
-
-#if 0
-static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
-{
- if (ac_power)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-
- return 0;
-}
-#endif
-
-static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter)
-{
- WREG32(SMC_SCRATCH0, parameter);
- return amdgpu_si_send_msg_to_smc(adev, msg);
-}
-
-static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
-{
- if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
- return -EINVAL;
-
- return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int si_dpm_force_performance_level(void *handle,
- enum amd_dpm_forced_level level)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
- struct si_ps *ps = si_get_ps(rps);
- u32 levels = ps->performance_level_count;
-
- if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
- return -EINVAL;
-
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
- return -EINVAL;
- } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
- return -EINVAL;
-
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
- return -EINVAL;
- } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
- return -EINVAL;
-
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- adev->pm.dpm.forced_level = level;
-
- return 0;
-}
-
-#if 0
-static int si_set_boot_state(struct amdgpu_device *adev)
-{
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-#endif
-
-static int si_set_sw_state(struct amdgpu_device *adev)
-{
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int si_halt_smc(struct amdgpu_device *adev)
-{
- if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
- return -EINVAL;
-
- return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int si_resume_smc(struct amdgpu_device *adev)
-{
- if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
- return -EINVAL;
-
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static void si_dpm_start_smc(struct amdgpu_device *adev)
-{
- amdgpu_si_program_jump_on_start(adev);
- amdgpu_si_start_smc(adev);
- amdgpu_si_smc_clock(adev, true);
-}
-
-static void si_dpm_stop_smc(struct amdgpu_device *adev)
-{
- amdgpu_si_reset_smc(adev);
- amdgpu_si_smc_clock(adev, false);
-}
-
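-/*
- * Parse the SMC firmware header to locate the state, soft register, MC
- * register, fan, ARB, CAC, DTE, SPLL and PAPM tables in SMC SRAM.
- */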
-static int si_process_firmware_header(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->state_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->soft_regs_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->mc_reg_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->fan_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->arb_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->cac_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->dte_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->spll_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->papm_cfg_table_start = tmp;
-
- return ret;
-}
-
-static void si_read_clock_registers(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
- si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
- si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
- si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
- si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
- si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
- si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
- si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
- si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
- si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
- si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
- si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
- si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
- si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
- si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
-}
-
-static void si_enable_thermal_protection(struct amdgpu_device *adev,
- bool enable)
-{
- if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
- else
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
-}
-
-static void si_enable_acpi_power_management(struct amdgpu_device *adev)
-{
- WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
-}
-
-#if 0
-static int si_enter_ulp_state(struct amdgpu_device *adev)
-{
- WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
-
- udelay(25000);
-
- return 0;
-}
-
-static int si_exit_ulp_state(struct amdgpu_device *adev)
-{
- int i;
-
- WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
-
- udelay(7000);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(SMC_RESP_0) == 1)
- break;
- udelay(1000);
- }
-
- return 0;
-}
-#endif
-
-static int si_notify_smc_display_change(struct amdgpu_device *adev,
- bool has_display)
-{
- PPSMC_Msg msg = has_display ?
- PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
-
- return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
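-/*
- * Convert the voltage regulator, ACPI and vblank timeout delays into
- * reference clock ticks and program them into the SMC soft registers.
- */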
-static void si_program_response_times(struct amdgpu_device *adev)
-{
- u32 voltage_response_time, acpi_delay_time, vbi_time_out;
- u32 vddc_dly, acpi_dly, vbi_dly;
- u32 reference_clock;
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
-
- voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
-
- if (voltage_response_time == 0)
- voltage_response_time = 1000;
-
- acpi_delay_time = 15000;
- vbi_time_out = 100000;
-
- reference_clock = amdgpu_asic_get_xclk(adev);
-
- vddc_dly = (voltage_response_time * reference_clock) / 100;
- acpi_dly = (acpi_delay_time * reference_clock) / 100;
- vbi_dly = (vbi_time_out * reference_clock) / 100;
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
-}
-
-static void si_program_ds_registers(struct amdgpu_device *adev)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- u32 tmp;
-
- /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
- if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
- tmp = 0x10;
- else
- tmp = 0x1;
-
- if (eg_pi->sclk_deep_sleep) {
- WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
- WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
- ~AUTOSCALE_ON_SS_CLEAR);
- }
-}
-
-static void si_program_display_gap(struct amdgpu_device *adev)
-{
- u32 tmp, pipe;
- int i;
-
- tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- if (adev->pm.dpm.new_active_crtc_count > 0)
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
- else
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
-
- if (adev->pm.dpm.new_active_crtc_count > 1)
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
- else
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
-
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
-
- tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
- pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
-
- if ((adev->pm.dpm.new_active_crtc_count > 0) &&
- (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
- /* find the first active crtc */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i))
- break;
- }
- if (i == adev->mode_info.num_crtc)
- pipe = 0;
- else
- pipe = i;
-
- tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
- tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
- WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
- }
-
- /* Setting this to false forces the performance state to low if the crtcs are disabled.
- * This can be a problem on PowerXpress systems or if you want to use the card
- * for offscreen rendering or compute if there are no crtcs enabled.
- */
- si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
-}
-
-static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
-
- if (enable) {
- if (pi->sclk_ss)
- WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
- } else {
- WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
- WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
- }
-}
-
-static void si_setup_bsp(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- r600_calculate_u_and_p(pi->asi,
- xclk,
- 16,
- &pi->bsp,
- &pi->bsu);
-
- r600_calculate_u_and_p(pi->pasi,
- xclk,
- 16,
- &pi->pbsp,
- &pi->pbsu);
-
- pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
- pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
-
- WREG32(CG_BSP, pi->dsp);
-}
-
-static void si_program_git(struct amdgpu_device *adev)
-{
- WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
-}
-
-static void si_program_tp(struct amdgpu_device *adev)
-{
- int i;
- enum r600_td td = R600_TD_DFLT;
-
- for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
- WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
-
- if (td == R600_TD_AUTO)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
- else
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
-
- if (td == R600_TD_UP)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
-
- if (td == R600_TD_DOWN)
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
-}
-
-static void si_program_tpp(struct amdgpu_device *adev)
-{
- WREG32(CG_TPC, R600_TPC_DFLT);
-}
-
-static void si_program_sstp(struct amdgpu_device *adev)
-{
- WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
-}
-
-static void si_enable_display_gap(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
-
- tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
- DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
-
- tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
- DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
-}
-
-static void si_program_vc(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
-
- WREG32(CG_FTV, pi->vrc);
-}
-
-static void si_clear_vc(struct amdgpu_device *adev)
-{
- WREG32(CG_FTV, 0);
-}
-
-static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
-{
- u8 mc_para_index;
-
- if (memory_clock < 10000)
- mc_para_index = 0;
- else if (memory_clock >= 80000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
- return mc_para_index;
-}
-
-static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
-{
- u8 mc_para_index;
-
- if (strobe_mode) {
- if (memory_clock < 12500)
- mc_para_index = 0x00;
- else if (memory_clock > 47500)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 10000) / 2500);
- } else {
- if (memory_clock < 65000)
- mc_para_index = 0x00;
- else if (memory_clock > 135000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 60000) / 5000);
- }
- return mc_para_index;
-}
-
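-/* Return the SMC mclk frequency ratio for a memory clock, with the strobe
- * enable flag set when the clock is at or below the strobe mode threshold.
- */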
-static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- bool strobe_mode = false;
- u8 result = 0;
-
- if (mclk <= pi->mclk_strobe_mode_threshold)
- strobe_mode = true;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
- result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
- else
- result = si_get_ddr3_mclk_frequency_ratio(mclk);
-
- if (strobe_mode)
- result |= SISLANDS_SMC_STROBE_ENABLE;
-
- return result;
-}
-
-static int si_upload_firmware(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- amdgpu_si_reset_smc(adev);
- amdgpu_si_smc_clock(adev, false);
-
- return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
-}
-
-static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
- const struct atom_voltage_table *table,
- const struct amdgpu_phase_shedding_limits_table *limits)
-{
- u32 data, num_bits, num_levels;
-
- if ((table == NULL) || (limits == NULL))
- return false;
-
- data = table->mask_low;
-
- num_bits = hweight32(data);
-
- if (num_bits == 0)
- return false;
-
- num_levels = (1 << num_bits);
-
- if (table->count != num_levels)
- return false;
-
- if (limits->count != (num_levels - 1))
- return false;
-
- return true;
-}
-
-static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
- u32 max_voltage_steps,
- struct atom_voltage_table *voltage_table)
-{
- unsigned int i, diff;
-
- if (voltage_table->count <= max_voltage_steps)
- return;
-
- diff = voltage_table->count - max_voltage_steps;
-
-	for (i = 0; i < max_voltage_steps; i++)
- voltage_table->entries[i] = voltage_table->entries[i + diff];
-
- voltage_table->count = max_voltage_steps;
-}
-
-static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
- struct atom_voltage_table *voltage_table)
-{
- u32 i;
-
- if (voltage_dependency_table == NULL)
- return -EINVAL;
-
- voltage_table->mask_low = 0;
- voltage_table->phase_delay = 0;
-
- voltage_table->count = voltage_dependency_table->count;
- for (i = 0; i < voltage_table->count; i++) {
- voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
- voltage_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
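-/* Build the VDDC, VDDCI, MVDD and phase shedding voltage tables from ATOM data
- * (or from the SVI2 dependency tables), keeping them within the number of
- * voltage steps the SMC state table supports.
- */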
-static int si_construct_voltage_tables(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- int ret;
-
- if (pi->voltage_control) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
- if (ret)
- return ret;
-
- if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(adev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &eg_pi->vddc_voltage_table);
- } else if (si_pi->voltage_control_svi2) {
- ret = si_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- &eg_pi->vddc_voltage_table);
- if (ret)
- return ret;
- } else {
- return -EINVAL;
- }
-
- if (eg_pi->vddci_control) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
- VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
- if (ret)
- return ret;
-
- if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(adev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &eg_pi->vddci_voltage_table);
- }
- if (si_pi->vddci_control_svi2) {
- ret = si_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- &eg_pi->vddci_voltage_table);
- if (ret)
- return ret;
- }
-
- if (pi->mvdd_control) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
- VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
-
- if (ret) {
- pi->mvdd_control = false;
- return ret;
- }
-
- if (si_pi->mvdd_voltage_table.count == 0) {
- pi->mvdd_control = false;
- return -EINVAL;
- }
-
- if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(adev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &si_pi->mvdd_voltage_table);
- }
-
- if (si_pi->vddc_phase_shed_control) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
- if (ret)
- si_pi->vddc_phase_shed_control = false;
-
- if ((si_pi->vddc_phase_shed_table.count == 0) ||
- (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
- si_pi->vddc_phase_shed_control = false;
- }
-
- return 0;
-}
-
-static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
- const struct atom_voltage_table *voltage_table,
- SISLANDS_SMC_STATETABLE *table)
-{
- unsigned int i;
-
- for (i = 0; i < voltage_table->count; i++)
- table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
-}
-
-static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
- SISLANDS_SMC_STATETABLE *table)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- u8 i;
-
- if (si_pi->voltage_control_svi2) {
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
- si_pi->svc_gpio_id);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
- si_pi->svd_gpio_id);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
- 2);
- } else {
- if (eg_pi->vddc_voltage_table.count) {
- si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
-
- for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
- if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
- table->maxVDDCIndexInPPTable = i;
- break;
- }
- }
- }
-
- if (eg_pi->vddci_voltage_table.count) {
- si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
-
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
- }
-
- if (si_pi->mvdd_voltage_table.count) {
- si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
-
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
- cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
- }
-
- if (si_pi->vddc_phase_shed_control) {
- if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
- si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
-
- table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
- cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
- (u32)si_pi->vddc_phase_shed_table.phase_delay);
- } else {
- si_pi->vddc_phase_shed_control = false;
- }
- }
- }
-
- return 0;
-}
-
-static int si_populate_voltage_value(struct amdgpu_device *adev,
- const struct atom_voltage_table *table,
- u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
-{
- unsigned int i;
-
- for (i = 0; i < table->count; i++) {
- if (value <= table->entries[i].value) {
- voltage->index = (u8)i;
- voltage->value = cpu_to_be16(table->entries[i].value);
- break;
- }
- }
-
- if (i >= table->count)
- return -EINVAL;
-
- return 0;
-}
-
-static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
- SISLANDS_SMC_VOLTAGE_VALUE *voltage)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (pi->mvdd_control) {
- if (mclk <= pi->mvdd_split_frequency)
- voltage->index = 0;
- else
- voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
-
- voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
- }
- return 0;
-}
-
-static int si_get_std_voltage_value(struct amdgpu_device *adev,
- SISLANDS_SMC_VOLTAGE_VALUE *voltage,
- u16 *std_voltage)
-{
- u16 v_index;
- bool voltage_found = false;
- *std_voltage = be16_to_cpu(voltage->value);
-
- if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
- return -EINVAL;
-
- for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
- if (be16_to_cpu(voltage->value) ==
- (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
- voltage_found = true;
- if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- *std_voltage =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
- else
- *std_voltage =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
- break;
- }
- }
-
- if (!voltage_found) {
- for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
- if (be16_to_cpu(voltage->value) <=
- (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
- voltage_found = true;
- if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- *std_voltage =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
- else
- *std_voltage =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
- break;
- }
- }
- }
- } else {
- if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
- }
- }
-
- return 0;
-}
-
-static int si_populate_std_voltage_value(struct amdgpu_device *adev,
- u16 value, u8 index,
- SISLANDS_SMC_VOLTAGE_VALUE *voltage)
-{
- voltage->index = index;
- voltage->value = cpu_to_be16(value);
-
- return 0;
-}
-
-static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
- const struct amdgpu_phase_shedding_limits_table *limits,
- u16 voltage, u32 sclk, u32 mclk,
- SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
-{
- unsigned int i;
-
- for (i = 0; i < limits->count; i++) {
- if ((voltage <= limits->entries[i].voltage) &&
- (sclk <= limits->entries[i].sclk) &&
- (mclk <= limits->entries[i].mclk))
- break;
- }
-
- smc_voltage->phase_settings = (u8)i;
-
- return 0;
-}
-
-static int si_init_arb_table_index(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- tmp &= 0x00FFFFFF;
- tmp |= MC_CG_ARB_FREQ_F1 << 24;
-
- return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
- tmp, si_pi->sram_end);
-}
-
-static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
-{
- return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int si_reset_to_default(struct amdgpu_device *adev)
-{
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- tmp = (tmp >> 24) & 0xff;
-
- if (tmp == MC_CG_ARB_FREQ_F0)
- return 0;
-
- return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
-}
-
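-/* Derive the MC arbiter refresh rate for an engine clock from the DRAM row
- * count and refresh rate encoded in MC_ARB_RAMCFG and MC_SEQ_MISC0.
- */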
-static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
- u32 engine_clock)
-{
- u32 dram_rows;
- u32 dram_refresh_rate;
- u32 mc_arb_rfsh_rate;
- u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
-
- if (tmp >= 4)
- dram_rows = 16384;
- else
- dram_rows = 1 << (tmp + 10);
-
- dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
- mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
-
- return mc_arb_rfsh_rate;
-}
-
-static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
- struct rv7xx_pl *pl,
- SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
-{
- u32 dram_timing;
- u32 dram_timing2;
- u32 burst_time;
-
- arb_regs->mc_arb_rfsh_rate =
- (u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
-
- amdgpu_atombios_set_engine_dram_timings(adev,
- pl->sclk,
- pl->mclk);
-
- dram_timing = RREG32(MC_ARB_DRAM_TIMING);
- dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
- burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
-
- arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
- arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
- arb_regs->mc_arb_burst_time = (u8)burst_time;
-
- return 0;
-}
-
-static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- unsigned int first_arb_set)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
- int i, ret = 0;
-
- for (i = 0; i < state->performance_level_count; i++) {
- ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
- if (ret)
- break;
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- si_pi->arb_table_start +
- offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
- sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
- (u8 *)&arb_regs,
- sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
- si_pi->sram_end);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state)
-{
- return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
- SISLANDS_DRIVER_STATE_ARB_INDEX);
-}
-
-static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
- struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (pi->mvdd_control)
- return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
- si_pi->mvdd_bootup_value, voltage);
-
- return 0;
-}
-
-static int si_populate_smc_initial_state(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_initial_state,
- SISLANDS_SMC_STATETABLE *table)
-{
- struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 reg;
- int ret;
-
- table->initialState.levels[0].mclk.vDLL_CNTL =
- cpu_to_be32(si_pi->clock_registers.dll_cntl);
- table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
- cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
- cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
- cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
- cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
- cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
- cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
- table->initialState.levels[0].mclk.vMPLL_SS =
- cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->initialState.levels[0].mclk.vMPLL_SS2 =
- cpu_to_be32(si_pi->clock_registers.mpll_ss2);
-
- table->initialState.levels[0].mclk.mclk_value =
- cpu_to_be32(initial_state->performance_levels[0].mclk);
-
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
- cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
- cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
- cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
- cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
- cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
- cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
-
- table->initialState.levels[0].sclk.sclk_value =
- cpu_to_be32(initial_state->performance_levels[0].sclk);
-
- table->initialState.levels[0].arbRefreshState =
- SISLANDS_INITIAL_STATE_ARB_INDEX;
-
- table->initialState.levels[0].ACIndex = 0;
-
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- initial_state->performance_levels[0].vddc,
- &table->initialState.levels[0].vddc);
-
- if (!ret) {
- u16 std_vddc;
-
- ret = si_get_std_voltage_value(adev,
- &table->initialState.levels[0].vddc,
- &std_vddc);
- if (!ret)
- si_populate_std_voltage_value(adev, std_vddc,
- table->initialState.levels[0].vddc.index,
- &table->initialState.levels[0].std_vddc);
- }
-
- if (eg_pi->vddci_control)
- si_populate_voltage_value(adev,
- &eg_pi->vddci_voltage_table,
- initial_state->performance_levels[0].vddci,
- &table->initialState.levels[0].vddci);
-
- if (si_pi->vddc_phase_shed_control)
- si_populate_phase_shedding_value(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- initial_state->performance_levels[0].vddc,
- initial_state->performance_levels[0].sclk,
- initial_state->performance_levels[0].mclk,
- &table->initialState.levels[0].vddc);
-
- si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
-
- reg = CG_R(0xffff) | CG_L(0);
- table->initialState.levels[0].aT = cpu_to_be32(reg);
- table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
- table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- table->initialState.levels[0].strobeMode =
- si_get_strobe_mode_settings(adev,
- initial_state->performance_levels[0].mclk);
-
- if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
- table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
- else
- table->initialState.levels[0].mcFlags = 0;
- }
-
- table->initialState.levelCount = 1;
-
- table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
-
- table->initialState.levels[0].dpm2.MaxPS = 0;
- table->initialState.levels[0].dpm2.NearTDPDec = 0;
- table->initialState.levels[0].dpm2.AboveSafeInc = 0;
- table->initialState.levels[0].dpm2.BelowSafeInc = 0;
- table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
-
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
-
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
-
- return 0;
-}
-
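-/* Fill the ACPI entry of the SMC state table with the lowest safe voltages,
- * a conservative PCIe gen and zeroed memory/engine clock values.
- */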
-static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
- SISLANDS_SMC_STATETABLE *table)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
- u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
- u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
- u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
- u32 dll_cntl = si_pi->clock_registers.dll_cntl;
- u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
- u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
- u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
- u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
- u32 reg;
- int ret;
-
- table->ACPIState = table->initialState;
-
- table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (pi->acpi_vddc) {
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
- if (!ret) {
- u16 std_vddc;
-
- ret = si_get_std_voltage_value(adev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
- if (!ret)
- si_populate_std_voltage_value(adev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
- }
- table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
-
- if (si_pi->vddc_phase_shed_control) {
- si_populate_phase_shedding_value(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- pi->acpi_vddc,
- 0,
- 0,
- &table->ACPIState.levels[0].vddc);
- }
- } else {
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
- if (!ret) {
- u16 std_vddc;
-
- ret = si_get_std_voltage_value(adev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
-
- if (!ret)
- si_populate_std_voltage_value(adev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
- }
- table->ACPIState.levels[0].gen2PCIE =
- (u8)amdgpu_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- AMDGPU_PCIE_GEN1);
-
- if (si_pi->vddc_phase_shed_control)
- si_populate_phase_shedding_value(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- pi->min_vddc_in_table,
- 0,
- 0,
- &table->ACPIState.levels[0].vddc);
- }
-
- if (pi->acpi_vddc) {
- if (eg_pi->acpi_vddci)
- si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
- eg_pi->acpi_vddci,
- &table->ACPIState.levels[0].vddci);
- }
-
- mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
- mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
-
- dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
-
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(4);
-
- table->ACPIState.levels[0].mclk.vDLL_CNTL =
- cpu_to_be32(dll_cntl);
- table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
- cpu_to_be32(mclk_pwrmgt_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
- cpu_to_be32(mpll_ad_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
- cpu_to_be32(mpll_dq_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
- cpu_to_be32(mpll_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
- cpu_to_be32(mpll_func_cntl_1);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
- cpu_to_be32(mpll_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMPLL_SS =
- cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->ACPIState.levels[0].mclk.vMPLL_SS2 =
- cpu_to_be32(si_pi->clock_registers.mpll_ss2);
-
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
- cpu_to_be32(spll_func_cntl);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
- cpu_to_be32(spll_func_cntl_2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
- cpu_to_be32(spll_func_cntl_3);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
- cpu_to_be32(spll_func_cntl_4);
-
- table->ACPIState.levels[0].mclk.mclk_value = 0;
- table->ACPIState.levels[0].sclk.sclk_value = 0;
-
- si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
-
- if (eg_pi->dynamic_ac_timing)
- table->ACPIState.levels[0].ACIndex = 0;
-
- table->ACPIState.levels[0].dpm2.MaxPS = 0;
- table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
- table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
- table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
- table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
-
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
-
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
-
- return 0;
-}
-
-static int si_populate_ulv_state(struct amdgpu_device *adev,
- SISLANDS_SMC_SWSTATE *state)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
- u32 sclk_in_sr = 1350; /* ??? */
- int ret;
-
- ret = si_convert_power_level_to_smc(adev, &ulv->pl,
- &state->levels[0]);
- if (!ret) {
- if (eg_pi->sclk_deep_sleep) {
- if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
- else
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
- }
- if (ulv->one_pcie_lane_in_ulv)
- state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
- state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
- state->levels[0].ACIndex = 1;
- state->levels[0].std_vddc = state->levels[0].vddc;
- state->levelCount = 1;
-
- state->flags |= PPSMC_SWSTATE_FLAG_DC;
- }
-
- return ret;
-}
-
-static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
- SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
- int ret;
-
- ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
- &arb_regs);
- if (ret)
- return ret;
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
- ulv->volt_change_delay);
-
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- si_pi->arb_table_start +
- offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
- sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
- (u8 *)&arb_regs,
- sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
- si_pi->sram_end);
-
- return ret;
-}
-
-static void si_get_mvdd_configuration(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
-
- pi->mvdd_split_frequency = 30000;
-}
-
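-/* Initialize and upload the SMC state table: voltage tables, thermal/system
- * flags, the initial, ACPI and ULV states, and the boot state ARB timings.
- */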
-static int si_init_smc_table(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
- const struct si_ulv_param *ulv = &si_pi->ulv;
- SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
- int ret;
- u32 lane_width;
- u32 vr_hot_gpio;
-
- si_populate_smc_voltage_tables(adev, table);
-
- switch (adev->pm.int_thermal_type) {
- case THERMAL_TYPE_SI:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
- table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
- break;
- case THERMAL_TYPE_NONE:
- table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
- break;
- default:
- table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
- break;
- }
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
- table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
- if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
- table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
- }
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
- table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
- table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
- table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
- table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
- vr_hot_gpio = adev->pm.dpm.backbias_response_time;
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
- vr_hot_gpio);
- }
-
- ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
- if (ret)
- return ret;
-
- ret = si_populate_smc_acpi_state(adev, table);
- if (ret)
- return ret;
-
- table->driverState = table->initialState;
-
- ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
- SISLANDS_INITIAL_STATE_ARB_INDEX);
- if (ret)
- return ret;
-
- if (ulv->supported && ulv->pl.vddc) {
- ret = si_populate_ulv_state(adev, &table->ULVState);
- if (ret)
- return ret;
-
- ret = si_program_ulv_memory_timing_parameters(adev);
- if (ret)
- return ret;
-
- WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
- WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
-
- lane_width = amdgpu_get_pcie_lanes(adev);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
- } else {
- table->ULVState = table->initialState;
- }
-
- return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
- (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
- si_pi->sram_end);
-}
-
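-/* Compute the SPLL divider and spread spectrum register values for the
- * requested engine clock.
- */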
-static int si_calculate_sclk_params(struct amdgpu_device *adev,
- u32 engine_clock,
- SISLANDS_SMC_SCLK_VALUE *sclk)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct atom_clock_dividers dividers;
- u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
- u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
- u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
- u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
- u64 tmp;
- u32 reference_clock = adev->clock.spll.reference_freq;
- u32 reference_divider;
- u32 fbdiv;
- int ret;
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- engine_clock, false, &dividers);
- if (ret)
- return ret;
-
- reference_divider = 1 + dividers.ref_div;
-
- tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
- do_div(tmp, reference_clock);
- fbdiv = (u32) tmp;
-
- spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
- spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
- spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
-
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(2);
-
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
-
- if (pi->sclk_ss) {
- struct amdgpu_atom_ss ss;
- u32 vco_freq = engine_clock * dividers.post_div;
-
- if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
- u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
- u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum &= ~CLK_S_MASK;
- cg_spll_spread_spectrum |= CLK_S(clk_s);
- cg_spll_spread_spectrum |= SSEN;
-
- cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
- cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
- }
- }
-
- sclk->sclk_value = engine_clock;
- sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
- sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
- sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
- sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
- sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
- sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
-
- return 0;
-}
-
-static int si_populate_sclk_value(struct amdgpu_device *adev,
- u32 engine_clock,
- SISLANDS_SMC_SCLK_VALUE *sclk)
-{
- SISLANDS_SMC_SCLK_VALUE sclk_tmp;
- int ret;
-
- ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
- if (!ret) {
- sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
- sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
- sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
- sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
- sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
- sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
- sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
- }
-
- return ret;
-}
-
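-/* Compute the MPLL divider, DLL and spread spectrum register values for the
- * requested memory clock.
- */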
-static int si_populate_mclk_value(struct amdgpu_device *adev,
- u32 engine_clock,
- u32 memory_clock,
- SISLANDS_SMC_MCLK_VALUE *mclk,
- bool strobe_mode,
- bool dll_state_on)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 dll_cntl = si_pi->clock_registers.dll_cntl;
- u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
- u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
- u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
- u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
- u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
- u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
- struct atom_mpll_param mpll_param;
- int ret;
-
- ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
- if (ret)
- return ret;
-
- mpll_func_cntl &= ~BWCTRL_MASK;
- mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
-
- mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
- mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
- CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
-
- mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
- mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
- mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
- YCLK_POST_DIV(mpll_param.post_div);
- }
-
- if (pi->mclk_ss) {
- struct amdgpu_atom_ss ss;
- u32 freq_nom;
- u32 tmp;
- u32 reference_clock = adev->clock.mpll.reference_freq;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
- freq_nom = memory_clock * 4;
- else
- freq_nom = memory_clock * 2;
-
- tmp = freq_nom / reference_clock;
- tmp = tmp * tmp;
- if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
- u32 clks = reference_clock * 5 / ss.rate;
- u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 &= ~CLKV_MASK;
- mpll_ss1 |= CLKV(clkv);
-
- mpll_ss2 &= ~CLKS_MASK;
- mpll_ss2 |= CLKS(clks);
- }
- }
-
- mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
- mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
-
- if (dll_state_on)
- mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
- else
- mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
-
- mclk->mclk_value = cpu_to_be32(memory_clock);
- mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
- mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
- mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
- mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
- mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
- mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
- mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
- mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
- mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
-
- return 0;
-}
-
-static void si_populate_smc_sp(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct si_ps *ps = si_get_ps(amdgpu_state);
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- int i;
-
- for (i = 0; i < ps->performance_level_count - 1; i++)
- smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
-
- smc_state->levels[ps->performance_level_count - 1].bSP =
- cpu_to_be32(pi->psp);
-}
-
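-/* Convert one driver performance level (sclk, mclk, voltages, PCIe gen and
- * MC flags) into the SMC hardware performance level layout.
- */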
-static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
- struct rv7xx_pl *pl,
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- int ret;
- bool dll_state_on;
- u16 std_vddc;
- bool gmc_pg = false;
-
- if (eg_pi->pcie_performance_request &&
- (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
- level->gen2PCIE = (u8)si_pi->force_pcie_gen;
- else
- level->gen2PCIE = (u8)pl->pcie_gen;
-
- ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
- if (ret)
- return ret;
-
- level->mcFlags = 0;
-
- if (pi->mclk_stutter_mode_threshold &&
- (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
- !eg_pi->uvd_enabled &&
- (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
- (adev->pm.dpm.new_active_crtc_count <= 2)) {
- level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
-
- if (gmc_pg)
- level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
- }
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- if (pl->mclk > pi->mclk_edc_enable_threshold)
- level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
-
- if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
- level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
-
- level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
-
- if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
- if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
- ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
- dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
- else
- dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
- } else {
- dll_state_on = false;
- }
- } else {
- level->strobeMode = si_get_strobe_mode_settings(adev,
- pl->mclk);
-
- dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
- }
-
- ret = si_populate_mclk_value(adev,
- pl->sclk,
- pl->mclk,
- &level->mclk,
- (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
- if (ret)
- return ret;
-
- ret = si_populate_voltage_value(adev,
- &eg_pi->vddc_voltage_table,
- pl->vddc, &level->vddc);
- if (ret)
- return ret;
-
- ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
- if (ret)
- return ret;
-
- ret = si_populate_std_voltage_value(adev, std_vddc,
- level->vddc.index, &level->std_vddc);
- if (ret)
- return ret;
-
- if (eg_pi->vddci_control) {
- ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
- pl->vddci, &level->vddci);
- if (ret)
- return ret;
- }
-
- if (si_pi->vddc_phase_shed_control) {
- ret = si_populate_phase_shedding_value(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- pl->vddc,
- pl->sclk,
- pl->mclk,
- &level->vddc);
- if (ret)
- return ret;
- }
-
- level->MaxPoweredUpCU = si_pi->max_cu;
-
- ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
-
- return ret;
-}
-
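-/* Populate the per-level aT transition thresholds from the calculated up/down
- * switch times, scaled by the bsp/pbsp sampling periods.
- */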
-static int si_populate_smc_t(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- u32 a_t;
- u32 t_l, t_h;
- u32 high_bsp;
- int i, ret;
-
- if (state->performance_level_count >= 9)
- return -EINVAL;
-
- if (state->performance_level_count < 2) {
- a_t = CG_R(0xffff) | CG_L(0);
- smc_state->levels[0].aT = cpu_to_be32(a_t);
- return 0;
- }
-
- smc_state->levels[0].aT = cpu_to_be32(0);
-
- for (i = 0; i <= state->performance_level_count - 2; i++) {
- ret = r600_calculate_at(
- (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
- 100 * R600_AH_DFLT,
- state->performance_levels[i + 1].sclk,
- state->performance_levels[i].sclk,
- &t_l,
- &t_h);
-
- if (ret) {
- t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
- t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
- }
-
- a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
- a_t |= CG_R(t_l * pi->bsp / 20000);
- smc_state->levels[i].aT = cpu_to_be32(a_t);
-
- high_bsp = (i == state->performance_level_count - 2) ?
- pi->pbsp : pi->bsp;
- a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
- smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
- }
-
- return 0;
-}
-
-static int si_disable_ulv(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
-
- if (ulv->supported)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-
- return 0;
-}
-
-static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- const struct si_power_info *si_pi = si_get_pi(adev);
- const struct si_ulv_param *ulv = &si_pi->ulv;
- const struct si_ps *state = si_get_ps(amdgpu_state);
- int i;
-
- if (state->performance_levels[0].mclk != ulv->pl.mclk)
- return false;
-
- /* XXX validate against display requirements! */
-
- for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
- if (adev->clock.current_dispclk <=
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
- if (ulv->pl.vddc <
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
- return false;
- }
- }
-
- if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
- return false;
-
- return true;
-}
-
-static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state)
-{
- const struct si_power_info *si_pi = si_get_pi(adev);
- const struct si_ulv_param *ulv = &si_pi->ulv;
-
- if (ulv->supported) {
- if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
- }
- return 0;
-}
-
-static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- int i, ret;
- u32 threshold;
- u32 sclk_in_sr = 1350; /* ??? */
-
- if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
- return -EINVAL;
-
- threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
-
- if (amdgpu_state->vclk && amdgpu_state->dclk) {
- eg_pi->uvd_enabled = true;
- if (eg_pi->smu_uvd_hs)
- smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
- } else {
- eg_pi->uvd_enabled = false;
- }
-
- if (state->dc_compatible)
- smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
-
- smc_state->levelCount = 0;
- for (i = 0; i < state->performance_level_count; i++) {
- if (eg_pi->sclk_deep_sleep) {
- if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
- if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
- smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
- else
- smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
- }
- }
-
- ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
- &smc_state->levels[i]);
- smc_state->levels[i].arbRefreshState =
- (u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
-
- if (ret)
- return ret;
-
- if (ni_pi->enable_power_containment)
- smc_state->levels[i].displayWatermark =
- (state->performance_levels[i].sclk < threshold) ?
- PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
- else
- smc_state->levels[i].displayWatermark = (i < 2) ?
- PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
-
- if (eg_pi->dynamic_ac_timing)
- smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
- else
- smc_state->levels[i].ACIndex = 0;
-
- smc_state->levelCount++;
- }
-
- si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_watermark_threshold,
- threshold / 512);
-
- si_populate_smc_sp(adev, amdgpu_state, smc_state);
-
- ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
- if (ret)
- ni_pi->enable_power_containment = false;
-
- ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
- if (ret)
- ni_pi->enable_sq_ramping = false;
-
- return si_populate_smc_t(adev, amdgpu_state, smc_state);
-}
-
-static int si_upload_sw_state(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ps *new_state = si_get_ps(amdgpu_new_state);
- int ret;
- u32 address = si_pi->state_table_start +
- offsetof(SISLANDS_SMC_STATETABLE, driverState);
- u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
- ((new_state->performance_level_count - 1) *
- sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
- SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
-
- memset(smc_state, 0, state_size);
-
- ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
- if (ret)
- return ret;
-
- return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
- state_size, si_pi->sram_end);
-}
-
-static int si_upload_ulv_state(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
- int ret = 0;
-
- if (ulv->supported && ulv->pl.vddc) {
- u32 address = si_pi->state_table_start +
- offsetof(SISLANDS_SMC_STATETABLE, ULVState);
- SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
- u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
-
- memset(smc_state, 0, state_size);
-
- ret = si_populate_ulv_state(adev, smc_state);
- if (!ret)
- ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
- state_size, si_pi->sram_end);
- }
-
- return ret;
-}
-
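-/* Tell the SMC which CRTC to use for mclk change blocking and pass the
- * watermark derived line time limits for that CRTC.
- */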
-static int si_upload_smc_data(struct amdgpu_device *adev)
-{
- struct amdgpu_crtc *amdgpu_crtc = NULL;
- int i;
-
- if (adev->pm.dpm.new_active_crtc_count == 0)
- return 0;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
- amdgpu_crtc = adev->mode_info.crtcs[i];
- break;
- }
- }
-
- if (amdgpu_crtc == NULL)
- return 0;
-
- if (amdgpu_crtc->line_time <= 0)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_crtc_index,
- amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
- amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
- amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
-
- return 0;
-}
-
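-/* Expand MC_SEQ_MISC1 and MC_SEQ_RESERVE_M entries into the additional EMRS,
- * MRS and MRS1 command register entries needed in the SMC MC register table.
- */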
-static int si_set_mc_special_registers(struct amdgpu_device *adev,
- struct si_mc_reg_table *table)
-{
- u8 i, j, k;
- u32 temp_reg;
-
- for (i = 0, j = table->last; i < table->last; i++) {
- if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- switch (table->mc_reg_address[i].s1) {
- case MC_SEQ_MISC1:
- temp_reg = RREG32(MC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++)
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) |
- ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- j++;
-
- if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- temp_reg = RREG32(MC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- j++;
-
- if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
- if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++)
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- j++;
- }
- break;
- case MC_SEQ_RESERVE_M:
- temp_reg = RREG32(MC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
-			for (k = 0; k < table->num_entries; k++)
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- j++;
- break;
- default:
- break;
- }
- }
-
- table->last = j;
-
- return 0;
-}
-
-static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
-{
- bool result = true;
- switch (in_reg) {
- case MC_SEQ_RAS_TIMING:
- *out_reg = MC_SEQ_RAS_TIMING_LP;
- break;
- case MC_SEQ_CAS_TIMING:
- *out_reg = MC_SEQ_CAS_TIMING_LP;
- break;
- case MC_SEQ_MISC_TIMING:
- *out_reg = MC_SEQ_MISC_TIMING_LP;
- break;
- case MC_SEQ_MISC_TIMING2:
- *out_reg = MC_SEQ_MISC_TIMING2_LP;
- break;
- case MC_SEQ_RD_CTL_D0:
- *out_reg = MC_SEQ_RD_CTL_D0_LP;
- break;
- case MC_SEQ_RD_CTL_D1:
- *out_reg = MC_SEQ_RD_CTL_D1_LP;
- break;
- case MC_SEQ_WR_CTL_D0:
- *out_reg = MC_SEQ_WR_CTL_D0_LP;
- break;
- case MC_SEQ_WR_CTL_D1:
- *out_reg = MC_SEQ_WR_CTL_D1_LP;
- break;
- case MC_PMG_CMD_EMRS:
- *out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
- break;
- case MC_PMG_CMD_MRS:
- *out_reg = MC_SEQ_PMG_CMD_MRS_LP;
- break;
- case MC_PMG_CMD_MRS1:
- *out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
- break;
- case MC_SEQ_PMG_TIMING:
- *out_reg = MC_SEQ_PMG_TIMING_LP;
- break;
- case MC_PMG_CMD_MRS2:
- *out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
- break;
- case MC_SEQ_WR_CTL_2:
- *out_reg = MC_SEQ_WR_CTL_2_LP;
- break;
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-static void si_set_valid_flag(struct si_mc_reg_table *table)
-{
- u8 i, j;
-
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
- table->valid_flag |= 1 << i;
- break;
- }
- }
- }
-}
-
-static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
-{
- u32 i;
- u16 address;
-
- for (i = 0; i < table->last; i++)
- table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
- address : table->mc_reg_address[i].s1;
-}
-
-static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
- struct si_mc_reg_table *si_table)
-{
- u8 i, j;
-
- if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- if (table->num_entries > MAX_AC_TIMING_ENTRIES)
- return -EINVAL;
-
- for (i = 0; i < table->last; i++)
- si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
- si_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- si_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++) {
- si_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- }
- si_table->num_entries = table->num_entries;
-
- return 0;
-}
-
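-/* Copy the VBIOS MC register table, mirror the live MC_SEQ registers into
- * their LP shadows, and derive the special/command register entries.
- */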
-static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct atom_mc_reg_table *table;
- struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
- u8 module_index = rv770_get_memory_module_index(adev);
- int ret;
-
- table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
- WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
- WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
- WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
- WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
- WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
- WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
- WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
- WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
- WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
- WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
- WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
- WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
- WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
-
- ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
- if (ret)
- goto init_mc_done;
-
- ret = si_copy_vbios_mc_reg_table(table, si_table);
- if (ret)
- goto init_mc_done;
-
- si_set_s0_mc_reg_index(si_table);
-
- ret = si_set_mc_special_registers(adev, si_table);
- if (ret)
- goto init_mc_done;
-
- si_set_valid_flag(si_table);
-
-init_mc_done:
- kfree(table);
-
- return ret;
-}
-
-static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
- SMC_SIslands_MCRegisters *mc_reg_table)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 i, j;
-
- for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
- if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
- if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- break;
- mc_reg_table->address[i].s0 =
- cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 =
- cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
- mc_reg_table->last = (u8)i;
-}
-
-static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
- SMC_SIslands_MCRegisterSet *data,
- u32 num_entries, u32 valid_flag)
-{
- u32 i, j;
-
-	for (i = 0, j = 0; j < num_entries; j++) {
- if (valid_flag & (1 << j)) {
- data->value[i] = cpu_to_be32(entry->mc_data[j]);
- i++;
- }
- }
-}
-
-static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
- struct rv7xx_pl *pl,
- SMC_SIslands_MCRegisterSet *mc_reg_table_data)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-	u32 i;
-
- for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
- if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
- break;
- }
-
- if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
- --i;
-
- si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, si_pi->mc_reg_table.last,
- si_pi->mc_reg_table.valid_flag);
-}
-
-static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SMC_SIslands_MCRegisters *mc_reg_table)
-{
- struct si_ps *state = si_get_ps(amdgpu_state);
- int i;
-
- for (i = 0; i < state->performance_level_count; i++) {
- si_convert_mc_reg_table_entry_to_smc(adev,
- &state->performance_levels[i],
- &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
- }
-}
-
-static int si_populate_mc_reg_table(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_boot_state)
-{
- struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
- SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
-
- memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
-
- si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
-
- si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
- &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
-
- si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
- &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
- si_pi->mc_reg_table.last,
- si_pi->mc_reg_table.valid_flag);
-
- if (ulv->supported && ulv->pl.vddc != 0)
- si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
- &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
- else
- si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
- &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
- si_pi->mc_reg_table.last,
- si_pi->mc_reg_table.valid_flag);
-
- si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
-
- return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
- (u8 *)smc_mc_reg_table,
- sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
-}
-
-static int si_upload_mc_reg_table(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state)
-{
- struct si_ps *new_state = si_get_ps(amdgpu_new_state);
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 address = si_pi->mc_reg_table_start +
- offsetof(SMC_SIslands_MCRegisters,
- data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
- SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
-
- memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
-
- si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
-
- return amdgpu_si_copy_bytes_to_smc(adev, address,
- (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
- sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
- si_pi->sram_end);
-}
-
-static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
-{
- if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
- else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
-}
-
-static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct si_ps *state = si_get_ps(amdgpu_state);
- int i;
- u16 pcie_speed, max_speed = 0;
-
- for (i = 0; i < state->performance_level_count; i++) {
- pcie_speed = state->performance_levels[i].pcie_gen;
- if (max_speed < pcie_speed)
- max_speed = pcie_speed;
- }
- return max_speed;
-}
-
-static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
-{
- u32 speed_cntl;
-
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
- speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
-
- return (u16)speed_cntl;
-}
-
-static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
- enum amdgpu_pcie_gen current_link_speed;
-
- if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
- current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
- else
- current_link_speed = si_pi->force_pcie_gen;
-
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
- si_pi->pspp_notify_required = false;
- if (target_link_speed > current_link_speed) {
- switch (target_link_speed) {
-#if defined(CONFIG_ACPI)
- case AMDGPU_PCIE_GEN3:
- if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
- break;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
- if (current_link_speed == AMDGPU_PCIE_GEN2)
- break;
- /* fall through */
- case AMDGPU_PCIE_GEN2:
- if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
- break;
-#endif
- /* fall through */
- default:
- si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- si_pi->pspp_notify_required = true;
- }
-}
-
-static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
- u8 request;
-
- if (si_pi->pspp_notify_required) {
- if (target_link_speed == AMDGPU_PCIE_GEN3)
- request = PCIE_PERF_REQ_PECI_GEN3;
- else if (target_link_speed == AMDGPU_PCIE_GEN2)
- request = PCIE_PERF_REQ_PECI_GEN2;
- else
- request = PCIE_PERF_REQ_PECI_GEN1;
-
- if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
- (si_get_current_pcie_speed(adev) > 0))
- return;
-
-#if defined(CONFIG_ACPI)
- amdgpu_acpi_pcie_performance_request(adev, request, false);
-#endif
- }
-}
-
-#if 0
-static int si_ds_request(struct amdgpu_device *adev,
- bool ds_status_on, u32 count_write)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-
- if (eg_pi->sclk_deep_sleep) {
- if (ds_status_on)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
- PPSMC_Result_OK) ?
- 0 : -EINVAL;
- else
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
- PPSMC_Result_OK) ? 0 : -EINVAL;
- }
- return 0;
-}
-#endif
-
-static void si_set_max_cu_value(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (adev->asic_type == CHIP_VERDE) {
- switch (adev->pdev->device) {
- case 0x6820:
- case 0x6825:
- case 0x6821:
- case 0x6823:
- case 0x6827:
- si_pi->max_cu = 10;
- break;
- case 0x682D:
- case 0x6824:
- case 0x682F:
- case 0x6826:
- si_pi->max_cu = 8;
- break;
- case 0x6828:
- case 0x6830:
- case 0x6831:
- case 0x6838:
- case 0x6839:
- case 0x683D:
- si_pi->max_cu = 10;
- break;
- case 0x683B:
- case 0x683F:
- case 0x6829:
- si_pi->max_cu = 8;
- break;
- default:
- si_pi->max_cu = 0;
- break;
- }
- } else {
- si_pi->max_cu = 0;
- }
-}
-
-static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *table)
-{
- u32 i;
- int j;
- u16 leakage_voltage;
-
- if (table) {
- for (i = 0; i < table->count; i++) {
- switch (si_get_leakage_voltage_from_leakage_index(adev,
- table->entries[i].v,
- &leakage_voltage)) {
- case 0:
- table->entries[i].v = leakage_voltage;
- break;
- case -EAGAIN:
- return -EINVAL;
- case -EINVAL:
- default:
- break;
- }
- }
-
- for (j = (table->count - 2); j >= 0; j--) {
- table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
- table->entries[j].v : table->entries[j + 1].v;
- }
- }
- return 0;
-}
-
-static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
-{
- int ret = 0;
-
- ret = si_patch_single_dependency_table_based_on_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
- if (ret)
- DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
- ret = si_patch_single_dependency_table_based_on_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
- if (ret)
- DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
- ret = si_patch_single_dependency_table_based_on_leakage(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
- if (ret)
- DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
- return ret;
-}
-
-static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- u32 lane_width;
- u32 new_lane_width =
- ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
- u32 current_lane_width =
- ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
-
- if (new_lane_width != current_lane_width) {
- amdgpu_set_pcie_lanes(adev, new_lane_width);
- lane_width = amdgpu_get_pcie_lanes(adev);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
- }
-}
-
-static void si_dpm_setup_asic(struct amdgpu_device *adev)
-{
- si_read_clock_registers(adev);
- si_enable_acpi_power_management(adev);
-}
-
-static int si_thermal_enable_alert(struct amdgpu_device *adev,
- bool enable)
-{
- u32 thermal_int = RREG32(CG_THERMAL_INT);
-
- if (enable) {
- PPSMC_Result result;
-
- thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- WREG32(CG_THERMAL_INT, thermal_int);
- result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
- if (result != PPSMC_Result_OK) {
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- return -EINVAL;
- }
- } else {
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- WREG32(CG_THERMAL_INT, thermal_int);
- }
-
- return 0;
-}
-
-static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
- int min_temp, int max_temp)
-{
- int low_temp = 0 * 1000;
- int high_temp = 255 * 1000;
-
- if (low_temp < min_temp)
- low_temp = min_temp;
- if (high_temp > max_temp)
- high_temp = max_temp;
- if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
- return -EINVAL;
- }
-
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
- WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
-
- adev->pm.dpm.thermal.min_temp = low_temp;
- adev->pm.dpm.thermal.max_temp = high_temp;
-
- return 0;
-}
-
-static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
-
- if (si_pi->fan_ctrl_is_in_default_mode) {
- tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
- si_pi->fan_ctrl_default_mode = tmp;
- tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
- si_pi->t_min = tmp;
- si_pi->fan_ctrl_is_in_default_mode = false;
- }
-
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(0);
- WREG32(CG_FDO_CTRL2, tmp);
-
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(mode);
- WREG32(CG_FDO_CTRL2, tmp);
-}
-
-static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
- u32 duty100;
- u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- u16 fdo_min, slope1, slope2;
- u32 reference_clock, tmp;
- int ret;
- u64 tmp64;
-
- if (!si_pi->fan_table_start) {
- adev->pm.dpm.fan.ucode_fan_control = false;
- return 0;
- }
-
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-
- if (duty100 == 0) {
- adev->pm.dpm.fan.ucode_fan_control = false;
- return 0;
- }
-
- tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
- do_div(tmp64, 10000);
- fdo_min = (u16)tmp64;
-
- t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
- t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
-
- pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
- pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
-
- slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
- fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
- fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
- fan_table.slope1 = cpu_to_be16(slope1);
- fan_table.slope2 = cpu_to_be16(slope2);
- fan_table.fdo_min = cpu_to_be16(fdo_min);
- fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
- fan_table.hys_up = cpu_to_be16(1);
- fan_table.hys_slope = cpu_to_be16(1);
- fan_table.temp_resp_lim = cpu_to_be16(5);
- reference_clock = amdgpu_asic_get_xclk(adev);
-
- fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
- reference_clock) / 1600);
- fan_table.fdo_max = cpu_to_be16((u16)duty100);
-
- tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
- fan_table.temp_src = (uint8_t)tmp;
-
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- si_pi->fan_table_start,
- (u8 *)(&fan_table),
- sizeof(fan_table),
- si_pi->sram_end);
-
- if (ret) {
-		DRM_ERROR("Failed to load the fan table into the SMC.\n");
- adev->pm.dpm.fan.ucode_fan_control = false;
- }
-
- return ret;
-}
-
-static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- PPSMC_Result ret;
-
- ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
- if (ret == PPSMC_Result_OK) {
- si_pi->fan_is_controlled_by_smc = true;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- PPSMC_Result ret;
-
- ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
-
- if (ret == PPSMC_Result_OK) {
- si_pi->fan_is_controlled_by_smc = false;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static int si_dpm_get_fan_speed_percent(void *handle,
- u32 *speed)
-{
- u32 duty, duty100;
- u64 tmp64;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
- duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (u64)duty * 100;
- do_div(tmp64, duty100);
- *speed = (u32)tmp64;
-
- if (*speed > 100)
- *speed = 100;
-
- return 0;
-}
-
-static int si_dpm_set_fan_speed_percent(void *handle,
- u32 speed)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
- u32 duty, duty100;
- u64 tmp64;
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (si_pi->fan_is_controlled_by_smc)
- return -EINVAL;
-
- if (speed > 100)
- return -EINVAL;
-
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (u64)speed * duty100;
- do_div(tmp64, 100);
- duty = (u32)tmp64;
-
- tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
- tmp |= FDO_STATIC_DUTY(duty);
- WREG32(CG_FDO_CTRL0, tmp);
-
- return 0;
-}
-
-static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (mode) {
- /* stop auto-manage */
- if (adev->pm.dpm.fan.ucode_fan_control)
- si_fan_ctrl_stop_smc_fan_control(adev);
- si_fan_ctrl_set_static_mode(adev, mode);
- } else {
- /* restart auto-manage */
- if (adev->pm.dpm.fan.ucode_fan_control)
- si_thermal_start_smc_fan_control(adev);
- else
- si_fan_ctrl_set_default_mode(adev);
- }
-}
-
-static u32 si_dpm_get_fan_control_mode(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
-
- if (si_pi->fan_is_controlled_by_smc)
- return 0;
-
- tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
- return (tmp >> FDO_PWM_MODE_SHIFT);
-}
-
-#if 0
-static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
- u32 *speed)
-{
- u32 tach_period;
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (adev->pm.fan_pulses_per_revolution == 0)
- return -ENOENT;
-
- tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
- if (tach_period == 0)
- return -ENOENT;
-
- *speed = 60 * xclk * 10000 / tach_period;
-
- return 0;
-}
-
-static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
- u32 speed)
-{
- u32 tach_period, tmp;
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (adev->pm.fan_pulses_per_revolution == 0)
- return -ENOENT;
-
- if ((speed < adev->pm.fan_min_rpm) ||
- (speed > adev->pm.fan_max_rpm))
- return -EINVAL;
-
- if (adev->pm.dpm.fan.ucode_fan_control)
- si_fan_ctrl_stop_smc_fan_control(adev);
-
- tach_period = 60 * xclk * 10000 / (8 * speed);
- tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
- tmp |= TARGET_PERIOD(tach_period);
- WREG32(CG_TACH_CTRL, tmp);
-
- si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
-
- return 0;
-}
-#endif
-
-static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
-
- if (!si_pi->fan_ctrl_is_in_default_mode) {
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
- WREG32(CG_FDO_CTRL2, tmp);
-
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(si_pi->t_min);
- WREG32(CG_FDO_CTRL2, tmp);
- si_pi->fan_ctrl_is_in_default_mode = true;
- }
-}
-
-static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm.fan.ucode_fan_control) {
- si_fan_ctrl_start_smc_fan_control(adev);
- si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
- }
-}
-
-static void si_thermal_initialize(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- if (adev->pm.fan_pulses_per_revolution) {
- tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
-		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
- WREG32(CG_TACH_CTRL, tmp);
- }
-
- tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
- tmp |= TACH_PWM_RESP_RATE(0x28);
- WREG32(CG_FDO_CTRL2, tmp);
-}
-
-static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
-{
- int ret;
-
- si_thermal_initialize(adev);
- ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = si_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
- if (adev->pm.dpm.fan.ucode_fan_control) {
- ret = si_halt_smc(adev);
- if (ret)
- return ret;
- ret = si_thermal_setup_fan_table(adev);
- if (ret)
- return ret;
- ret = si_resume_smc(adev);
- if (ret)
- return ret;
- si_thermal_start_smc_fan_control(adev);
- }
-
- return 0;
-}
-
-static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
-{
- if (!adev->pm.no_fan) {
- si_fan_ctrl_set_default_mode(adev);
- si_fan_ctrl_stop_smc_fan_control(adev);
- }
-}
-
-static int si_dpm_enable(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
- int ret;
-
- if (amdgpu_si_is_smc_running(adev))
- return -EINVAL;
- if (pi->voltage_control || si_pi->voltage_control_svi2)
- si_enable_voltage_control(adev, true);
- if (pi->mvdd_control)
- si_get_mvdd_configuration(adev);
- if (pi->voltage_control || si_pi->voltage_control_svi2) {
- ret = si_construct_voltage_tables(adev);
- if (ret) {
- DRM_ERROR("si_construct_voltage_tables failed\n");
- return ret;
- }
- }
- if (eg_pi->dynamic_ac_timing) {
- ret = si_initialize_mc_reg_table(adev);
- if (ret)
- eg_pi->dynamic_ac_timing = false;
- }
- if (pi->dynamic_ss)
- si_enable_spread_spectrum(adev, true);
- if (pi->thermal_protection)
- si_enable_thermal_protection(adev, true);
- si_setup_bsp(adev);
- si_program_git(adev);
- si_program_tp(adev);
- si_program_tpp(adev);
- si_program_sstp(adev);
- si_enable_display_gap(adev);
- si_program_vc(adev);
- ret = si_upload_firmware(adev);
- if (ret) {
- DRM_ERROR("si_upload_firmware failed\n");
- return ret;
- }
- ret = si_process_firmware_header(adev);
- if (ret) {
- DRM_ERROR("si_process_firmware_header failed\n");
- return ret;
- }
- ret = si_initial_switch_from_arb_f0_to_f1(adev);
- if (ret) {
- DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
- return ret;
- }
- ret = si_init_smc_table(adev);
- if (ret) {
- DRM_ERROR("si_init_smc_table failed\n");
- return ret;
- }
- ret = si_init_smc_spll_table(adev);
- if (ret) {
- DRM_ERROR("si_init_smc_spll_table failed\n");
- return ret;
- }
- ret = si_init_arb_table_index(adev);
- if (ret) {
- DRM_ERROR("si_init_arb_table_index failed\n");
- return ret;
- }
- if (eg_pi->dynamic_ac_timing) {
- ret = si_populate_mc_reg_table(adev, boot_ps);
- if (ret) {
- DRM_ERROR("si_populate_mc_reg_table failed\n");
- return ret;
- }
- }
- ret = si_initialize_smc_cac_tables(adev);
- if (ret) {
- DRM_ERROR("si_initialize_smc_cac_tables failed\n");
- return ret;
- }
- ret = si_initialize_hardware_cac_manager(adev);
- if (ret) {
- DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
- return ret;
- }
- ret = si_initialize_smc_dte_tables(adev);
- if (ret) {
- DRM_ERROR("si_initialize_smc_dte_tables failed\n");
- return ret;
- }
- ret = si_populate_smc_tdp_limits(adev, boot_ps);
- if (ret) {
- DRM_ERROR("si_populate_smc_tdp_limits failed\n");
- return ret;
- }
- ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
- if (ret) {
- DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
- return ret;
- }
- si_program_response_times(adev);
- si_program_ds_registers(adev);
- si_dpm_start_smc(adev);
- ret = si_notify_smc_display_change(adev, false);
- if (ret) {
- DRM_ERROR("si_notify_smc_display_change failed\n");
- return ret;
- }
- si_enable_sclk_control(adev, true);
- si_start_dpm(adev);
-
- si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
- si_thermal_start_thermal_controller(adev);
-
- return 0;
-}
-
-static int si_set_temperature_range(struct amdgpu_device *adev)
-{
- int ret;
-
- ret = si_thermal_enable_alert(adev, false);
- if (ret)
- return ret;
- ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = si_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
-
- return ret;
-}
-
-static void si_dpm_disable(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
-
- if (!amdgpu_si_is_smc_running(adev))
- return;
- si_thermal_stop_thermal_controller(adev);
- si_disable_ulv(adev);
- si_clear_vc(adev);
- if (pi->thermal_protection)
- si_enable_thermal_protection(adev, false);
- si_enable_power_containment(adev, boot_ps, false);
- si_enable_smc_cac(adev, boot_ps, false);
- si_enable_spread_spectrum(adev, false);
- si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
- si_stop_dpm(adev);
- si_reset_to_default(adev);
- si_dpm_stop_smc(adev);
- si_force_switch_to_arb_f0(adev);
-
- ni_update_current_ps(adev, boot_ps);
-}
-
-static int si_dpm_pre_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
- struct amdgpu_ps *new_ps = &requested_ps;
-
- ni_update_requested_ps(adev, new_ps);
- si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
-
- return 0;
-}
-
-static int si_power_control_set_level(struct amdgpu_device *adev)
-{
- struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
- int ret;
-
- ret = si_restrict_performance_levels_before_switch(adev);
- if (ret)
- return ret;
- ret = si_halt_smc(adev);
- if (ret)
- return ret;
- ret = si_populate_smc_tdp_limits(adev, new_ps);
- if (ret)
- return ret;
- ret = si_populate_smc_tdp_limits_2(adev, new_ps);
- if (ret)
- return ret;
- ret = si_resume_smc(adev);
- if (ret)
- return ret;
- ret = si_set_sw_state(adev);
- if (ret)
- return ret;
- return 0;
-}
-
-static void si_set_vce_clock(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps,
- struct amdgpu_ps *old_rps)
-{
- if ((old_rps->evclk != new_rps->evclk) ||
- (old_rps->ecclk != new_rps->ecclk)) {
- /* Turn the clocks on when encoding, off otherwise */
- if (new_rps->evclk || new_rps->ecclk) {
-			/* Placeholder for future VCE 1.0 porting to amdgpu:
-			 * vce_v1_0_enable_mgcg(adev, false, false); */
- } else {
-			/* Placeholder for future VCE 1.0 porting to amdgpu:
-			 * vce_v1_0_enable_mgcg(adev, true, false);
-			 * amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk); */
- }
- }
-}
-
-static int si_dpm_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
- struct amdgpu_ps *old_ps = &eg_pi->current_rps;
- int ret;
-
- ret = si_disable_ulv(adev);
- if (ret) {
- DRM_ERROR("si_disable_ulv failed\n");
- return ret;
- }
- ret = si_restrict_performance_levels_before_switch(adev);
- if (ret) {
- DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
- return ret;
- }
- if (eg_pi->pcie_performance_request)
- si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
- ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
- ret = si_enable_power_containment(adev, new_ps, false);
- if (ret) {
- DRM_ERROR("si_enable_power_containment failed\n");
- return ret;
- }
- ret = si_enable_smc_cac(adev, new_ps, false);
- if (ret) {
- DRM_ERROR("si_enable_smc_cac failed\n");
- return ret;
- }
- ret = si_halt_smc(adev);
- if (ret) {
- DRM_ERROR("si_halt_smc failed\n");
- return ret;
- }
- ret = si_upload_sw_state(adev, new_ps);
- if (ret) {
- DRM_ERROR("si_upload_sw_state failed\n");
- return ret;
- }
- ret = si_upload_smc_data(adev);
- if (ret) {
- DRM_ERROR("si_upload_smc_data failed\n");
- return ret;
- }
- ret = si_upload_ulv_state(adev);
- if (ret) {
- DRM_ERROR("si_upload_ulv_state failed\n");
- return ret;
- }
- if (eg_pi->dynamic_ac_timing) {
- ret = si_upload_mc_reg_table(adev, new_ps);
- if (ret) {
- DRM_ERROR("si_upload_mc_reg_table failed\n");
- return ret;
- }
- }
- ret = si_program_memory_timing_parameters(adev, new_ps);
- if (ret) {
- DRM_ERROR("si_program_memory_timing_parameters failed\n");
- return ret;
- }
- si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
-
- ret = si_resume_smc(adev);
- if (ret) {
- DRM_ERROR("si_resume_smc failed\n");
- return ret;
- }
- ret = si_set_sw_state(adev);
- if (ret) {
- DRM_ERROR("si_set_sw_state failed\n");
- return ret;
- }
- ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
- si_set_vce_clock(adev, new_ps, old_ps);
- if (eg_pi->pcie_performance_request)
- si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
- ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
- if (ret) {
- DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
- return ret;
- }
- ret = si_enable_smc_cac(adev, new_ps, true);
- if (ret) {
- DRM_ERROR("si_enable_smc_cac failed\n");
- return ret;
- }
- ret = si_enable_power_containment(adev, new_ps, true);
- if (ret) {
- DRM_ERROR("si_enable_power_containment failed\n");
- return ret;
- }
-
- ret = si_power_control_set_level(adev);
- if (ret) {
- DRM_ERROR("si_power_control_set_level failed\n");
- return ret;
- }
-
- return 0;
-}
-
-static void si_dpm_post_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
-
- ni_update_current_ps(adev, new_ps);
-}
-
-#if 0
-void si_dpm_reset_asic(struct amdgpu_device *adev)
-{
- si_restrict_performance_levels_before_switch(adev);
- si_disable_ulv(adev);
- si_set_boot_state(adev);
-}
-#endif
-
-static void si_dpm_display_configuration_changed(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- si_program_display_gap(adev);
-}
-
-
-static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps,
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
- u8 table_rev)
-{
- rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- rps->class = le16_to_cpu(non_clock_info->usClassification);
- rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
-
- if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
- rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
- rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else if (r600_is_uvd_state(rps->class, rps->class2)) {
- rps->vclk = RV770_DEFAULT_VCLK_FREQ;
- rps->dclk = RV770_DEFAULT_DCLK_FREQ;
- } else {
- rps->vclk = 0;
- rps->dclk = 0;
- }
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- adev->pm.dpm.boot_ps = rps;
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- adev->pm.dpm.uvd_ps = rps;
-}
-
-static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps, int index,
- union pplib_clock_info *clock_info)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ps *ps = si_get_ps(rps);
- u16 leakage_voltage;
- struct rv7xx_pl *pl = &ps->performance_levels[index];
- int ret;
-
- ps->performance_level_count = index + 1;
-
- pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
- pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
- pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
- pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
-
- pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
- pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
- pl->flags = le32_to_cpu(clock_info->si.ulFlags);
- pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- clock_info->si.ucPCIEGen);
-
- /* patch up vddc if necessary */
- ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
- &leakage_voltage);
- if (ret == 0)
- pl->vddc = leakage_voltage;
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
- pi->acpi_vddc = pl->vddc;
- eg_pi->acpi_vddci = pl->vddci;
- si_pi->acpi_pcie_gen = pl->pcie_gen;
- }
-
- if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
- index == 0) {
- /* XXX disable for A0 tahiti */
- si_pi->ulv.supported = false;
- si_pi->ulv.pl = *pl;
- si_pi->ulv.one_pcie_lane_in_ulv = false;
- si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
- si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
- si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
- }
-
- if (pi->min_vddc_in_table > pl->vddc)
- pi->min_vddc_in_table = pl->vddc;
-
- if (pi->max_vddc_in_table < pl->vddc)
- pi->max_vddc_in_table = pl->vddc;
-
- /* patch up boot state */
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- u16 vddc, vddci, mvdd;
- amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
- pl->mclk = adev->clock.default_mclk;
- pl->sclk = adev->clock.default_sclk;
- pl->vddc = vddc;
- pl->vddci = vddci;
- si_pi->mvdd_bootup_value = mvdd;
- }
-
- if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
- ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
- }
-}
-
-union pplib_power_state {
- struct _ATOM_PPLIB_STATE v1;
- struct _ATOM_PPLIB_STATE_V2 v2;
-};
-
-static int si_parse_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- union pplib_power_state *power_state;
- int i, j, k, non_clock_array_index, clock_array_index;
- union pplib_clock_info *clock_info;
- struct _StateArray *state_array;
- struct _ClockInfoArray *clock_info_array;
- struct _NonClockInfoArray *non_clock_info_array;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- u8 *power_state_offset;
- struct si_ps *ps;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- amdgpu_add_thermal_controller(adev);
-
- state_array = (struct _StateArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usStateArrayOffset));
- clock_info_array = (struct _ClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
- non_clock_info_array = (struct _NonClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
-
- adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
- sizeof(struct amdgpu_ps),
- GFP_KERNEL);
- if (!adev->pm.dpm.ps)
- return -ENOMEM;
- power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
- u8 *idx;
- power_state = (union pplib_power_state *)power_state_offset;
- non_clock_array_index = power_state->v2.nonClockInfoIndex;
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- &non_clock_info_array->nonClockInfo[non_clock_array_index];
- ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
- return -ENOMEM;
- }
- adev->pm.dpm.ps[i].ps_priv = ps;
- si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
- non_clock_info,
- non_clock_info_array->ucEntrySize);
- k = 0;
- idx = (u8 *)&power_state->v2.clockInfoIndex[0];
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = idx[j];
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
- break;
- clock_info = (union pplib_clock_info *)
- ((u8 *)&clock_info_array->clockInfo[0] +
- (clock_array_index * clock_info_array->ucEntrySize));
- si_parse_pplib_clock_info(adev,
- &adev->pm.dpm.ps[i], k,
- clock_info);
- k++;
- }
- power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
- }
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
-
- /* fill in the vce power states */
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- u32 sclk, mclk;
- clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
- sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
- sclk |= clock_info->si.ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
- mclk |= clock_info->si.ucMemoryClockHigh << 16;
- adev->pm.dpm.vce_states[i].sclk = sclk;
- adev->pm.dpm.vce_states[i].mclk = mclk;
- }
-
- return 0;
-}
-
-static int si_dpm_init(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi;
- struct evergreen_power_info *eg_pi;
- struct ni_power_info *ni_pi;
- struct si_power_info *si_pi;
- struct atom_clock_dividers dividers;
- int ret;
-
- si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
- if (si_pi == NULL)
- return -ENOMEM;
- adev->pm.dpm.priv = si_pi;
- ni_pi = &si_pi->ni;
- eg_pi = &ni_pi->eg;
- pi = &eg_pi->rv7xx;
-
- si_pi->sys_pcie_mask =
- adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
- si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
-
- si_set_max_cu_value(adev);
-
- rv770_get_max_vddc(adev);
- si_get_leakage_vddc(adev);
- si_patch_dependency_tables_based_on_leakage(adev);
-
- pi->acpi_vddc = 0;
- eg_pi->acpi_vddci = 0;
- pi->min_vddc_in_table = 0;
- pi->max_vddc_in_table = 0;
-
- ret = amdgpu_get_platform_caps(adev);
- if (ret)
- return ret;
-
- ret = amdgpu_parse_extended_power_table(adev);
- if (ret)
- return ret;
-
- ret = si_parse_power_table(adev);
- if (ret)
- return ret;
-
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kcalloc(4,
- sizeof(struct amdgpu_clock_voltage_dependency_entry),
- GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
-
- if (adev->pm.dpm.voltage_response_time == 0)
- adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
- if (adev->pm.dpm.backbias_response_time == 0)
- adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- 0, false, &dividers);
- if (ret)
- pi->ref_div = dividers.ref_div + 1;
- else
- pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
-
- eg_pi->smu_uvd_hs = false;
-
- pi->mclk_strobe_mode_threshold = 40000;
- if (si_is_special_1gb_platform(adev))
- pi->mclk_stutter_mode_threshold = 0;
- else
- pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
- pi->mclk_edc_enable_threshold = 40000;
- eg_pi->mclk_edc_wr_enable_threshold = 40000;
-
- ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
-
- pi->voltage_control =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
- VOLTAGE_OBJ_GPIO_LUT);
- if (!pi->voltage_control) {
- si_pi->voltage_control_svi2 =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
- VOLTAGE_OBJ_SVID2);
- if (si_pi->voltage_control_svi2)
- amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
- &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
- }
-
- pi->mvdd_control =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
- VOLTAGE_OBJ_GPIO_LUT);
-
- eg_pi->vddci_control =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
- VOLTAGE_OBJ_GPIO_LUT);
- if (!eg_pi->vddci_control)
- si_pi->vddci_control_svi2 =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
- VOLTAGE_OBJ_SVID2);
-
- si_pi->vddc_phase_shed_control =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
- VOLTAGE_OBJ_PHASE_LUT);
-
- rv770_get_engine_memory_ss(adev);
-
- pi->asi = RV770_ASI_DFLT;
- pi->pasi = CYPRESS_HASI_DFLT;
- pi->vrc = SISLANDS_VRC_DFLT;
-
- pi->gfx_clock_gating = true;
-
- eg_pi->sclk_deep_sleep = true;
- si_pi->sclk_deep_sleep_above_low = false;
-
- if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
- pi->thermal_protection = true;
- else
- pi->thermal_protection = false;
-
- eg_pi->dynamic_ac_timing = true;
-
- eg_pi->light_sleep = true;
-#if defined(CONFIG_ACPI)
- eg_pi->pcie_performance_request =
- amdgpu_acpi_is_pcie_performance_request_supported(adev);
-#else
- eg_pi->pcie_performance_request = false;
-#endif
-
- si_pi->sram_end = SMC_RAM_END;
-
- adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
- adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
- adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
- adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
- adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
- adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
- adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
-
- si_initialize_powertune_defaults(adev);
-
- /* make sure dc limits are valid */
- if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
- (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
-
- si_pi->fan_ctrl_is_in_default_mode = true;
-
- return 0;
-}
-
-static void si_dpm_fini(struct amdgpu_device *adev)
-{
- int i;
-
- if (adev->pm.dpm.ps)
- for (i = 0; i < adev->pm.dpm.num_ps; i++)
- kfree(adev->pm.dpm.ps[i].ps_priv);
- kfree(adev->pm.dpm.ps);
- kfree(adev->pm.dpm.priv);
- kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
- amdgpu_free_extended_power_table(adev);
-}
-
-static void si_dpm_debugfs_print_current_performance_level(void *handle,
- struct seq_file *m)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps *rps = &eg_pi->current_rps;
- struct si_ps *ps = si_get_ps(rps);
- struct rv7xx_pl *pl;
- u32 current_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
-
- if (current_index >= ps->performance_level_count) {
- seq_printf(m, "invalid dpm profile %d\n", current_index);
- } else {
- pl = &ps->performance_levels[current_index];
- seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
- current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
- }
-}
-
-static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 cg_thermal_int;
-
- switch (type) {
- case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-
-static int si_dpm_process_interrupt(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- bool queue_thermal = false;
-
- if (entry == NULL)
- return -EINVAL;
-
- switch (entry->src_id) {
- case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
- adev->pm.dpm.thermal.high_to_low = false;
- queue_thermal = true;
- break;
- case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
- adev->pm.dpm.thermal.high_to_low = true;
- queue_thermal = true;
- break;
- default:
- break;
- }
-
- if (queue_thermal)
- schedule_work(&adev->pm.dpm.thermal.work);
-
- return 0;
-}
-
-static int si_dpm_late_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->pm.dpm_enabled)
- return 0;
-
- ret = si_set_temperature_range(adev);
- if (ret)
- return ret;
-#if 0 /* TODO: powergate UVD at late init? */
- si_dpm_powergate_uvd(adev, true);
-#endif
- return 0;
-}
-
-/**
- * si_dpm_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-static int si_dpm_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[30];
- int err;
-
- DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_TAHITI:
- chip_name = "tahiti";
- break;
- case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811)))
- chip_name = "pitcairn_k";
- else
- chip_name = "pitcairn";
- break;
- case CHIP_VERDE:
- if (((adev->pdev->device == 0x6820) &&
- ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83))) ||
- ((adev->pdev->device == 0x6821) &&
- ((adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87))) ||
- ((adev->pdev->revision == 0x87) &&
- ((adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682b))))
- chip_name = "verde_k";
- else
- chip_name = "verde";
- break;
- case CHIP_OLAND:
- if (((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6600) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605) ||
- (adev->pdev->device == 0x6610))) ||
- ((adev->pdev->revision == 0x83) &&
- (adev->pdev->device == 0x6610)))
- chip_name = "oland_k";
- else
- chip_name = "oland";
- break;
- case CHIP_HAINAN:
- if (((adev->pdev->revision == 0x81) &&
- (adev->pdev->device == 0x6660)) ||
- ((adev->pdev->revision == 0x83) &&
- ((adev->pdev->device == 0x6660) ||
- (adev->pdev->device == 0x6663) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667))))
- chip_name = "hainan_k";
- else if ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665))
- chip_name = "banks_k_2";
- else
- chip_name = "hainan";
- break;
- default: BUG();
- }
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
-		DRM_ERROR("si_smc: Failed to load firmware. err = %d \"%s\"\n",
- err, fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-
-}
-
-static int si_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- /* default to balanced state */
- adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
- adev->pm.default_sclk = adev->clock.default_sclk;
- adev->pm.default_mclk = adev->clock.default_mclk;
- adev->pm.current_sclk = adev->clock.default_sclk;
- adev->pm.current_mclk = adev->clock.default_mclk;
- adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
-
- if (amdgpu_dpm == 0)
- return 0;
-
- ret = si_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
- ret = si_dpm_init(adev);
- if (ret)
- goto dpm_failed;
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- if (amdgpu_dpm == 1)
- amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_INFO("amdgpu: dpm initialized\n");
-
- return 0;
-
-dpm_failed:
- si_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
- return ret;
-}
-
-static int si_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- flush_work(&adev->pm.dpm.thermal.work);
-
- mutex_lock(&adev->pm.mutex);
- si_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
-
- return 0;
-}
-
-static int si_dpm_hw_init(void *handle)
-{
- int ret;
-
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!amdgpu_dpm)
- return 0;
-
- mutex_lock(&adev->pm.mutex);
- si_dpm_setup_asic(adev);
- ret = si_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
- return ret;
-}
-
-static int si_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- si_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- }
-
- return 0;
-}
-
-static int si_dpm_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- /* disable dpm */
- si_dpm_disable(adev);
- /* reset the power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- mutex_unlock(&adev->pm.mutex);
- }
- return 0;
-}
-
-static int si_dpm_resume(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- /* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
- si_dpm_setup_asic(adev);
- ret = si_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
- }
- return 0;
-}
-
-static bool si_dpm_is_idle(void *handle)
-{
- /* XXX */
- return true;
-}
-
-static int si_dpm_wait_for_idle(void *handle)
-{
- /* XXX */
- return 0;
-}
-
-static int si_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int si_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int si_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-/* get temperature in millidegrees */
-static int si_dpm_get_temp(void *handle)
-{
- u32 temp;
- int actual_temp = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
- CTF_TEMP_SHIFT;
-
- if (temp & 0x200)
- actual_temp = 255;
- else
- actual_temp = temp & 0x1ff;
-
- actual_temp = (actual_temp * 1000);
-
- return actual_temp;
-}
-
-static u32 si_dpm_get_sclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
-
- if (low)
- return requested_state->performance_levels[0].sclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
-}
-
-static u32 si_dpm_get_mclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
-
- if (low)
- return requested_state->performance_levels[0].mclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
-}
-
-static void si_dpm_print_power_state(void *handle,
- void *current_ps)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
- struct si_ps *ps = si_get_ps(rps);
- struct rv7xx_pl *pl;
- int i;
-
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- for (i = 0; i < ps->performance_level_count; i++) {
- pl = &ps->performance_levels[i];
- if (adev->asic_type >= CHIP_TAHITI)
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
- i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
- else
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
- i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
- }
- amdgpu_dpm_print_ps_status(adev, rps);
-}
-
-static int si_dpm_early_init(void *handle)
-{
-
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->powerplay.pp_funcs = &si_dpm_funcs;
- adev->powerplay.pp_handle = adev;
- si_dpm_set_irq_funcs(adev);
- return 0;
-}
-
-static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
- const struct rv7xx_pl *si_cpl2)
-{
- return ((si_cpl1->mclk == si_cpl2->mclk) &&
- (si_cpl1->sclk == si_cpl2->sclk) &&
- (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
- (si_cpl1->vddc == si_cpl2->vddc) &&
- (si_cpl1->vddci == si_cpl2->vddci));
-}
-
-static int si_check_state_equal(void *handle,
- void *current_ps,
- void *request_ps,
- bool *equal)
-{
- struct si_ps *si_cps;
- struct si_ps *si_rps;
- int i;
- struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
- struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
- return -EINVAL;
-
- si_cps = si_get_ps((struct amdgpu_ps *)cps);
- si_rps = si_get_ps((struct amdgpu_ps *)rps);
-
- if (si_cps == NULL) {
- printk("si_cps is NULL\n");
- *equal = false;
- return 0;
- }
-
- if (si_cps->performance_level_count != si_rps->performance_level_count) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < si_cps->performance_level_count; i++) {
- if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
- &(si_rps->performance_levels[i]))) {
- *equal = false;
- return 0;
- }
- }
-
-	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
- *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
- *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
-
- return 0;
-}
-
-static int si_dpm_read_sensor(void *handle, int idx,
- void *value, int *size)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps *rps = &eg_pi->current_rps;
- struct si_ps *ps = si_get_ps(rps);
- uint32_t sclk, mclk;
- u32 pl_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
-
- /* size must be at least 4 bytes for all sensors */
- if (*size < 4)
- return -EINVAL;
-
- switch (idx) {
- case AMDGPU_PP_SENSOR_GFX_SCLK:
- if (pl_index < ps->performance_level_count) {
- sclk = ps->performance_levels[pl_index].sclk;
- *((uint32_t *)value) = sclk;
- *size = 4;
- return 0;
- }
- return -EINVAL;
- case AMDGPU_PP_SENSOR_GFX_MCLK:
- if (pl_index < ps->performance_level_count) {
- mclk = ps->performance_levels[pl_index].mclk;
- *((uint32_t *)value) = mclk;
- *size = 4;
- return 0;
- }
- return -EINVAL;
- case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = si_dpm_get_temp(adev);
- *size = 4;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static const struct amd_ip_funcs si_dpm_ip_funcs = {
- .name = "si_dpm",
- .early_init = si_dpm_early_init,
- .late_init = si_dpm_late_init,
- .sw_init = si_dpm_sw_init,
- .sw_fini = si_dpm_sw_fini,
- .hw_init = si_dpm_hw_init,
- .hw_fini = si_dpm_hw_fini,
- .suspend = si_dpm_suspend,
- .resume = si_dpm_resume,
- .is_idle = si_dpm_is_idle,
- .wait_for_idle = si_dpm_wait_for_idle,
- .soft_reset = si_dpm_soft_reset,
- .set_clockgating_state = si_dpm_set_clockgating_state,
- .set_powergating_state = si_dpm_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version si_smu_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dpm_ip_funcs,
-};
-
-static const struct amd_pm_funcs si_dpm_funcs = {
- .pre_set_power_state = &si_dpm_pre_set_power_state,
- .set_power_state = &si_dpm_set_power_state,
- .post_set_power_state = &si_dpm_post_set_power_state,
- .display_configuration_changed = &si_dpm_display_configuration_changed,
- .get_sclk = &si_dpm_get_sclk,
- .get_mclk = &si_dpm_get_mclk,
- .print_power_state = &si_dpm_print_power_state,
- .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &si_dpm_force_performance_level,
- .vblank_too_short = &si_dpm_vblank_too_short,
- .set_fan_control_mode = &si_dpm_set_fan_control_mode,
- .get_fan_control_mode = &si_dpm_get_fan_control_mode,
- .set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
- .get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
- .check_state_equal = &si_check_state_equal,
- .get_vce_clock_state = amdgpu_get_vce_clock_state,
- .read_sensor = &si_dpm_read_sensor,
-};
-
-static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
- .set = si_dpm_set_interrupt_state,
- .process = si_dpm_process_interrupt,
-};
-
-static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
-{
- adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
- adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
-}
-
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
deleted file mode 100644
index bc0be6818e21..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ /dev/null
@@ -1,1015 +0,0 @@
-/*
- * Copyright 2012 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __SI_DPM_H__
-#define __SI_DPM_H__
-
-#include "amdgpu_atombios.h"
-#include "sislands_smc.h"
-
-#define MC_CG_CONFIG 0x96f
-#define MC_ARB_CG 0x9fa
-#define CG_ARB_REQ(x) ((x) << 0)
-#define CG_ARB_REQ_MASK (0xff << 0)
-
-#define MC_ARB_DRAM_TIMING_1 0x9fc
-#define MC_ARB_DRAM_TIMING_2 0x9fd
-#define MC_ARB_DRAM_TIMING_3 0x9fe
-#define MC_ARB_DRAM_TIMING2_1 0x9ff
-#define MC_ARB_DRAM_TIMING2_2 0xa00
-#define MC_ARB_DRAM_TIMING2_3 0xa01
-
-#define MAX_NO_OF_MVDD_VALUES 2
-#define MAX_NO_VREG_STEPS 32
-#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
-#define RV770_ASI_DFLT 1000
-#define CYPRESS_HASI_DFLT 400000
-#define PCIE_PERF_REQ_PECI_GEN1 2
-#define PCIE_PERF_REQ_PECI_GEN2 3
-#define PCIE_PERF_REQ_PECI_GEN3 4
-#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */
-#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */
-
-#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
-
-#define RV770_SMC_TABLE_ADDRESS 0xB000
-#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
-
-#define SMC_STROBE_RATIO 0x0F
-#define SMC_STROBE_ENABLE 0x10
-
-#define SMC_MC_EDC_RD_FLAG 0x01
-#define SMC_MC_EDC_WR_FLAG 0x02
-#define SMC_MC_RTT_ENABLE 0x04
-#define SMC_MC_STUTTER_EN 0x08
-
-#define RV770_SMC_VOLTAGEMASK_VDDC 0
-#define RV770_SMC_VOLTAGEMASK_MVDD 1
-#define RV770_SMC_VOLTAGEMASK_VDDCI 2
-#define RV770_SMC_VOLTAGEMASK_MAX 4
-
-#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-#define NISLANDS_SMC_STROBE_RATIO 0x0F
-#define NISLANDS_SMC_STROBE_ENABLE 0x10
-
-#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01
-#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02
-#define NISLANDS_SMC_MC_RTT_ENABLE 0x04
-#define NISLANDS_SMC_MC_STUTTER_EN 0x08
-
-#define MAX_NO_VREG_STEPS 32
-
-#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
-#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
-#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
-#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
-
-#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0
-#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1
-#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
-#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3
-
-#define SISLANDS_LEAKAGE_INDEX0 0xff01
-#define SISLANDS_MAX_LEAKAGE_COUNT 4
-
-#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
-#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
-#define SISLANDS_ACPI_STATE_ARB_INDEX 1
-#define SISLANDS_ULV_STATE_ARB_INDEX 2
-#define SISLANDS_DRIVER_STATE_ARB_INDEX 3
-
-#define SISLANDS_DPM2_MAX_PULSE_SKIP 256
-
-#define SISLANDS_DPM2_NEAR_TDP_DEC 10
-#define SISLANDS_DPM2_ABOVE_SAFE_INC 5
-#define SISLANDS_DPM2_BELOW_SAFE_INC 20
-
-#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80
-
-#define SISLANDS_DPM2_MAXPS_PERCENT_H 99
-#define SISLANDS_DPM2_MAXPS_PERCENT_M 99
-
-#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
-#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12
-#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
-#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E
-#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF
-
-#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10
-
-#define SISLANDS_VRC_DFLT 0xC000B3
-#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687
-#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035
-#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550
-
-#define SI_ASI_DFLT 10000
-#define SI_BSP_DFLT 0x41EB
-#define SI_BSU_DFLT 0x2
-#define SI_AH_DFLT 5
-#define SI_RLP_DFLT 25
-#define SI_RMP_DFLT 65
-#define SI_LHP_DFLT 40
-#define SI_LMP_DFLT 15
-#define SI_TD_DFLT 0
-#define SI_UTC_DFLT_00 0x24
-#define SI_UTC_DFLT_01 0x22
-#define SI_UTC_DFLT_02 0x22
-#define SI_UTC_DFLT_03 0x22
-#define SI_UTC_DFLT_04 0x22
-#define SI_UTC_DFLT_05 0x22
-#define SI_UTC_DFLT_06 0x22
-#define SI_UTC_DFLT_07 0x22
-#define SI_UTC_DFLT_08 0x22
-#define SI_UTC_DFLT_09 0x22
-#define SI_UTC_DFLT_10 0x22
-#define SI_UTC_DFLT_11 0x22
-#define SI_UTC_DFLT_12 0x22
-#define SI_UTC_DFLT_13 0x22
-#define SI_UTC_DFLT_14 0x22
-#define SI_DTC_DFLT_00 0x24
-#define SI_DTC_DFLT_01 0x22
-#define SI_DTC_DFLT_02 0x22
-#define SI_DTC_DFLT_03 0x22
-#define SI_DTC_DFLT_04 0x22
-#define SI_DTC_DFLT_05 0x22
-#define SI_DTC_DFLT_06 0x22
-#define SI_DTC_DFLT_07 0x22
-#define SI_DTC_DFLT_08 0x22
-#define SI_DTC_DFLT_09 0x22
-#define SI_DTC_DFLT_10 0x22
-#define SI_DTC_DFLT_11 0x22
-#define SI_DTC_DFLT_12 0x22
-#define SI_DTC_DFLT_13 0x22
-#define SI_DTC_DFLT_14 0x22
-#define SI_VRC_DFLT 0x0000C003
-#define SI_VOLTAGERESPONSETIME_DFLT 1000
-#define SI_BACKBIASRESPONSETIME_DFLT 1000
-#define SI_VRU_DFLT 0x3
-#define SI_SPLLSTEPTIME_DFLT 0x1000
-#define SI_SPLLSTEPUNIT_DFLT 0x3
-#define SI_TPU_DFLT 0
-#define SI_TPC_DFLT 0x200
-#define SI_SSTU_DFLT 0
-#define SI_SST_DFLT 0x00C8
-#define SI_GICST_DFLT 0x200
-#define SI_FCT_DFLT 0x0400
-#define SI_FCTU_DFLT 0
-#define SI_CTXCGTT3DRPHC_DFLT 0x20
-#define SI_CTXCGTT3DRSDC_DFLT 0x40
-#define SI_VDDC3DOORPHC_DFLT 0x100
-#define SI_VDDC3DOORSDC_DFLT 0x7
-#define SI_VDDC3DOORSU_DFLT 0
-#define SI_MPLLLOCKTIME_DFLT 100
-#define SI_MPLLRESETTIME_DFLT 150
-#define SI_VCOSTEPPCT_DFLT 20
-#define SI_ENDINGVCOSTEPPCT_DFLT 5
-#define SI_REFERENCEDIVIDER_DFLT 4
-
-#define SI_PM_NUMBER_OF_TC 15
-#define SI_PM_NUMBER_OF_SCLKS 20
-#define SI_PM_NUMBER_OF_MCLKS 4
-#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4
-#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3
-
-/* XXX are these ok? */
-#define SI_TEMP_RANGE_MIN (90 * 1000)
-#define SI_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-enum ni_dc_cac_level
-{
- NISLANDS_DCCAC_LEVEL_0 = 0,
- NISLANDS_DCCAC_LEVEL_1,
- NISLANDS_DCCAC_LEVEL_2,
- NISLANDS_DCCAC_LEVEL_3,
- NISLANDS_DCCAC_LEVEL_4,
- NISLANDS_DCCAC_LEVEL_5,
- NISLANDS_DCCAC_LEVEL_6,
- NISLANDS_DCCAC_LEVEL_7,
- NISLANDS_DCCAC_MAX_LEVELS
-};
-
-enum si_cac_config_reg_type
-{
- SISLANDS_CACCONFIG_MMR = 0,
- SISLANDS_CACCONFIG_CGIND,
- SISLANDS_CACCONFIG_MAX
-};
-
-enum si_power_level {
- SI_POWER_LEVEL_LOW = 0,
- SI_POWER_LEVEL_MEDIUM = 1,
- SI_POWER_LEVEL_HIGH = 2,
- SI_POWER_LEVEL_CTXSW = 3,
-};
-
-enum si_td {
- SI_TD_AUTO,
- SI_TD_UP,
- SI_TD_DOWN,
-};
-
-enum si_display_watermark {
- SI_DISPLAY_WATERMARK_LOW = 0,
- SI_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum si_display_gap
-{
- SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- SI_PM_DISPLAY_GAP_VBLANK = 1,
- SI_PM_DISPLAY_GAP_WATERMARK = 2,
- SI_PM_DISPLAY_GAP_IGNORE = 3,
-};
-
-extern const struct amdgpu_ip_block_version si_smu_ip_block;
-
-struct ni_leakage_coeffients
-{
- u32 at;
- u32 bt;
- u32 av;
- u32 bv;
- s32 t_slope;
- s32 t_intercept;
- u32 t_ref;
-};
-
-struct SMC_Evergreen_MCRegisterAddress
-{
- uint16_t s0;
- uint16_t s1;
-};
-
-typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
-
-struct evergreen_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct evergreen_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct SMC_Evergreen_MCRegisterSet
-{
- uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
-
-struct SMC_Evergreen_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
- SMC_Evergreen_MCRegisterSet data[5];
-};
-
-typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
-
-struct SMC_NIslands_MCRegisterSet
-{
- uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
-
-struct ni_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct SMC_NIslands_MCRegisterAddress
-{
- uint16_t s0;
- uint16_t s1;
-};
-
-typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
-
-struct SMC_NIslands_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
- SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
-};
-
-typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
-
-struct evergreen_ulv_param {
- bool supported;
- struct rv7xx_pl *pl;
-};
-
-struct evergreen_arb_registers {
- u32 mc_arb_dram_timing;
- u32 mc_arb_dram_timing2;
- u32 mc_arb_rfsh_rate;
- u32 mc_arb_burst_time;
-};
-
-struct at {
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
-};
-
-struct ni_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct RV770_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
-
-struct RV770_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
-
-
-struct RV730_SMC_MCLK_VALUE
-{
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL2;
- uint32_t vMPLL_FUNC_CNTL3;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
-
-struct RV770_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
-
-union RV7XX_SMC_MCLK_VALUE
-{
- RV770_SMC_MCLK_VALUE mclk770;
- RV730_SMC_MCLK_VALUE mclk730;
-};
-
-typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
-
-struct RV770_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- union{
- uint8_t seqValue;
- uint8_t ACIndex;
- };
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t gen2XSP;
- uint8_t backbias;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- RV770_SMC_SCLK_VALUE sclk;
- RV7XX_SMC_MCLK_VALUE mclk;
- RV770_SMC_VOLTAGE_VALUE vddc;
- RV770_SMC_VOLTAGE_VALUE mvdd;
- RV770_SMC_VOLTAGE_VALUE vddci;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t stateFlags;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
-
-struct RV770_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t padding1;
- uint8_t padding2;
- uint8_t padding3;
- RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
-};
-
-typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
-
-struct RV770_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
-
-struct RV770_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[MAX_NO_VREG_STEPS];
- RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- RV770_SMC_SWSTATE initialState;
- RV770_SMC_SWSTATE ACPIState;
- RV770_SMC_SWSTATE driverState;
- RV770_SMC_SWSTATE ULVState;
-};
-
-typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
-
-struct vddc_table_entry {
- u16 vddc;
- u8 vddc_index;
- u8 high_smio;
- u32 low_smio;
-};
-
-struct rv770_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct rv730_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_func_cntl;
- u32 mpll_func_cntl2;
- u32 mpll_func_cntl3;
- u32 mpll_ss;
- u32 mpll_ss2;
-};
-
-union r7xx_clock_registers {
- struct rv770_clock_registers rv770;
- struct rv730_clock_registers rv730;
-};
-
-struct rv7xx_power_info {
- /* flags */
- bool mem_gddr5;
- bool pcie_gen2;
- bool dynamic_pcie_gen2;
- bool acpi_pcie_gen2;
- bool boot_in_gen2;
- bool voltage_control; /* vddc */
- bool mvdd_control;
- bool sclk_ss;
- bool mclk_ss;
- bool dynamic_ss;
- bool gfx_clock_gating;
- bool mg_clock_gating;
- bool mgcgtssm;
- bool power_gating;
- bool thermal_protection;
- bool display_gap;
- bool dcodt;
- bool ulps;
- /* registers */
- union r7xx_clock_registers clk_regs;
- u32 s0_vid_lower_smio_cntl;
- /* voltage */
- u32 vddc_mask_low;
- u32 mvdd_mask_low;
- u32 mvdd_split_frequency;
- u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
- u16 max_vddc;
- u16 max_vddc_in_table;
- u16 min_vddc_in_table;
- struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
- u8 valid_vddc_entries;
- /* dc odt */
- u32 mclk_odt_threshold;
- u8 odt_value_0[2];
- u8 odt_value_1[2];
- /* stored values */
- u32 boot_sclk;
- u16 acpi_vddc;
- u32 ref_div;
- u32 active_auto_throttle_sources;
- u32 mclk_stutter_mode_threshold;
- u32 mclk_strobe_mode_threshold;
- u32 mclk_edc_enable_threshold;
- u32 bsp;
- u32 bsu;
- u32 pbsp;
- u32 pbsu;
- u32 dsp;
- u32 psp;
- u32 asi;
- u32 pasi;
- u32 vrc;
- u32 restricted_levels;
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
- /* smc offsets */
- u16 state_table_start;
- u16 soft_regs_start;
- u16 sram_end;
- /* scratch structs */
- RV770_SMC_STATETABLE smc_statetable;
-};
-
-struct rv7xx_pl {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci; /* eg+ only */
- u32 flags;
- enum amdgpu_pcie_gen pcie_gen; /* si+ only */
-};
-
-struct rv7xx_ps {
- struct rv7xx_pl high;
- struct rv7xx_pl medium;
- struct rv7xx_pl low;
- bool dc_compatible;
-};
-
-struct si_ps {
- u16 performance_level_count;
- bool dc_compatible;
- struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
-};
-
-struct ni_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct ni_cac_data
-{
- struct ni_leakage_coeffients leakage_coefficients;
- u32 i_leakage;
- s32 leakage_minimum_temperature;
- u32 pwr_const;
- u32 dc_cac_value;
- u32 bif_cac_value;
- u32 lkge_pwr;
- u8 mc_wr_weight;
- u8 mc_rd_weight;
- u8 allow_ovrflw;
- u8 num_win_tdp;
- u8 l2num_win_tdp;
- u8 lts_truncate_n;
-};
-
-struct evergreen_power_info {
- /* must be first! */
- struct rv7xx_power_info rv7xx;
- /* flags */
- bool vddci_control;
- bool dynamic_ac_timing;
- bool abm;
- bool mcls;
- bool light_sleep;
- bool memory_transition;
- bool pcie_performance_request;
- bool pcie_performance_request_registered;
- bool sclk_deep_sleep;
- bool dll_default_on;
- bool ls_clock_gating;
- bool smu_uvd_hs;
- bool uvd_enabled;
- /* stored values */
- u16 acpi_vddci;
- u8 mvdd_high_index;
- u8 mvdd_low_index;
- u32 mclk_edc_wr_enable_threshold;
- struct evergreen_mc_reg_table mc_reg_table;
- struct atom_voltage_table vddc_voltage_table;
- struct atom_voltage_table vddci_voltage_table;
- struct evergreen_arb_registers bootup_arb_registers;
- struct evergreen_ulv_param ulv;
- struct at ats[2];
- /* smc offsets */
- u16 mc_reg_table_start;
- struct amdgpu_ps current_rps;
- struct rv7xx_ps current_ps;
- struct amdgpu_ps requested_rps;
- struct rv7xx_ps requested_ps;
-};
-
-struct PP_NIslands_Dpm2PerfLevel
-{
- uint8_t MaxPS;
- uint8_t TgtAct;
- uint8_t MaxPS_StepInc;
- uint8_t MaxPS_StepDec;
- uint8_t PSST;
- uint8_t NearTDPDec;
- uint8_t AboveSafeInc;
- uint8_t BelowSafeInc;
- uint8_t PSDeltaLimit;
- uint8_t PSDeltaWin;
- uint8_t Reserved[6];
-};
-
-typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
-
-struct PP_NIslands_DPM2Parameters
-{
- uint32_t TDPLimit;
- uint32_t NearTDPLimit;
- uint32_t SafePowerLimit;
- uint32_t PowerBoostLimit;
-};
-typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
-
-struct NISLANDS_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
-
-struct NISLANDS_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
-
-struct NISLANDS_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
-
-struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- uint8_t ACIndex;
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- NISLANDS_SMC_SCLK_VALUE sclk;
- NISLANDS_SMC_MCLK_VALUE mclk;
- NISLANDS_SMC_VOLTAGE_VALUE vddc;
- NISLANDS_SMC_VOLTAGE_VALUE mvdd;
- NISLANDS_SMC_VOLTAGE_VALUE vddci;
- NISLANDS_SMC_VOLTAGE_VALUE std_vddc;
- uint32_t powergate_en;
- uint8_t hUp;
- uint8_t hDown;
- uint8_t stateFlags;
- uint8_t arbRefreshState;
- uint32_t SQPowerThrottle;
- uint32_t SQPowerThrottle_2;
- uint32_t reserved[2];
- PP_NIslands_Dpm2PerfLevel dpm2;
-};
-
-typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-
-struct NISLANDS_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t levelCount;
- uint8_t padding2;
- uint8_t padding3;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[];
-};
-
-typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
-
-struct NISLANDS_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
-
-#define NISLANDS_MAX_NO_VREG_STEPS 32
-
-struct NISLANDS_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- PP_NIslands_DPM2Parameters dpm2Params;
- NISLANDS_SMC_SWSTATE initialState;
- NISLANDS_SMC_SWSTATE ACPIState;
- NISLANDS_SMC_SWSTATE ULVState;
- NISLANDS_SMC_SWSTATE driverState;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
-};
-
-typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
-
-struct ni_power_info {
- /* must be first! */
- struct evergreen_power_info eg;
- struct ni_clock_registers clock_registers;
- struct ni_mc_reg_table mc_reg_table;
- u32 mclk_rtt_mode_threshold;
- /* flags */
- bool use_power_boost_limit;
- bool support_cac_long_term_average;
- bool cac_enabled;
- bool cac_configuration_required;
- bool driver_calculate_cac_leakage;
- bool pc_enabled;
- bool enable_power_containment;
- bool enable_cac;
- bool enable_sq_ramping;
- /* smc offsets */
- u16 arb_table_start;
- u16 fan_table_start;
- u16 cac_table_start;
- u16 spll_table_start;
- /* CAC stuff */
- struct ni_cac_data cac_data;
- u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
- const struct ni_cac_weights *cac_weights;
- u8 lta_window_size;
- u8 lts_truncate;
- struct si_ps current_ps;
- struct si_ps requested_ps;
- /* scratch structs */
- SMC_NIslands_MCRegisters smc_mc_reg_table;
- NISLANDS_SMC_STATETABLE smc_statetable;
-};
-
-struct si_cac_config_reg
-{
- u32 offset;
- u32 mask;
- u32 shift;
- u32 value;
- enum si_cac_config_reg_type type;
-};
-
-struct si_powertune_data
-{
- u32 cac_window;
- u32 l2_lta_window_size_default;
- u8 lts_truncate_default;
- u8 shift_n_default;
- u8 operating_temp;
- struct ni_leakage_coeffients leakage_coefficients;
- u32 fixed_kt;
- u32 lkge_lut_v0_percent;
- u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
- bool enable_powertune_by_default;
-};
-
-struct si_dyn_powertune_data
-{
- u32 cac_leakage;
- s32 leakage_minimum_temperature;
- u32 wintime;
- u32 l2_lta_window_size;
- u8 lts_truncate;
- u8 shift_n;
- u8 dc_pwr_value;
- bool disable_uvd_powertune;
-};
-
-struct si_dte_data
-{
- u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
- u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
- u32 k;
- u32 t0;
- u32 max_t;
- u8 window_size;
- u8 temp_select;
- u8 dte_mode;
- u8 tdep_count;
- u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- u32 t_threshold;
- bool enable_dte_by_default;
-};
-
-struct si_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 dll_cntl;
- u32 mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl;
- u32 mpll_func_cntl;
- u32 mpll_func_cntl_1;
- u32 mpll_func_cntl_2;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct si_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct si_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct si_leakage_voltage_entry
-{
- u16 voltage;
- u16 leakage_index;
-};
-
-struct si_leakage_voltage
-{
- u16 count;
- struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
-};
-
-
-struct si_ulv_param {
- bool supported;
- u32 cg_ulv_control;
- u32 cg_ulv_parameter;
- u32 volt_change_delay;
- struct rv7xx_pl pl;
- bool one_pcie_lane_in_ulv;
-};
-
-struct si_power_info {
- /* must be first! */
- struct ni_power_info ni;
- struct si_clock_registers clock_registers;
- struct si_mc_reg_table mc_reg_table;
- struct atom_voltage_table mvdd_voltage_table;
- struct atom_voltage_table vddc_phase_shed_table;
- struct si_leakage_voltage leakage_voltage;
- u16 mvdd_bootup_value;
- struct si_ulv_param ulv;
- u32 max_cu;
- /* pcie gen */
- enum amdgpu_pcie_gen force_pcie_gen;
- enum amdgpu_pcie_gen boot_pcie_gen;
- enum amdgpu_pcie_gen acpi_pcie_gen;
- u32 sys_pcie_mask;
- /* flags */
- bool enable_dte;
- bool enable_ppm;
- bool vddc_phase_shed_control;
- bool pspp_notify_required;
- bool sclk_deep_sleep_above_low;
- bool voltage_control_svi2;
- bool vddci_control_svi2;
- /* smc offsets */
- u32 sram_end;
- u32 state_table_start;
- u32 soft_regs_start;
- u32 mc_reg_table_start;
- u32 arb_table_start;
- u32 cac_table_start;
- u32 dte_table_start;
- u32 spll_table_start;
- u32 papm_cfg_table_start;
- u32 fan_table_start;
- /* CAC stuff */
- const struct si_cac_config_reg *cac_weights;
- const struct si_cac_config_reg *lcac_config;
- const struct si_cac_config_reg *cac_override;
- const struct si_powertune_data *powertune_data;
- struct si_dyn_powertune_data dyn_powertune_data;
- /* DTE stuff */
- struct si_dte_data dte_data;
- /* scratch structs */
- SMC_SIslands_MCRegisters smc_mc_reg_table;
- SISLANDS_SMC_STATETABLE smc_statetable;
- PP_SIslands_PAPMParameters papm_parm;
- /* SVI2 */
- u8 svd_gpio_id;
- u8 svc_gpio_id;
- /* fan control */
- bool fan_ctrl_is_in_default_mode;
- u32 t_min;
- u32 fan_ctrl_default_mode;
- bool fan_is_controlled_by_smc;
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
deleted file mode 100644
index 8f994ffa9cd1..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/si_smc.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- */
-
-#include <linux/firmware.h>
-
-#include "amdgpu.h"
-#include "sid.h"
-#include "ppsmc.h"
-#include "amdgpu_ucode.h"
-#include "sislands_smc.h"
-
-static int si_set_smc_sram_address(struct amdgpu_device *adev,
- u32 smc_address, u32 limit)
-{
- if (smc_address & 3)
- return -EINVAL;
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(SMC_IND_INDEX_0, smc_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
-
- return 0;
-}
-
-int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit)
-{
- unsigned long flags;
- int ret = 0;
- u32 data, original_data, addr, extra_shift;
-
- if (smc_start_address & 3)
- return -EINVAL;
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
-
- ret = si_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- WREG32(SMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- /* RMW for the final bytes */
- if (byte_count > 0) {
- data = 0;
-
- ret = si_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- original_data = RREG32(SMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- /* SMC address space is BE */
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (original_data & ~((~0UL) << extra_shift));
-
- ret = si_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- WREG32(SMC_IND_DATA_0, data);
- }
-
-done:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
-
-void amdgpu_si_start_smc(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
-
- tmp &= ~RST_REG;
-
- WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
-}
-
-void amdgpu_si_reset_smc(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
-
- tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
- RST_REG;
- WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
-}
-
-int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
-{
- static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
-
- return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-}
-
-void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
-
- if (enable)
- tmp &= ~CK_DISABLE;
- else
- tmp |= CK_DISABLE;
-
- WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
-}
-
-bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
-{
- u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
- u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
-
- if (!(rst & RST_REG) && !(clk & CK_DISABLE))
- return true;
-
- return false;
-}
-
-PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- u32 tmp;
- int i;
-
- if (!amdgpu_si_is_smc_running(adev))
- return PPSMC_Result_Failed;
-
- WREG32(SMC_MESSAGE_0, msg);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SMC_RESP_0);
- if (tmp != 0)
- break;
- udelay(1);
- }
-
- return (PPSMC_Result)RREG32(SMC_RESP_0);
-}
-
-PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- u32 tmp;
- int i;
-
- if (!amdgpu_si_is_smc_running(adev))
- return PPSMC_Result_OK;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
- if ((tmp & CKEN) == 0)
- break;
- udelay(1);
- }
-
- return PPSMC_Result_OK;
-}
-
-int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- unsigned long flags;
- u32 ucode_start_address;
- u32 ucode_size;
- const u8 *src;
- u32 data;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
-
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- src = (const u8 *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- if (ucode_size & 3)
- return -EINVAL;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, ucode_start_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
- while (ucode_size >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
-
- WREG32(SMC_IND_DATA_0, data);
-
- src += 4;
- ucode_size -= 4;
- }
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 *value, u32 limit)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- ret = si_set_smc_sram_address(adev, smc_address, limit);
- if (ret == 0)
- *value = RREG32(SMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
-
-int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 value, u32 limit)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- ret = si_set_smc_sram_address(adev, smc_address, limit);
- if (ret == 0)
- WREG32(SMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
deleted file mode 100644
index d2930eceaf3c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef PP_SISLANDS_SMC_H
-#define PP_SISLANDS_SMC_H
-
-#include "ppsmc.h"
-
-#pragma pack(push, 1)
-
-#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-
-struct PP_SIslands_Dpm2PerfLevel
-{
- uint8_t MaxPS;
- uint8_t TgtAct;
- uint8_t MaxPS_StepInc;
- uint8_t MaxPS_StepDec;
- uint8_t PSSamplingTime;
- uint8_t NearTDPDec;
- uint8_t AboveSafeInc;
- uint8_t BelowSafeInc;
- uint8_t PSDeltaLimit;
- uint8_t PSDeltaWin;
- uint16_t PwrEfficiencyRatio;
- uint8_t Reserved[4];
-};
-
-typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
-
-struct PP_SIslands_DPM2Status
-{
- uint32_t dpm2Flags;
- uint8_t CurrPSkip;
- uint8_t CurrPSkipPowerShift;
- uint8_t CurrPSkipTDP;
- uint8_t CurrPSkipOCP;
- uint8_t MaxSPLLIndex;
- uint8_t MinSPLLIndex;
- uint8_t CurrSPLLIndex;
- uint8_t InfSweepMode;
- uint8_t InfSweepDir;
- uint8_t TDPexceeded;
- uint8_t reserved;
- uint8_t SwitchDownThreshold;
- uint32_t SwitchDownCounter;
- uint32_t SysScalingFactor;
-};
-
-typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
-
-struct PP_SIslands_DPM2Parameters
-{
- uint32_t TDPLimit;
- uint32_t NearTDPLimit;
- uint32_t SafePowerLimit;
- uint32_t PowerBoostLimit;
- uint32_t MinLimitDelta;
-};
-typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
-
-struct PP_SIslands_PAPMStatus
-{
- uint32_t EstimatedDGPU_T;
- uint32_t EstimatedDGPU_P;
- uint32_t EstimatedAPU_T;
- uint32_t EstimatedAPU_P;
- uint8_t dGPU_T_Limit_Exceeded;
- uint8_t reserved[3];
-};
-typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
-
-struct PP_SIslands_PAPMParameters
-{
- uint32_t NearTDPLimitTherm;
- uint32_t NearTDPLimitPAPM;
- uint32_t PlatformPowerLimit;
- uint32_t dGPU_T_Limit;
- uint32_t dGPU_T_Warning;
- uint32_t dGPU_T_Hysteresis;
-};
-typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
-
-struct SISLANDS_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
-
-struct SISLANDS_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
-
-struct SISLANDS_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t phase_settings;
-};
-
-typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
-
-struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t ACIndex;
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t UVDWatermark;
- uint8_t VCEWatermark;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint8_t padding;
- uint32_t aT;
- uint32_t bSP;
- SISLANDS_SMC_SCLK_VALUE sclk;
- SISLANDS_SMC_MCLK_VALUE mclk;
- SISLANDS_SMC_VOLTAGE_VALUE vddc;
- SISLANDS_SMC_VOLTAGE_VALUE mvdd;
- SISLANDS_SMC_VOLTAGE_VALUE vddci;
- SISLANDS_SMC_VOLTAGE_VALUE std_vddc;
- uint8_t hysteresisUp;
- uint8_t hysteresisDown;
- uint8_t stateFlags;
- uint8_t arbRefreshState;
- uint32_t SQPowerThrottle;
- uint32_t SQPowerThrottle_2;
- uint32_t MaxPoweredUpCU;
- SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc;
- SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc;
- uint32_t reserved[2];
- PP_SIslands_Dpm2PerfLevel dpm2;
-};
-
-#define SISLANDS_SMC_STROBE_RATIO 0x0F
-#define SISLANDS_SMC_STROBE_ENABLE 0x10
-
-#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01
-#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02
-#define SISLANDS_SMC_MC_RTT_ENABLE 0x04
-#define SISLANDS_SMC_MC_STUTTER_EN 0x08
-#define SISLANDS_SMC_MC_PG_EN 0x10
-
-typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-
-struct SISLANDS_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t levelCount;
- uint8_t padding2;
- uint8_t padding3;
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
-};
-
-typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
-
-#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
-#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
-#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
-#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
-#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
-
-struct SISLANDS_SMC_VOLTAGEMASKTABLE
-{
- uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
-
-#define SISLANDS_MAX_NO_VREG_STEPS 32
-
-struct SISLANDS_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
- SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
- PP_SIslands_DPM2Parameters dpm2Params;
- SISLANDS_SMC_SWSTATE initialState;
- SISLANDS_SMC_SWSTATE ACPIState;
- SISLANDS_SMC_SWSTATE ULVState;
- SISLANDS_SMC_SWSTATE driverState;
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
-};
-
-typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
-
-#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
-#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC
-#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28
-#define SI_SMC_SOFT_REGISTER_seq_index 0x5C
-#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60
-#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70
-#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78
-#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88
-#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C
-#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98
-#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8
-#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4
-#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
-#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
-#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
-#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
-#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
-#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
-#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
-#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
-
-struct PP_SIslands_FanTable
-{
- uint8_t fdo_mode;
- uint8_t padding;
- int16_t temp_min;
- int16_t temp_med;
- int16_t temp_max;
- int16_t slope1;
- int16_t slope2;
- int16_t fdo_min;
- int16_t hys_up;
- int16_t hys_down;
- int16_t hys_slope;
- int16_t temp_resp_lim;
- int16_t temp_curr;
- int16_t slope_curr;
- int16_t pwm_curr;
- uint32_t refresh_period;
- int16_t fdo_max;
- uint8_t temp_src;
- int8_t padding2;
-};
-
-typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
-
-#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
-#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
-
-#define SMC_SISLANDS_SCALE_I 7
-#define SMC_SISLANDS_SCALE_R 12
-
-struct PP_SIslands_CacConfig
-{
- uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
- uint32_t lkge_lut_V0;
- uint32_t lkge_lut_Vstep;
- uint32_t WinTime;
- uint32_t R_LL;
- uint32_t calculation_repeats;
- uint32_t l2numWin_TDP;
- uint32_t dc_cac;
- uint8_t lts_truncate_n;
- uint8_t SHIFT_N;
- uint8_t log2_PG_LKG_SCALE;
- uint8_t cac_temp;
- uint32_t lkge_lut_T0;
- uint32_t lkge_lut_Tstep;
-};
-
-typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
-
-#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
-#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
-
-struct SMC_SIslands_MCRegisterAddress
-{
- uint16_t s0;
- uint16_t s1;
-};
-
-typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
-
-struct SMC_SIslands_MCRegisterSet
-{
- uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
-
-struct SMC_SIslands_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
- SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
-};
-
-typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
-
-struct SMC_SIslands_MCArbDramTimingRegisterSet
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint8_t mc_arb_rfsh_rate;
- uint8_t mc_arb_burst_time;
- uint8_t padding[2];
-};
-
-typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
-
-struct SMC_SIslands_MCArbDramTimingRegisters
-{
- uint8_t arb_current;
- uint8_t reserved[3];
- SMC_SIslands_MCArbDramTimingRegisterSet data[16];
-};
-
-typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
-
-struct SMC_SISLANDS_SPLL_DIV_TABLE
-{
- uint32_t freq[256];
- uint32_t ss[256];
-};
-
-#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff
-#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
-#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000
-#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25
-#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff
-#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0
-#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000
-#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20
-
-typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
-
-#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
-
-#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
-
-struct Smc_SIslands_DTE_Configuration
-{
- uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
- uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
- uint32_t K;
- uint32_t T0;
- uint32_t MaxT;
- uint8_t WindowSize;
- uint8_t Tdep_count;
- uint8_t temp_select;
- uint8_t DTE_mode;
- uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- uint32_t Tthreshold;
-};
-
-typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
-
-#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
-
-#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
-
-#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0
-#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4
-#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC
-#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10
-#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x14
-#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18
-#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24
-#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
-#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38
-#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40
-#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48
-
-#pragma pack(pop)
-
-int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit);
-void amdgpu_si_start_smc(struct amdgpu_device *adev);
-void amdgpu_si_reset_smc(struct amdgpu_device *adev);
-int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev);
-void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable);
-bool amdgpu_si_is_smc_running(struct amdgpu_device *adev);
-PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
-PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev);
-int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
-int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 *value, u32 limit);
-int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 value, u32 limit);
-
-#endif
-