Diffstat (limited to 'drivers/cpufreq')
 drivers/cpufreq/amd-pstate.c   | 30 ++++++++++++++++++++++++------
 drivers/cpufreq/cppc_cpufreq.c |  2 +-
 drivers/cpufreq/intel_pstate.c | 16 ++++++++--------
 3 files changed, 33 insertions(+), 15 deletions(-)
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 15e201d5e911..b63863f77c67 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -536,11 +536,16 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
 
 static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
 {
-	u32 max_limit_perf, min_limit_perf, lowest_perf;
+	u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf;
 	struct amd_cpudata *cpudata = policy->driver_data;
 
-	max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
-	min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+	if (cpudata->boost_supported && !policy->boost_enabled)
+		max_perf = READ_ONCE(cpudata->nominal_perf);
+	else
+		max_perf = READ_ONCE(cpudata->highest_perf);
+
+	max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
+	min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
 
 	lowest_perf = READ_ONCE(cpudata->lowest_perf);
 	if (min_limit_perf < lowest_perf)
@@ -1201,11 +1206,21 @@ static int amd_pstate_register_driver(int mode)
 		return -EINVAL;
 
 	cppc_state = mode;
+
+	ret = amd_pstate_enable(true);
+	if (ret) {
+		pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
+		       ret);
+		amd_pstate_driver_cleanup();
+		return ret;
+	}
+
 	ret = cpufreq_register_driver(current_pstate_driver);
 	if (ret) {
 		amd_pstate_driver_cleanup();
 		return ret;
 	}
+
 	return 0;
 }
 
@@ -1496,10 +1511,13 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 	u64 value;
 	s16 epp;
 
-	max_perf = READ_ONCE(cpudata->highest_perf);
+	if (cpudata->boost_supported && !policy->boost_enabled)
+		max_perf = READ_ONCE(cpudata->nominal_perf);
+	else
+		max_perf = READ_ONCE(cpudata->highest_perf);
 	min_perf = READ_ONCE(cpudata->lowest_perf);
-	max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
-	min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+	max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
+	min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
 
 	if (min_limit_perf < min_perf)
 		min_limit_perf = min_perf;
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 1a5ad184d28f..2b8708475ac7 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -22,7 +22,7 @@
 #include <linux/vmalloc.h>
 #include <uapi/linux/sched/types.h>
 
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 #include <acpi/cppc_acpi.h>
 
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aaea9a39eced..b0018f371ea3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
 static cpumask_t hwp_intr_enable_mask;
 
 #define HWP_GUARANTEED_PERF_CHANGE_STATUS	BIT(0)
@@ -1868,7 +1868,7 @@ void notify_hwp_interrupt(void)
 	if (!(value & status_mask))
 		return;
 
-	spin_lock_irqsave(&hwp_notify_lock, flags);
+	raw_spin_lock_irqsave(&hwp_notify_lock, flags);
 
 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
 		goto ack_intr;
@@ -1876,13 +1876,13 @@ void notify_hwp_interrupt(void)
 	schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
 			      msecs_to_jiffies(10));
 
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 
 	return;
 
 ack_intr:
 	wrmsrl_safe(MSR_HWP_STATUS, 0);
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-	spin_lock_irq(&hwp_notify_lock);
+	raw_spin_lock_irq(&hwp_notify_lock);
 	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-	spin_unlock_irq(&hwp_notify_lock);
+	raw_spin_unlock_irq(&hwp_notify_lock);
 
 	if (cancel_work)
 		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
 		u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
 
-		spin_lock_irq(&hwp_notify_lock);
+		raw_spin_lock_irq(&hwp_notify_lock);
 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-		spin_unlock_irq(&hwp_notify_lock);
+		raw_spin_unlock_irq(&hwp_notify_lock);
 
 		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
 			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;