From c227233ad64c77e57db738ab0e46439db71822a3 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 17 Sep 2021 10:20:22 +0300 Subject: intel_idle: enable interrupts before C1 on Xeons Enable local interrupts before requesting C1 on the last two generations of Intel Xeon platforms: Sky Lake, Cascade Lake, Cooper Lake, Ice Lake. This decreases average C1 interrupt latency by about 5-10%, as measured with the 'wult' tool. The '->enter()' function of the driver enters C-states with local interrupts disabled by executing the 'monitor' and 'mwait' pair of instructions. If an interrupt happens, the CPU exits the C-state and continues executing instructions after 'mwait'. It does not jump to the interrupt handler, because local interrupts are disabled. The cpuidle subsystem enables interrupts a bit later, after doing some housekeeping. With this patch, we enable local interrupts before requesting C1. In this case, if the CPU wakes up because of an interrupt, it will jump to the interrupt handler right away. The cpuidle housekeeping will be done after the pending interrupt(s) are handled. Enabling interrupts before entering a C-state has measurable impact for faster C-states, like C1. Deeper, but slower C-states like C6 do not really benefit from this sort of change, because their latency is a lot higher comparing to the delay added by cpuidle housekeeping. This change was also tested with cyclictest and dbench. In case of Ice Lake, the average cyclictest latency decreased by 5.1%, and the average 'dbench' throughput increased by about 0.8%. Both tests were run for 4 hours with only C1 enabled (all other idle states, including 'POLL', were disabled). CPU frequency was pinned to HFM, and uncore frequency was pinned to the maximum value. The other platforms had similar single-digit percentage improvements. It is worth noting that this patch affects 'cpuidle' statistics a tiny bit. Before this patch, C1 residency did not include the interrupt handling time, but with this patch, it will include it. This is similar to what happens in case of the 'POLL' state, which also runs with interrupts enabled. Suggested-by: Len Brown Signed-off-by: Artem Bityutskiy Signed-off-by: Rafael J. Wysocki --- drivers/idle/intel_idle.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index e6c543b5ee1d..0b66e25c0e2d 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -88,6 +88,12 @@ static struct cpuidle_state *cpuidle_state_table __initdata; static unsigned int mwait_substates __initdata; +/* + * Enable interrupts before entering the C-state. On some platforms and for + * some C-states, this may measurably decrease interrupt latency. + */ +#define CPUIDLE_FLAG_IRQ_ENABLE BIT(14) + /* * Enable this state by default even if the ACPI _CST does not list it. 
*/ @@ -127,6 +133,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev, unsigned long eax = flg2MWAIT(state->flags); unsigned long ecx = 1; /* break on interrupt flag */ + if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) + local_irq_enable(); + mwait_idle_with_hints(eax, ecx); return index; @@ -698,7 +707,7 @@ static struct cpuidle_state skx_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", - .flags = MWAIT2flg(0x00), + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE, .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, @@ -727,7 +736,7 @@ static struct cpuidle_state icx_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", - .flags = MWAIT2flg(0x00), + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE, .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, -- cgit v1.2.3-70-g09d2 From e5f5a66c9aa9c331da5527c2e3fd9394e7091e01 Mon Sep 17 00:00:00 2001 From: Anel Orazgaliyeva Date: Mon, 6 Sep 2021 18:34:40 +0000 Subject: cpuidle: Fix kobject memory leaks in error paths Commit c343bf1ba5ef ("cpuidle: Fix three reference count leaks") fixes the cleanup of kobjects; however, it removes kfree() calls altogether, leading to memory leaks. Fix those and also defer the initialization of dev->kobj_dev until after the error check, so that we do not end up with a dangling pointer. Fixes: c343bf1ba5ef ("cpuidle: Fix three reference count leaks") Signed-off-by: Anel Orazgaliyeva Suggested-by: Aman Priyadarshi [ rjw: Subject edits ] Signed-off-by: Rafael J. Wysocki --- drivers/cpuidle/sysfs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 53ec9585ccd4..469e18547d06 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -488,6 +488,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) &kdev->kobj, "state%d", i); if (ret) { kobject_put(&kobj->kobj); + kfree(kobj); goto error_state; } cpuidle_add_s2idle_attr_group(kobj); @@ -619,6 +620,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) &kdev->kobj, "driver"); if (ret) { kobject_put(&kdrv->kobj); + kfree(kdrv); return ret; } @@ -705,7 +707,6 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) if (!kdev) return -ENOMEM; kdev->dev = dev; - dev->kobj_dev = kdev; init_completion(&kdev->kobj_unregister); @@ -713,9 +714,11 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) "cpuidle"); if (error) { kobject_put(&kdev->kobj); + kfree(kdev); return error; } + dev->kobj_dev = kdev; kobject_uevent(&kdev->kobj, KOBJ_ADD); return 0; -- cgit v1.2.3-70-g09d2 From 45b2bb66209c723121158bd497e25645ee6197de Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Thu, 9 Sep 2021 11:47:14 -0700 Subject: cpufreq: vexpress: Drop unused variable arm:allmodconfig fails to build with the following error. drivers/cpufreq/vexpress-spc-cpufreq.c:454:13: error: unused variable 'cur_cluster' Remove the unused variable. 
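As background to the cpuidle sysfs fix above: the explicit kfree() after kobject_put() is needed there because the cpuidle ktype release callbacks only signal a completion and do not free the containing object, so dropping the reference alone leaks the allocation when kobject_init_and_add() fails. Below is a minimal, hedged sketch of that pattern with hypothetical names (foo_kobj, foo_ktype, foo_add); it is not code from any of the patches in this series, just an illustration of the error path being fixed.

#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/completion.h>

struct foo_kobj {
	struct kobject kobj;
	struct completion done;
};

static void foo_release(struct kobject *kobj)
{
	struct foo_kobj *f = container_of(kobj, struct foo_kobj, kobj);

	complete(&f->done);		/* signals waiters; does NOT free the object */
}

static struct kobj_type foo_ktype = {
	.release = foo_release,
};

static struct foo_kobj *foo_add(struct kobject *parent, int i)
{
	struct foo_kobj *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return NULL;
	init_completion(&f->done);

	ret = kobject_init_and_add(&f->kobj, &foo_ktype, parent, "foo%d", i);
	if (ret) {
		kobject_put(&f->kobj);	/* drops the reference, runs foo_release() */
		kfree(f);		/* release did not free the memory, so free it here */
		return NULL;
	}
	return f;
}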
Fixes: bb8c26d9387f ("cpufreq: vexpress: Set CPUFREQ_IS_COOLING_DEV flag") Cc: Viresh Kumar Signed-off-by: Guenter Roeck Reviewed-by: Kees Cook Signed-off-by: Viresh Kumar --- drivers/cpufreq/vexpress-spc-cpufreq.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 284b6bd040b1..d295f405c4bb 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -451,7 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev; - int cur_cluster = cpu_to_cluster(policy->cpu); cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { -- cgit v1.2.3-70-g09d2 From 08ef8d35a826ccf3ddddf7bfb7d84aaa22e8a4c4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 27 Sep 2021 11:51:44 +0200 Subject: cpufreq: s3c244x: add fallthrough comments for switch Apparently nobody has caught this warning so far; I hit it in randconfig build testing: drivers/cpufreq/s3c2440-cpufreq.c: In function 's3c2440_cpufreq_setdivs': drivers/cpufreq/s3c2440-cpufreq.c:175:10: error: this statement may fall through [-Werror=implicit-fallthrough=] camdiv |= S3C2440_CAMDIVN_HCLK3_HALF; ^ drivers/cpufreq/s3c2440-cpufreq.c:176:2: note: here case 3: ^~~~ drivers/cpufreq/s3c2440-cpufreq.c:181:10: error: this statement may fall through [-Werror=implicit-fallthrough=] camdiv |= S3C2440_CAMDIVN_HCLK4_HALF; ^ drivers/cpufreq/s3c2440-cpufreq.c:182:2: note: here case 4: ^~~~ Both fallthroughs look intentional, so add the "fallthrough;" pseudo-keyword. Signed-off-by: Arnd Bergmann Acked-by: Krzysztof Kozlowski Signed-off-by: Viresh Kumar --- drivers/cpufreq/s3c2440-cpufreq.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c index 148e8aedefa9..2011fb9c03a4 100644 --- a/drivers/cpufreq/s3c2440-cpufreq.c +++ b/drivers/cpufreq/s3c2440-cpufreq.c @@ -173,12 +173,14 @@ static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) case 6: camdiv |= S3C2440_CAMDIVN_HCLK3_HALF; + fallthrough; case 3: clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6; break; case 8: camdiv |= S3C2440_CAMDIVN_HCLK4_HALF; + fallthrough; case 4: clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8; break; -- cgit v1.2.3-70-g09d2 From 6065a672679f0502ede024a03db82c03534a5e00 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Sun, 15 Aug 2021 21:07:29 +0800 Subject: cpufreq: remove useless INIT_LIST_HEAD() The cpu_data_list list is already initialized statically via LIST_HEAD(), so there is no need to call INIT_LIST_HEAD() on it again. Simply remove the call from cppc_cpufreq_init().
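For reference, LIST_HEAD() both declares and initializes the list head at definition time, which is why a later INIT_LIST_HEAD() on the same list is redundant. A minimal sketch with hypothetical names (my_list, my_list_is_ready), not code from the patch itself:

#include <linux/list.h>

static LIST_HEAD(my_list);	/* defines and initializes the head in one step */

static bool my_list_is_ready(void)
{
	/* no INIT_LIST_HEAD(&my_list) needed - LIST_HEAD() already did it */
	return list_empty(&my_list);
}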
Signed-off-by: Han Wang Signed-off-by: Viresh Kumar --- drivers/cpufreq/cppc_cpufreq.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index d4c27022b9c9..db17196266e4 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -741,8 +741,6 @@ static int __init cppc_cpufreq_init(void) if ((acpi_disabled) || !acpi_cpc_valid()) return -ENODEV; - INIT_LIST_HEAD(&cpu_data_list); - cppc_check_hisi_workaround(); cppc_freq_invariance_init(); -- cgit v1.2.3-70-g09d2 From c2ace21f937a0c7a296d233b4441b99b66579bda Mon Sep 17 00:00:00 2001 From: Mikko Perttunen Date: Wed, 15 Sep 2021 11:55:16 +0300 Subject: cpufreq: tegra186/tegra194: Handle errors in BPMP response The return value from tegra_bpmp_transfer indicates the success or failure of the IPC transaction with BPMP. If the transaction succeeded, we also need to check the actual command's result code. Add code to do this. While at it, explicitly handle missing CPU clusters, which can occur on floorswept chips. This worked before as well, but possibly only by accident. Signed-off-by: Mikko Perttunen Signed-off-by: Viresh Kumar --- drivers/cpufreq/tegra186-cpufreq.c | 4 ++++ drivers/cpufreq/tegra194-cpufreq.c | 8 +++++++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 5d1943e787b0..6c88827f4e62 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -159,6 +159,10 @@ static struct cpufreq_frequency_table *init_vhint_table( table = ERR_PTR(err); goto free; } + if (msg.rx.ret) { + table = ERR_PTR(-EINVAL); + goto free; + } for (i = data->vfloor; i <= data->vceil; i++) { u16 ndiv = data->ndiv[i]; diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index a9620e4489ae..ac381db25dbe 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -242,7 +242,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy) smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true); - if (cl >= data->num_clusters) + if (cl >= data->num_clusters || !data->tables[cl]) return -EINVAL; /* set same policy for all cpus in a cluster */ @@ -310,6 +310,12 @@ init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp, err = tegra_bpmp_transfer(bpmp, &msg); if (err) return ERR_PTR(err); + if (msg.rx.ret == -BPMP_EINVAL) { + /* Cluster not available */ + return NULL; + } + if (msg.rx.ret) + return ERR_PTR(-EINVAL); /* * Make sure frequency table step is a multiple of mdiv to match -- cgit v1.2.3-70-g09d2 From 57577c996d731ce1e5a4a488e64e6e201b360847 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 28 Sep 2021 09:42:17 -0700 Subject: cpufreq: intel_pstate: Process HWP Guaranteed change notification The HWP guaranteed ratio may change in response to changes in power and thermal limits. For example, when the Intel Speed Select performance profile is changed or the TDP changes, the hardware can send a notification. The guaranteed ratio may also increase, which creates an issue when turbo is disabled: the old limits set in MSR_HWP_REQUEST are still lower, and the hardware will clip to those older limits. This change enables the HWP interrupt and processes HWP notifications. When the guaranteed ratio changes, the driver calls cpufreq_update_policy() so that the driver callbacks are invoked to apply the new HWP limits.
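The full implementation follows in the diff below; as a condensed, hedged sketch of the mechanism just described (simplified, hypothetical names hwp_cpu, hwp_notify_work_fn, hwp_interrupt; not the driver's real structure), the notification handler only schedules delayed work, and the work item re-evaluates the cpufreq policy and acknowledges the HWP status:

#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <asm/msr.h>

struct hwp_cpu {
	unsigned int cpu;
	struct delayed_work notify_work;	/* INIT_DELAYED_WORK() done at init */
};

static void hwp_notify_work_fn(struct work_struct *work)
{
	struct hwp_cpu *c = container_of(to_delayed_work(work),
					 struct hwp_cpu, notify_work);

	cpufreq_update_policy(c->cpu);			/* re-apply limits from HWP */
	wrmsrl_on_cpu(c->cpu, MSR_HWP_STATUS, 0);	/* acknowledge the event */
}

static void hwp_interrupt(struct hwp_cpu *c)
{
	/* debounce: coalesce a burst of notifications into one update after 10 ms */
	schedule_delayed_work(&c->notify_work, msecs_to_jiffies(10));
}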
cpufreq_update_policy() is invoked from a delayed work item with a 10 ms delay to avoid frequent updates. Although the scope of IA32_HWP_INTERRUPT is per logical CPU, on some platforms the interrupt is generated on all CPUs. This is particularly a problem during initialization, when the driver has not yet allocated data for the other CPUs. So this change uses a cpumask of enabled CPUs and processes interrupts only on those CPUs. When the cpufreq offline() or suspend() callback is called, the HWP interrupt is disabled on the affected CPUs and any pending work item is canceled. A spin lock is used to protect the data and processing shared with the interrupt handler. The READ_ONCE() and WRITE_ONCE() macros are used to mark the shared data, even though the spin lock already acts as an optimization barrier here. Signed-off-by: Srinivas Pandruvada Tested-by: pablomh@gmail.com Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 117 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 111 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 8c176b7dae41..facc56dd58dd 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -32,6 +32,7 @@ #include #include #include +#include "../drivers/thermal/intel/thermal_interrupt.h" #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) @@ -219,6 +220,7 @@ struct global_params { * @sched_flags: Store scheduler flags for possible cross CPU update * @hwp_boost_min: Last HWP boosted min performance * @suspended: Whether or not the driver has been suspended. + * @hwp_notify_work: workqueue for HWP notifications. * * This structure stores per CPU instance data for all CPUs. */ @@ -257,6 +259,7 @@ struct cpudata { unsigned int sched_flags; u32 hwp_boost_min; bool suspended; + struct delayed_work hwp_notify_work; }; static struct cpudata **all_cpu_data; @@ -985,11 +988,15 @@ skip_epp: wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); } +static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata); + static void intel_pstate_hwp_offline(struct cpudata *cpu) { u64 value = READ_ONCE(cpu->hwp_req_cached); int min_perf; + intel_pstate_disable_hwp_interrupt(cpu); + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { /* * In case the EPP has been set to "performance" by the @@ -1053,6 +1060,9 @@ static int intel_pstate_suspend(struct cpufreq_policy *policy) cpu->suspended = true; + /* disable HWP interrupt and cancel any pending work */ + intel_pstate_disable_hwp_interrupt(cpu); + return 0; } @@ -1546,15 +1556,105 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) /************************** sysfs end ************************/ +static void intel_pstate_notify_work(struct work_struct *work) +{ + struct cpudata *cpudata = + container_of(to_delayed_work(work), struct cpudata, hwp_notify_work); + + cpufreq_update_policy(cpudata->cpu); + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); +} + +static DEFINE_SPINLOCK(hwp_notify_lock); +static cpumask_t hwp_intr_enable_mask; + +void notify_hwp_interrupt(void) +{ + unsigned int this_cpu = smp_processor_id(); + struct cpudata *cpudata; + unsigned long flags; + u64 value; + + if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + return; + + rdmsrl_safe(MSR_HWP_STATUS, &value); + if (!(value & 0x01)) + return; + + spin_lock_irqsave(&hwp_notify_lock, flags); + + if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask)) + goto ack_intr; + + /* + * Currently we never free all_cpu_data. And we can't reach here + * without this allocated.
But for safety for future changes, added + * check. + */ + if (unlikely(!READ_ONCE(all_cpu_data))) + goto ack_intr; + + /* + * The free is done during cleanup, when cpufreq registry is failed. + * We wouldn't be here if it fails on init or switch status. But for + * future changes, added check. + */ + cpudata = READ_ONCE(all_cpu_data[this_cpu]); + if (unlikely(!cpudata)) + goto ack_intr; + + schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10)); + + spin_unlock_irqrestore(&hwp_notify_lock, flags); + + return; + +ack_intr: + wrmsrl_safe(MSR_HWP_STATUS, 0); + spin_unlock_irqrestore(&hwp_notify_lock, flags); +} + +static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) +{ + unsigned long flags; + + /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + + spin_lock_irqsave(&hwp_notify_lock, flags); + if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask)) + cancel_delayed_work(&cpudata->hwp_notify_work); + spin_unlock_irqrestore(&hwp_notify_lock, flags); +} + +static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) +{ + /* Enable HWP notification interrupt for guaranteed performance change */ + if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { + unsigned long flags; + + spin_lock_irqsave(&hwp_notify_lock, flags); + INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); + cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); + spin_unlock_irqrestore(&hwp_notify_lock, flags); + + /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); + } +} + static void intel_pstate_hwp_enable(struct cpudata *cpudata) { - /* First disable HWP notification interrupt as we don't process them */ + /* First disable HWP notification interrupt till we activate again */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); if (cpudata->epp_default == -EINVAL) cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); + + intel_pstate_enable_hwp_interrupt(cpudata); } static int atom_get_min_pstate(void) @@ -2266,7 +2366,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) if (!cpu) return -ENOMEM; - all_cpu_data[cpunum] = cpu; + WRITE_ONCE(all_cpu_data[cpunum], cpu); cpu->cpu = cpunum; @@ -2929,8 +3029,10 @@ static void intel_pstate_driver_cleanup(void) if (intel_pstate_driver == &intel_pstate) intel_pstate_clear_update_util_hook(cpu); + spin_lock(&hwp_notify_lock); kfree(all_cpu_data[cpu]); - all_cpu_data[cpu] = NULL; + WRITE_ONCE(all_cpu_data[cpu], NULL); + spin_unlock(&hwp_notify_lock); } } cpus_read_unlock(); @@ -3199,6 +3301,7 @@ static bool intel_pstate_hwp_is_enabled(void) static int __init intel_pstate_init(void) { + static struct cpudata **_all_cpu_data; const struct x86_cpu_id *id; int rc; @@ -3224,7 +3327,7 @@ static int __init intel_pstate_init(void) * deal with it. 
*/ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { - hwp_active++; + WRITE_ONCE(hwp_active, 1); hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; intel_cpufreq.attr = hwp_cpufreq_attrs; @@ -3275,10 +3378,12 @@ hwp_cpu_matched: pr_info("Intel P-state driver initializing\n"); - all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); - if (!all_cpu_data) + _all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); + if (!_all_cpu_data) return -ENOMEM; + WRITE_ONCE(all_cpu_data, _all_cpu_data); + intel_pstate_request_control_from_smm(); intel_pstate_sysfs_expose_params(); -- cgit v1.2.3-70-g09d2 From c72bcf0ab87a92634e58af62e89af0f40dfd0b88 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Tue, 26 Oct 2021 16:32:42 +0800 Subject: cpufreq: intel_pstate: Fix cpu->pstate.turbo_freq initialization Fix a problem in active mode that cpu->pstate.turbo_freq is initialized only if HWP-to-frequency scaling factor is refined. In passive mode, this problem is not exposed, because cpu->pstate.turbo_freq is set again, later in intel_cpufreq_cpu_init()->intel_pstate_get_hwp_cap(). Fixes: eb3693f0521e ("cpufreq: intel_pstate: hybrid: CPU-specific scaling factor") Signed-off-by: Zhang Rui Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index facc56dd58dd..349ddbaef796 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -540,7 +540,8 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) * scaling factor is too high, so recompute it to make the HWP_CAP * highest performance correspond to the maximum turbo frequency. */ - if (turbo_freq < cpu->pstate.turbo_pstate * scaling) { + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; + if (turbo_freq < cpu->pstate.turbo_freq) { cpu->pstate.turbo_freq = turbo_freq; scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate); cpu->pstate.scaling = scaling; -- cgit v1.2.3-70-g09d2
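To make the scaling-factor refinement in the last patch concrete, here is a small worked example with made-up numbers (40 perf units, 100000 kHz per unit, and a 3,800,000 kHz turbo frequency are illustrative only, not values from any real CPU): the turbo frequency is first derived from turbo_pstate * scaling, and the scaling factor is only recomputed when the known turbo frequency turns out to be lower.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int turbo_pstate = 40;		/* HWP highest performance (assumed) */
	unsigned int scaling = 100000;		/* kHz per perf unit (assumed) */
	unsigned int turbo_freq = 3800000;	/* known max turbo frequency, kHz (assumed) */
	unsigned int hwp_turbo_freq = turbo_pstate * scaling;	/* 4,000,000 kHz */

	if (turbo_freq < hwp_turbo_freq) {
		/* make HWP_CAP highest perf correspond to the real turbo frequency */
		scaling = DIV_ROUND_UP(turbo_freq, turbo_pstate);	/* 95000 */
	}
	printf("scaling = %u kHz per perf unit\n", scaling);
	return 0;
}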