Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c           |   3
-rw-r--r--  drivers/cpufreq/amd_freq_sensitivity.c   |   3
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c           |   2
-rw-r--r--  drivers/cpufreq/cpufreq.c                |  19
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c   |   6
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c       |  16
-rw-r--r--  drivers/cpufreq/intel_pstate.c           | 120
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c    |   2
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c        |   4
-rw-r--r--  drivers/cpufreq/s3c2440-cpufreq.c        |   2
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c        |   2
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c       |   4
-rw-r--r--  drivers/cpufreq/tegra194-cpufreq.c       |   8
13 files changed, 164 insertions, 27 deletions
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 28467d83c745..3d514b82d055 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -470,7 +470,8 @@ static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	if (policy->cached_target_freq == target_freq)
 		index = policy->cached_resolved_idx;
 	else
-		index = cpufreq_table_find_index_dl(policy, target_freq);
+		index = cpufreq_table_find_index_dl(policy, target_freq,
+						    false);
 
 	entry = &policy->freq_table[index];
 	next_freq = entry->frequency;
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index d0b10baf039a..6448e03bcf48 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -91,7 +91,8 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
 		unsigned int index;
 
 		index = cpufreq_table_find_index_h(policy,
-						   policy->cur - 1);
+						   policy->cur - 1,
+						   relation & CPUFREQ_RELATION_E);
 		freq_next = policy->freq_table[index].frequency;
 	}
 
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index d4c27022b9c9..db17196266e4 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -741,8 +741,6 @@ static int __init cppc_cpufreq_init(void)
 	if ((acpi_disabled) || !acpi_cpc_valid())
 		return -ENODEV;
 
-	INIT_LIST_HEAD(&cpu_data_list);
-
 	cppc_check_hisi_workaround();
 	cppc_freq_invariance_init();
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5782b15a8caa..e338d2f010fe 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -554,7 +554,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
 					 unsigned int target_freq)
 {
-	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_L);
+	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
 
@@ -2260,8 +2260,16 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
 		return 0;
 
-	if (cpufreq_driver->target)
+	if (cpufreq_driver->target) {
+		/*
+		 * If the driver hasn't setup a single inefficient frequency,
+		 * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
+		 */
+		if (!policy->efficiencies_available)
+			relation &= ~CPUFREQ_RELATION_E;
+
 		return cpufreq_driver->target(policy, target_freq, relation);
+	}
 
 	if (!cpufreq_driver->target_index)
 		return -EINVAL;
@@ -2523,8 +2531,15 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	if (ret)
 		return ret;
 
+	/*
+	 * Resolve policy min/max to available frequencies. It ensures
+	 * no frequency resolution will neither overshoot the requested maximum
+	 * nor undershoot the requested minimum.
+	 */
 	policy->min = new_data.min;
 	policy->max = new_data.max;
+	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
+	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
 	trace_cpu_frequency_limits(policy);
 
 	policy->cached_target_freq = UINT_MAX;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index aa39ff31ec9f..0879ec3c170c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -111,7 +111,8 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
 		if (requested_freq > policy->max)
 			requested_freq = policy->max;
 
-		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+		__cpufreq_driver_target(policy, requested_freq,
+					CPUFREQ_RELATION_HE);
 		dbs_info->requested_freq = requested_freq;
 		goto out;
 	}
@@ -134,7 +135,8 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
 		else
 			requested_freq = policy->min;
 
-		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+		__cpufreq_driver_target(policy, requested_freq,
+					CPUFREQ_RELATION_LE);
 		dbs_info->requested_freq = requested_freq;
 	}
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index eb4320b619c9..3b8f924771b4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -83,9 +83,11 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
 	freq_avg = freq_req - freq_reduc;
 
 	/* Find freq bounds for freq_avg in freq_table */
-	index = cpufreq_table_find_index_h(policy, freq_avg);
+	index = cpufreq_table_find_index_h(policy, freq_avg,
+					   relation & CPUFREQ_RELATION_E);
 	freq_lo = freq_table[index].frequency;
-	index = cpufreq_table_find_index_l(policy, freq_avg);
+	index = cpufreq_table_find_index_l(policy, freq_avg,
+					   relation & CPUFREQ_RELATION_E);
 	freq_hi = freq_table[index].frequency;
 
 	/* Find out how long we have to be in hi and lo freqs */
@@ -118,12 +120,12 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
 
 	if (od_tuners->powersave_bias)
 		freq = od_ops.powersave_bias_target(policy, freq,
-						    CPUFREQ_RELATION_H);
+						    CPUFREQ_RELATION_HE);
 	else if (policy->cur == policy->max)
 		return;
 
 	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
-			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+			CPUFREQ_RELATION_LE : CPUFREQ_RELATION_HE);
 }
 
 /*
@@ -161,9 +163,9 @@ static void od_update(struct cpufreq_policy *policy)
 		if (od_tuners->powersave_bias)
 			freq_next = od_ops.powersave_bias_target(policy,
 								 freq_next,
-								 CPUFREQ_RELATION_L);
+								 CPUFREQ_RELATION_LE);
 
-		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
+		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_CE);
 	}
 }
 
@@ -182,7 +184,7 @@ static unsigned int od_dbs_update(struct cpufreq_policy *policy)
 	 */
 	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
 		__cpufreq_driver_target(policy, dbs_info->freq_lo,
-					CPUFREQ_RELATION_H);
+					CPUFREQ_RELATION_HE);
 		return dbs_info->freq_lo_delay_us;
 	}
 
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8c176b7dae41..349ddbaef796 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -32,6 +32,7 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 #include <asm/intel-family.h>
+#include "../drivers/thermal/intel/thermal_interrupt.h"
 
 #define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)
 
@@ -219,6 +220,7 @@ struct global_params {
  * @sched_flags:	Store scheduler flags for possible cross CPU update
  * @hwp_boost_min:	Last HWP boosted min performance
  * @suspended:		Whether or not the driver has been suspended.
+ * @hwp_notify_work:	workqueue for HWP notifications.
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -257,6 +259,7 @@ struct cpudata {
 	unsigned int sched_flags;
 	u32 hwp_boost_min;
 	bool suspended;
+	struct delayed_work hwp_notify_work;
 };
 
 static struct cpudata **all_cpu_data;
@@ -537,7 +540,8 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
 	 * scaling factor is too high, so recompute it to make the HWP_CAP
 	 * highest performance correspond to the maximum turbo frequency.
 	 */
-	if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
+	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
+	if (turbo_freq < cpu->pstate.turbo_freq) {
 		cpu->pstate.turbo_freq = turbo_freq;
 		scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
 		cpu->pstate.scaling = scaling;
@@ -985,11 +989,15 @@ skip_epp:
 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
 
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
+
 static void intel_pstate_hwp_offline(struct cpudata *cpu)
 {
 	u64 value = READ_ONCE(cpu->hwp_req_cached);
 	int min_perf;
 
+	intel_pstate_disable_hwp_interrupt(cpu);
+
 	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		/*
 		 * In case the EPP has been set to "performance" by the
@@ -1053,6 +1061,9 @@ static int intel_pstate_suspend(struct cpufreq_policy *policy)
 
 	cpu->suspended = true;
 
+	/* disable HWP interrupt and cancel any pending work */
+	intel_pstate_disable_hwp_interrupt(cpu);
+
 	return 0;
 }
 
@@ -1546,15 +1557,105 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
 
 /************************** sysfs end ************************/
 
+static void intel_pstate_notify_work(struct work_struct *work)
+{
+	struct cpudata *cpudata =
+		container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+
+	cpufreq_update_policy(cpudata->cpu);
+	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+}
+
+static DEFINE_SPINLOCK(hwp_notify_lock);
+static cpumask_t hwp_intr_enable_mask;
+
+void notify_hwp_interrupt(void)
+{
+	unsigned int this_cpu = smp_processor_id();
+	struct cpudata *cpudata;
+	unsigned long flags;
+	u64 value;
+
+	if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+		return;
+
+	rdmsrl_safe(MSR_HWP_STATUS, &value);
+	if (!(value & 0x01))
+		return;
+
+	spin_lock_irqsave(&hwp_notify_lock, flags);
+
+	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
+		goto ack_intr;
+
+	/*
+	 * Currently we never free all_cpu_data. And we can't reach here
+	 * without this allocated. But for safety for future changes, added
+	 * check.
+	 */
+	if (unlikely(!READ_ONCE(all_cpu_data)))
+		goto ack_intr;
+
+	/*
+	 * The free is done during cleanup, when cpufreq registry is failed.
+	 * We wouldn't be here if it fails on init or switch status. But for
+	 * future changes, added check.
+	 */
+	cpudata = READ_ONCE(all_cpu_data[this_cpu]);
+	if (unlikely(!cpudata))
+		goto ack_intr;
+
+	schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+
+	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+	return;
+
+ack_intr:
+	wrmsrl_safe(MSR_HWP_STATUS, 0);
+	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
+{
+	unsigned long flags;
+
+	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
+	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
+	spin_lock_irqsave(&hwp_notify_lock, flags);
+	if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
+		cancel_delayed_work(&cpudata->hwp_notify_work);
+	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
+{
+	/* Enable HWP notification interrupt for guaranteed performance change */
+	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&hwp_notify_lock, flags);
+		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
+		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+		spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+		/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
+		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+	}
+}
+
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-	/* First disable HWP notification interrupt as we don't process them */
+	/* First disable HWP notification interrupt till we activate again */
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 	if (cpudata->epp_default == -EINVAL)
 		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+
+	intel_pstate_enable_hwp_interrupt(cpudata);
 }
 
 static int atom_get_min_pstate(void)
@@ -2266,7 +2367,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 		if (!cpu)
 			return -ENOMEM;
 
-		all_cpu_data[cpunum] = cpu;
+		WRITE_ONCE(all_cpu_data[cpunum], cpu);
 
 		cpu->cpu = cpunum;
 
@@ -2929,8 +3030,10 @@ static void intel_pstate_driver_cleanup(void)
 			if (intel_pstate_driver == &intel_pstate)
 				intel_pstate_clear_update_util_hook(cpu);
 
+			spin_lock(&hwp_notify_lock);
 			kfree(all_cpu_data[cpu]);
-			all_cpu_data[cpu] = NULL;
+			WRITE_ONCE(all_cpu_data[cpu], NULL);
+			spin_unlock(&hwp_notify_lock);
 		}
 	}
 	cpus_read_unlock();
@@ -3199,6 +3302,7 @@ static bool intel_pstate_hwp_is_enabled(void)
 
 static int __init intel_pstate_init(void)
 {
+	static struct cpudata **_all_cpu_data;
 	const struct x86_cpu_id *id;
 	int rc;
 
@@ -3224,7 +3328,7 @@ static int __init intel_pstate_init(void)
 	 * deal with it.
 	 */
 	if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
-		hwp_active++;
+		WRITE_ONCE(hwp_active, 1);
 		hwp_mode_bdw = id->driver_data;
 		intel_pstate.attr = hwp_cpufreq_attrs;
 		intel_cpufreq.attr = hwp_cpufreq_attrs;
@@ -3275,10 +3379,12 @@ hwp_cpu_matched:
 
 	pr_info("Intel P-state driver initializing\n");
 
-	all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
-	if (!all_cpu_data)
+	_all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
+	if (!_all_cpu_data)
 		return -ENOMEM;
 
+	WRITE_ONCE(all_cpu_data, _all_cpu_data);
+
 	intel_pstate_request_control_from_smm();
 
 	intel_pstate_sysfs_expose_params();
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 0cf18dd46b92..8ddbd0c5ce37 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -109,7 +109,7 @@ static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
 	struct mtk_cpufreq_data *data = policy->driver_data;
 	unsigned int index;
 
-	index = cpufreq_table_find_index_dl(policy, target_freq);
+	index = cpufreq_table_find_index_dl(policy, target_freq, false);
 
 	writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
 
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 5a2cf5f91ccb..fddbd1ea1635 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -934,7 +934,7 @@ static void powernv_cpufreq_work_fn(struct work_struct *work)
 		policy = cpufreq_cpu_get(cpu);
 		if (!policy)
 			continue;
-		index = cpufreq_table_find_index_c(policy, policy->cur);
+		index = cpufreq_table_find_index_c(policy, policy->cur, false);
 		powernv_cpufreq_target_index(policy, index);
 		cpumask_andnot(&mask, &mask, policy->cpus);
 		cpufreq_cpu_put(policy);
@@ -1022,7 +1022,7 @@ static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
 	int index;
 	struct powernv_smp_call_data freq_data;
 
-	index = cpufreq_table_find_index_dl(policy, target_freq);
+	index = cpufreq_table_find_index_dl(policy, target_freq, false);
 	freq_data.pstate_id = powernv_freqs[index].driver_data;
 	freq_data.gpstate_id = powernv_freqs[index].driver_data;
 	set_pstate(&freq_data);
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 148e8aedefa9..2011fb9c03a4 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -173,12 +173,14 @@ static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
 
 	case 6:
 		camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
+		fallthrough;
 	case 3:
 		clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
 		break;
 
 	case 8:
 		camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
+		fallthrough;
 	case 4:
 		clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
 		break;
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index ad7d4f272ddc..76c888ed8d16 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -243,7 +243,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
 	new_freq = s5pv210_freq_table[index].frequency;
 
 	/* Finding current running level index */
-	priv_index = cpufreq_table_find_index_h(policy, old_freq);
+	priv_index = cpufreq_table_find_index_h(policy, old_freq, false);
 
 	arm_volt = dvs_conf[index].arm_volt;
 	int_volt = dvs_conf[index].int_volt;
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 5d1943e787b0..6c88827f4e62 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -159,6 +159,10 @@ static struct cpufreq_frequency_table *init_vhint_table(
 		table = ERR_PTR(err);
 		goto free;
 	}
+	if (msg.rx.ret) {
+		table = ERR_PTR(-EINVAL);
+		goto free;
+	}
 
 	for (i = data->vfloor; i <= data->vceil; i++) {
 		u16 ndiv = data->ndiv[i];
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index a9620e4489ae..ac381db25dbe 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -242,7 +242,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
 
 	smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);
 
-	if (cl >= data->num_clusters)
+	if (cl >= data->num_clusters || !data->tables[cl])
 		return -EINVAL;
 
 	/* set same policy for all cpus in a cluster */
@@ -310,6 +310,12 @@ init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
 	err = tegra_bpmp_transfer(bpmp, &msg);
 	if (err)
 		return ERR_PTR(err);
+	if (msg.rx.ret == -BPMP_EINVAL) {
+		/* Cluster not available */
+		return NULL;
+	}
+	if (msg.rx.ret)
+		return ERR_PTR(-EINVAL);
 
 	/*
 	 * Make sure frequency table step is a multiple of mdiv to match