Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--  kernel/cpu.c  144
1 file changed, 105 insertions, 39 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 88a7ede322bd..f6811c857102 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -592,7 +592,10 @@ static void lockdep_release_cpus_lock(void)
 void __weak arch_smt_update(void) { }
 
 #ifdef CONFIG_HOTPLUG_SMT
+
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+static unsigned int cpu_smt_max_threads __ro_after_init;
+unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
 
 void __init cpu_smt_disable(bool force)
 {
@@ -606,16 +609,33 @@ void __init cpu_smt_disable(bool force)
 		pr_info("SMT: disabled\n");
 		cpu_smt_control = CPU_SMT_DISABLED;
 	}
+	cpu_smt_num_threads = 1;
 }
 
 /*
  * The decision whether SMT is supported can only be done after the full
  * CPU identification. Called from architecture code.
  */
-void __init cpu_smt_check_topology(void)
+void __init cpu_smt_set_num_threads(unsigned int num_threads,
+				    unsigned int max_threads)
 {
-	if (!topology_smt_supported())
+	WARN_ON(!num_threads || (num_threads > max_threads));
+
+	if (max_threads == 1)
 		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+
+	cpu_smt_max_threads = max_threads;
+
+	/*
+	 * If SMT has been disabled via the kernel command line or SMT is
+	 * not supported, set cpu_smt_num_threads to 1 for consistency.
+	 * If enabled, take the architecture requested number of threads
+	 * to bring up into account.
+	 */
+	if (cpu_smt_control != CPU_SMT_ENABLED)
+		cpu_smt_num_threads = 1;
+	else if (num_threads < cpu_smt_num_threads)
+		cpu_smt_num_threads = num_threads;
 }
 
 static int __init smt_cmdline_disable(char *str)
@@ -625,9 +645,23 @@ static int __init smt_cmdline_disable(char *str)
 }
 early_param("nosmt", smt_cmdline_disable);
 
+/*
+ * For Archicture supporting partial SMT states check if the thread is allowed.
+ * Otherwise this has already been checked through cpu_smt_max_threads when
+ * setting the SMT level.
+ */
+static inline bool cpu_smt_thread_allowed(unsigned int cpu)
+{
+#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
+	return topology_smt_thread_allowed(cpu);
+#else
+	return true;
+#endif
+}
+
 static inline bool cpu_smt_allowed(unsigned int cpu)
 {
-	if (cpu_smt_control == CPU_SMT_ENABLED)
+	if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
 		return true;
 
 	if (topology_is_primary_thread(cpu))
@@ -642,7 +676,7 @@ static inline bool cpu_smt_allowed(unsigned int cpu)
 	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
 }
 
-/* Returns true if SMT is not supported of forcefully (irreversibly) disabled */
+/* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
 bool cpu_smt_possible(void)
 {
 	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
@@ -650,22 +684,8 @@ bool cpu_smt_possible(void)
 }
 EXPORT_SYMBOL_GPL(cpu_smt_possible);
 
-static inline bool cpuhp_smt_aware(void)
-{
-	return topology_smt_supported();
-}
-
-static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
-{
-	return cpu_primary_thread_mask;
-}
 #else
 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
-static inline bool cpuhp_smt_aware(void) { return false; }
-static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
-{
-	return cpu_present_mask;
-}
 #endif
 
 static inline enum cpuhp_state
@@ -1793,6 +1813,16 @@ static int __init parallel_bringup_parse_param(char *arg)
 }
 early_param("cpuhp.parallel", parallel_bringup_parse_param);
 
+static inline bool cpuhp_smt_aware(void)
+{
+	return cpu_smt_max_threads > 1;
+}
+
+static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
+{
+	return cpu_primary_thread_mask;
+}
+
 /*
  * On architectures which have enabled parallel bringup this invokes all BP
  * prepare states for each of the to be onlined APs first. The last state
@@ -2626,6 +2656,12 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 	for_each_online_cpu(cpu) {
 		if (topology_is_primary_thread(cpu))
 			continue;
+		/*
+		 * Disable can be called with CPU_SMT_ENABLED when changing
+		 * from a higher to lower number of SMT threads per core.
+		 */
+		if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
+			continue;
 		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
 		if (ret)
 			break;
@@ -2660,6 +2696,8 @@ int cpuhp_smt_enable(void)
 		/* Skip online CPUs and CPUs on offline nodes */
 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
 			continue;
+		if (!cpu_smt_thread_allowed(cpu))
+			continue;
 		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
 		if (ret)
 			break;
@@ -2838,20 +2876,19 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
 
 #ifdef CONFIG_HOTPLUG_SMT
 
+static bool cpu_smt_num_threads_valid(unsigned int threads)
+{
+	if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
+		return threads >= 1 && threads <= cpu_smt_max_threads;
+	return threads == 1 || threads == cpu_smt_max_threads;
+}
+
 static ssize_t
 __store_smt_control(struct device *dev, struct device_attribute *attr,
 		    const char *buf, size_t count)
 {
-	int ctrlval, ret;
-
-	if (sysfs_streq(buf, "on"))
-		ctrlval = CPU_SMT_ENABLED;
-	else if (sysfs_streq(buf, "off"))
-		ctrlval = CPU_SMT_DISABLED;
-	else if (sysfs_streq(buf, "forceoff"))
-		ctrlval = CPU_SMT_FORCE_DISABLED;
-	else
-		return -EINVAL;
+	int ctrlval, ret, num_threads, orig_threads;
+	bool force_off;
 
 	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
 		return -EPERM;
@@ -2859,21 +2896,39 @@ __store_smt_control(struct device *dev, struct device_attribute *attr,
 	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
 		return -ENODEV;
 
+	if (sysfs_streq(buf, "on")) {
+		ctrlval = CPU_SMT_ENABLED;
+		num_threads = cpu_smt_max_threads;
+	} else if (sysfs_streq(buf, "off")) {
+		ctrlval = CPU_SMT_DISABLED;
+		num_threads = 1;
+	} else if (sysfs_streq(buf, "forceoff")) {
+		ctrlval = CPU_SMT_FORCE_DISABLED;
+		num_threads = 1;
+	} else if (kstrtoint(buf, 10, &num_threads) == 0) {
+		if (num_threads == 1)
+			ctrlval = CPU_SMT_DISABLED;
+		else if (cpu_smt_num_threads_valid(num_threads))
+			ctrlval = CPU_SMT_ENABLED;
+		else
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+
 	ret = lock_device_hotplug_sysfs();
 	if (ret)
 		return ret;
 
-	if (ctrlval != cpu_smt_control) {
-		switch (ctrlval) {
-		case CPU_SMT_ENABLED:
-			ret = cpuhp_smt_enable();
-			break;
-		case CPU_SMT_DISABLED:
-		case CPU_SMT_FORCE_DISABLED:
-			ret = cpuhp_smt_disable(ctrlval);
-			break;
-		}
-	}
+	orig_threads = cpu_smt_num_threads;
+	cpu_smt_num_threads = num_threads;
+
+	force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
+
+	if (num_threads > orig_threads)
+		ret = cpuhp_smt_enable();
+	else if (num_threads < orig_threads || force_off)
+		ret = cpuhp_smt_disable(ctrlval);
 
 	unlock_device_hotplug();
 	return ret ? ret : count;
@@ -2901,6 +2956,17 @@ static ssize_t control_show(struct device *dev,
 {
 	const char *state = smt_states[cpu_smt_control];
 
+#ifdef CONFIG_HOTPLUG_SMT
+	/*
+	 * If SMT is enabled but not all threads are enabled then show the
+	 * number of threads. If all threads are enabled show "on". Otherwise
+	 * show the state name.
+	 */
+	if (cpu_smt_control == CPU_SMT_ENABLED &&
+	    cpu_smt_num_threads != cpu_smt_max_threads)
+		return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
+#endif
+
 	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
 }
 
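cpu_smt_set_num_threads() replaces cpu_smt_check_topology() and, per the comment above it, is called from architecture code once the CPU topology is known. A minimal sketch of such a call site follows; arch_smt_init(), threads_per_core and smt_threads_requested are illustrative names only, not identifiers introduced by this diff.

/*
 * Sketch only, not part of this diff. The two values would normally be
 * derived from firmware/CPUID and an arch-specific command line option.
 */
static unsigned int threads_per_core __initdata = 8;
static unsigned int smt_threads_requested __initdata = 8;

void __init arch_smt_init(void)
{
	/* Report the hardware limit and the requested bring-up count. */
	cpu_smt_set_num_threads(smt_threads_requested, threads_per_core);
}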
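cpu_smt_thread_allowed() defers to topology_smt_thread_allowed() only when the architecture selects CONFIG_SMT_NUM_THREADS_DYNAMIC. The sketch below shows one plausible shape for that hook, assuming sibling threads are numbered 0..N-1 within each core; arch_thread_index_in_core() is a hypothetical helper standing in for whatever the architecture actually provides.

/*
 * Sketch only, not part of this diff: keep the first cpu_smt_num_threads
 * siblings of every core online-capable and refuse the rest.
 */
static inline bool topology_smt_thread_allowed(unsigned int cpu)
{
	return arch_thread_index_in_core(cpu) < cpu_smt_num_threads;
}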
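The net effect on the sysfs interface is that /sys/devices/system/cpu/smt/control now also accepts a thread count (for example, writing 4), and control_show() reports that count instead of "on" while only some threads are enabled. The standalone program below models cpu_smt_num_threads_valid() and the grow/shrink decision from __store_smt_control() so the accepted values can be checked in userspace; it assumes cpu_smt_max_threads is 8 and CONFIG_SMT_NUM_THREADS_DYNAMIC is enabled, and it is an illustration, not kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SMT_MAX_THREADS	8	/* stand-in for cpu_smt_max_threads */
#define SMT_DYNAMIC	true	/* stand-in for CONFIG_SMT_NUM_THREADS_DYNAMIC */

/* Mirrors cpu_smt_num_threads_valid() from the diff above. */
static bool smt_num_threads_valid(unsigned int threads)
{
	if (SMT_DYNAMIC)
		return threads >= 1 && threads <= SMT_MAX_THREADS;
	/* Without dynamic support only one thread or all threads are valid. */
	return threads == 1 || threads == SMT_MAX_THREADS;
}

int main(void)
{
	unsigned int cur = SMT_MAX_THREADS;	/* stand-in for cpu_smt_num_threads */
	unsigned int requests[] = { 1, 2, 8, 12 };

	for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		unsigned int req = requests[i];

		if (!smt_num_threads_valid(req)) {
			printf("write %u -> -EINVAL\n", req);
			continue;
		}
		/* Same shape as the diff: grow -> enable, shrink -> disable. */
		if (req > cur)
			printf("write %u -> cpuhp_smt_enable()\n", req);
		else if (req < cur)
			printf("write %u -> cpuhp_smt_disable()\n", req);
		else
			printf("write %u -> no hotplug action\n", req);
		cur = req;
	}
	return 0;
}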
