Diffstat (limited to 'drivers/idle/intel_idle.c')
-rw-r--r--	drivers/idle/intel_idle.c	480
1 file changed, 330 insertions(+), 150 deletions(-)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 75fd2a7b0842..9f38ff02a7b8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -41,6 +41,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/acpi.h>
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
 #include <linux/tick.h>
@@ -79,6 +80,7 @@ struct idle_cpu {
 	unsigned long auto_demotion_disable_flags;
 	bool byt_auto_demotion_disable_flag;
 	bool disable_promotion_to_c1e;
+	bool use_acpi;
 };
 
 static const struct idle_cpu *icpu;
@@ -90,6 +92,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
 static struct cpuidle_state *cpuidle_state_table;
 
 /*
+ * Enable this state by default even if the ACPI _CST does not list it.
+ */
+#define CPUIDLE_FLAG_ALWAYS_ENABLE	BIT(15)
+
+/*
  * Set this flag for states where the HW flushes the TLB for us
  * and so we don't need cross-calls to keep it consistent.
  * If this flag is set, SW flushes the TLB, so even if the
@@ -124,7 +131,7 @@ static struct cpuidle_state nehalem_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 20,
 		.enter = &intel_idle,
@@ -161,7 +168,7 @@ static struct cpuidle_state snb_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 20,
 		.enter = &intel_idle,
@@ -296,7 +303,7 @@ static struct cpuidle_state ivb_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 20,
 		.enter = &intel_idle,
@@ -341,7 +348,7 @@ static struct cpuidle_state ivt_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 80,
 		.enter = &intel_idle,
@@ -378,7 +385,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 250,
 		.enter = &intel_idle,
@@ -415,7 +422,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 500,
 		.enter = &intel_idle,
@@ -452,7 +459,7 @@ static struct cpuidle_state hsw_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 20,
 		.enter = &intel_idle,
@@ -520,7 +527,7 @@ static struct cpuidle_state bdw_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 20,
 		.enter = &intel_idle,
@@ -589,7 +596,7 @@ static struct cpuidle_state skl_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 20,
 		.enter = &intel_idle,
@@ -658,7 +665,7 @@ static struct cpuidle_state skx_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 20,
 		.enter = &intel_idle,
@@ -808,7 +815,7 @@ static struct cpuidle_state bxt_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 20,
 		.enter = &intel_idle,
@@ -869,7 +876,7 @@ static struct cpuidle_state dnv_cstates[] = {
 	{
 		.name = "C1E",
 		.desc = "MWAIT 0x01",
-		.flags = MWAIT2flg(0x01),
+		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
 		.exit_latency = 10,
 		.target_residency = 20,
 		.enter = &intel_idle,
@@ -944,37 +951,19 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
 	mwait_idle_with_hints(eax, ecx);
 }
 
-static void __setup_broadcast_timer(bool on)
-{
-	if (on)
-		tick_broadcast_enable();
-	else
-		tick_broadcast_disable();
-}
-
-static void auto_demotion_disable(void)
-{
-	unsigned long long msr_bits;
-
-	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
-	msr_bits &= ~(icpu->auto_demotion_disable_flags);
-	wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
-}
-static void c1e_promotion_disable(void)
-{
-	unsigned long long msr_bits;
-
-	rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
-	msr_bits &= ~0x2;
-	wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
-}
-
 static const struct idle_cpu idle_cpu_nehalem = {
 	.state_table = nehalem_cstates,
 	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_nhx = {
+	.state_table = nehalem_cstates,
+	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_atom = {
 	.state_table = atom_cstates,
 };
@@ -993,6 +982,12 @@ static const struct idle_cpu idle_cpu_snb = {
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_snx = {
+	.state_table = snb_cstates,
+	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_byt = {
 	.state_table = byt_cstates,
 	.disable_promotion_to_c1e = true,
@@ -1013,6 +1008,7 @@ static const struct idle_cpu idle_cpu_ivb = {
 static const struct idle_cpu idle_cpu_ivt = {
 	.state_table = ivt_cstates,
 	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_hsw = {
@@ -1020,11 +1016,23 @@ static const struct idle_cpu idle_cpu_hsw = {
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_hsx = {
+	.state_table = hsw_cstates,
+	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_bdw = {
 	.state_table = bdw_cstates,
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_bdx = {
+	.state_table = bdw_cstates,
+	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_skl = {
 	.state_table = skl_cstates,
 	.disable_promotion_to_c1e = true,
@@ -1033,15 +1041,18 @@ static const struct idle_cpu idle_cpu_skl = {
 static const struct idle_cpu idle_cpu_skx = {
 	.state_table = skx_cstates,
 	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_avn = {
 	.state_table = avn_cstates,
 	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_knl = {
 	.state_table = knl_cstates,
+	.use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_bxt = {
@@ -1052,20 +1063,21 @@ static const struct idle_cpu idle_cpu_bxt = {
 static const struct idle_cpu idle_cpu_dnv = {
 	.state_table = dnv_cstates,
 	.disable_promotion_to_c1e = true,
+	.use_acpi = true,
 };
 
 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
-	INTEL_CPU_FAM6(NEHALEM_EP,		idle_cpu_nehalem),
+	INTEL_CPU_FAM6(NEHALEM_EP,		idle_cpu_nhx),
 	INTEL_CPU_FAM6(NEHALEM,			idle_cpu_nehalem),
 	INTEL_CPU_FAM6(NEHALEM_G,		idle_cpu_nehalem),
 	INTEL_CPU_FAM6(WESTMERE,		idle_cpu_nehalem),
-	INTEL_CPU_FAM6(WESTMERE_EP,		idle_cpu_nehalem),
-	INTEL_CPU_FAM6(NEHALEM_EX,		idle_cpu_nehalem),
+	INTEL_CPU_FAM6(WESTMERE_EP,		idle_cpu_nhx),
+	INTEL_CPU_FAM6(NEHALEM_EX,		idle_cpu_nhx),
 	INTEL_CPU_FAM6(ATOM_BONNELL,		idle_cpu_atom),
 	INTEL_CPU_FAM6(ATOM_BONNELL_MID,	idle_cpu_lincroft),
-	INTEL_CPU_FAM6(WESTMERE_EX,		idle_cpu_nehalem),
+	INTEL_CPU_FAM6(WESTMERE_EX,		idle_cpu_nhx),
 	INTEL_CPU_FAM6(SANDYBRIDGE,		idle_cpu_snb),
-	INTEL_CPU_FAM6(SANDYBRIDGE_X,		idle_cpu_snb),
+	INTEL_CPU_FAM6(SANDYBRIDGE_X,		idle_cpu_snx),
 	INTEL_CPU_FAM6(ATOM_SALTWELL,		idle_cpu_atom),
 	INTEL_CPU_FAM6(ATOM_SILVERMONT,		idle_cpu_byt),
 	INTEL_CPU_FAM6(ATOM_SILVERMONT_MID,	idle_cpu_tangier),
@@ -1073,14 +1085,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	INTEL_CPU_FAM6(IVYBRIDGE,		idle_cpu_ivb),
 	INTEL_CPU_FAM6(IVYBRIDGE_X,		idle_cpu_ivt),
 	INTEL_CPU_FAM6(HASWELL,			idle_cpu_hsw),
-	INTEL_CPU_FAM6(HASWELL_X,		idle_cpu_hsw),
+	INTEL_CPU_FAM6(HASWELL_X,		idle_cpu_hsx),
 	INTEL_CPU_FAM6(HASWELL_L,		idle_cpu_hsw),
 	INTEL_CPU_FAM6(HASWELL_G,		idle_cpu_hsw),
 	INTEL_CPU_FAM6(ATOM_SILVERMONT_D,	idle_cpu_avn),
 	INTEL_CPU_FAM6(BROADWELL,		idle_cpu_bdw),
 	INTEL_CPU_FAM6(BROADWELL_G,		idle_cpu_bdw),
-	INTEL_CPU_FAM6(BROADWELL_X,		idle_cpu_bdw),
-	INTEL_CPU_FAM6(BROADWELL_D,		idle_cpu_bdw),
+	INTEL_CPU_FAM6(BROADWELL_X,		idle_cpu_bdx),
+	INTEL_CPU_FAM6(BROADWELL_D,		idle_cpu_bdx),
 	INTEL_CPU_FAM6(SKYLAKE_L,		idle_cpu_skl),
 	INTEL_CPU_FAM6(SKYLAKE,			idle_cpu_skl),
 	INTEL_CPU_FAM6(KABYLAKE_L,		idle_cpu_skl),
@@ -1095,68 +1107,161 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	{}
 };
 
-/*
- * intel_idle_probe()
+#define INTEL_CPU_FAM6_MWAIT \
+	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 }
+
+static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
+	INTEL_CPU_FAM6_MWAIT,
+	{}
+};
+
+static bool __init intel_idle_max_cstate_reached(int cstate)
+{
+	if (cstate + 1 > max_cstate) {
+		pr_info("max_cstate %d reached\n", max_cstate);
+		return true;
+	}
+	return false;
+}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+#include <acpi/processor.h>
+
+static bool no_acpi __read_mostly;
+module_param(no_acpi, bool, 0444);
+MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");
+
+static struct acpi_processor_power acpi_state_table __initdata;
+
+/**
+ * intel_idle_cst_usable - Check if the _CST information can be used.
+ *
+ * Check if all of the C-states listed by _CST in the max_cstate range are
+ * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
  */
-static int __init intel_idle_probe(void)
+static bool __init intel_idle_cst_usable(void)
 {
-	unsigned int eax, ebx, ecx;
-	const struct x86_cpu_id *id;
+	int cstate, limit;
 
-	if (max_cstate == 0) {
-		pr_debug("disabled\n");
-		return -EPERM;
+	limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
+		      acpi_state_table.count);
+
+	for (cstate = 1; cstate < limit; cstate++) {
+		struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];
+
+		if (cx->entry_method != ACPI_CSTATE_FFH)
+			return false;
 	}
 
-	id = x86_match_cpu(intel_idle_ids);
-	if (!id) {
-		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-		    boot_cpu_data.x86 == 6)
-			pr_debug("does not run on family %d model %d\n",
-				 boot_cpu_data.x86, boot_cpu_data.x86_model);
-		return -ENODEV;
+	return true;
+}
+
+static bool __init intel_idle_acpi_cst_extract(void)
+{
+	unsigned int cpu;
+
+	if (no_acpi) {
+		pr_debug("Not allowed to use ACPI _CST\n");
+		return false;
 	}
 
-	if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
-		pr_debug("Please enable MWAIT in BIOS SETUP\n");
-		return -ENODEV;
+	for_each_possible_cpu(cpu) {
+		struct acpi_processor *pr = per_cpu(processors, cpu);
+
+		if (!pr)
+			continue;
+
+		if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
+			continue;
+
+		acpi_state_table.count++;
+
+		if (!intel_idle_cst_usable())
+			continue;
+
+		if (!acpi_processor_claim_cst_control()) {
+			acpi_state_table.count = 0;
+			return false;
+		}
+
+		return true;
 	}
 
-	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
-		return -ENODEV;
+	pr_debug("ACPI _CST not found or not usable\n");
+	return false;
+}
 
-	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
+{
+	int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
 
-	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
-	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
-	    !mwait_substates)
-			return -ENODEV;
+	/*
+	 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+	 * the interesting states are ACPI_CSTATE_FFH.
+	 */
+	for (cstate = 1; cstate < limit; cstate++) {
+		struct acpi_processor_cx *cx;
+		struct cpuidle_state *state;
 
-	pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+		if (intel_idle_max_cstate_reached(cstate))
+			break;
 
-	icpu = (const struct idle_cpu *)id->driver_data;
-	cpuidle_state_table = icpu->state_table;
+		cx = &acpi_state_table.states[cstate];
 
-	pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
-		 boot_cpu_data.x86_model);
+		state = &drv->states[drv->state_count++];
 
-	return 0;
+		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
+		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+		state->exit_latency = cx->latency;
+		/*
+		 * For C1-type C-states use the same number for both the exit
+		 * latency and target residency, because that is the case for
+		 * C1 in the majority of the static C-states tables above.
+		 * For the other types of C-states, however, set the target
+		 * residency to 3 times the exit latency which should lead to
+		 * a reasonable balance between energy-efficiency and
+		 * performance in the majority of interesting cases.
+		 */
+		state->target_residency = cx->latency;
+		if (cx->type > ACPI_STATE_C1)
+			state->target_residency *= 3;
+
+		state->flags = MWAIT2flg(cx->address);
+		if (cx->type > ACPI_STATE_C2)
+			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+
+		state->enter = intel_idle;
+		state->enter_s2idle = intel_idle_s2idle;
+	}
 }
 
-/*
- * intel_idle_cpuidle_devices_uninit()
- * Unregisters the cpuidle devices.
- */
-static void intel_idle_cpuidle_devices_uninit(void)
+static bool __init intel_idle_off_by_default(u32 mwait_hint)
 {
-	int i;
-	struct cpuidle_device *dev;
+	int cstate, limit;
+
+	/*
+	 * If there are no _CST C-states, do not disable any C-states by
+	 * default.
+	 */
+	if (!acpi_state_table.count)
+		return false;
 
-	for_each_online_cpu(i) {
-		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
-		cpuidle_unregister_device(dev);
+	limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+	/*
+	 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+	 * the interesting states are ACPI_CSTATE_FFH.
+	 */
+	for (cstate = 1; cstate < limit; cstate++) {
+		if (acpi_state_table.states[cstate].address == mwait_hint)
+			return false;
 	}
+	return true;
 }
+#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+static inline bool intel_idle_acpi_cst_extract(void) { return false; }
+static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
+static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
+#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
 
 /*
  * ivt_idle_state_table_update(void)
@@ -1164,7 +1269,7 @@ static void intel_idle_cpuidle_devices_uninit(void)
  * Tune IVT multi-socket targets
  * Assumption: num_sockets == (max_package_num + 1)
  */
-static void ivt_idle_state_table_update(void)
+static void __init ivt_idle_state_table_update(void)
 {
 	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
 	int cpu, package_num, num_sockets = 1;
@@ -1187,15 +1292,17 @@ static void ivt_idle_state_table_update(void)
 	/* else, 1 and 2 socket systems use default ivt_cstates */
 }
 
-/*
- * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+/**
+ * irtl_2_usec - IRTL to microseconds conversion.
+ * @irtl: IRTL MSR value.
+ *
+ * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
  */
-
-static unsigned int irtl_ns_units[] = {
-	1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
-
-static unsigned long long irtl_2_usec(unsigned long long irtl)
+static unsigned long long __init irtl_2_usec(unsigned long long irtl)
 {
+	static const unsigned int irtl_ns_units[] __initconst = {
+		1, 32, 1024, 32768, 1048576, 33554432, 0, 0
+	};
 	unsigned long long ns;
 
 	if (!irtl)
@@ -1203,15 +1310,16 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
 
 	ns = irtl_ns_units[(irtl >> 10) & 0x7];
 
-	return div64_u64((irtl & 0x3FF) * ns, 1000);
+	return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
 }
+
 /*
  * bxt_idle_state_table_update(void)
 *
 * On BXT, we trust the IRTL to show the definitive maximum latency
 * We use the same value for target_residency.
 */
-static void bxt_idle_state_table_update(void)
+static void __init bxt_idle_state_table_update(void)
 {
 	unsigned long long msr;
 	unsigned int usec;
@@ -1258,7 +1366,7 @@ static void bxt_idle_state_table_update(void)
 * On SKL-H (model 0x5e) disable C8 and C9 if:
 * C10 is enabled and SGX disabled
 */
-static void sklh_idle_state_table_update(void)
+static void __init sklh_idle_state_table_update(void)
 {
 	unsigned long long msr;
 	unsigned int eax, ebx, ecx, edx;
@@ -1294,16 +1402,28 @@ static void sklh_idle_state_table_update(void)
 	skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C8-SKL */
 	skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C9-SKL */
 }
 
-/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
- */
-static void intel_idle_state_table_update(void)
+static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
 {
-	switch (boot_cpu_data.x86_model) {
+	unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
+	unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
+					MWAIT_SUBSTATE_MASK;
+
+	/* Ignore the C-state if there are NO sub-states in CPUID for it. */
+	if (num_substates == 0)
+		return false;
+
+	if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+		mark_tsc_unstable("TSC halts in idle states deeper than C2");
+
+	return true;
+}
+
+static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
+{
+	int cstate;
+
+	switch (boot_cpu_data.x86_model) {
 	case INTEL_FAM6_IVYBRIDGE_X:
 		ivt_idle_state_table_update();
 		break;
@@ -1315,62 +1435,36 @@ static void intel_idle_state_table_update(void)
 		sklh_idle_state_table_update();
 		break;
 	}
-}
-
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
- */
-static void __init intel_idle_cpuidle_driver_init(void)
-{
-	int cstate;
-	struct cpuidle_driver *drv = &intel_idle_driver;
-
-	intel_idle_state_table_update();
-
-	cpuidle_poll_state_init(drv);
-	drv->state_count = 1;
 
 	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
-		int num_substates, mwait_hint, mwait_cstate;
+		unsigned int mwait_hint;
 
-		if ((cpuidle_state_table[cstate].enter == NULL) &&
-		    (cpuidle_state_table[cstate].enter_s2idle == NULL))
+		if (intel_idle_max_cstate_reached(cstate))
 			break;
 
-		if (cstate + 1 > max_cstate) {
-			pr_info("max_cstate %d reached\n", max_cstate);
+		if (!cpuidle_state_table[cstate].enter &&
+		    !cpuidle_state_table[cstate].enter_s2idle)
 			break;
-		}
-
-		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
-		mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
-
-		/* number of sub-states for this state in CPUID.MWAIT */
-		num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
-					& MWAIT_SUBSTATE_MASK;
-		/* if NO sub-states for this state in CPUID, skip it */
-		if (num_substates == 0)
-			continue;
-
-		/* if state marked as disabled, skip it */
+		/* If marked as unusable, skip this state. */
 		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
 			pr_debug("state %s is disabled\n",
 				 cpuidle_state_table[cstate].name);
 			continue;
 		}
 
+		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+		if (!intel_idle_verify_cstate(mwait_hint))
+			continue;
 
-		if (((mwait_cstate + 1) > 2) &&
-			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
-			mark_tsc_unstable("TSC halts in idle"
-					" states deeper than C2");
+		/* Structure copy. */
+		drv->states[drv->state_count] = cpuidle_state_table[cstate];
 
-		drv->states[drv->state_count] =	/* structure copy */
-			cpuidle_state_table[cstate];
+		if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) &&
+		    !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE))
+			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
 
-		drv->state_count += 1;
+		drv->state_count++;
 	}
 
 	if (icpu->byt_auto_demotion_disable_flag) {
@@ -1379,6 +1473,38 @@ static void __init intel_idle_cpuidle_driver_init(void)
 	}
 }
 
+/*
+ * intel_idle_cpuidle_driver_init()
+ * allocate, initialize cpuidle_states
+ */
+static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
+{
+	cpuidle_poll_state_init(drv);
+	drv->state_count = 1;
+
+	if (icpu)
+		intel_idle_init_cstates_icpu(drv);
+	else
+		intel_idle_init_cstates_acpi(drv);
+}
+
+static void auto_demotion_disable(void)
+{
+	unsigned long long msr_bits;
+
+	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+	msr_bits &= ~(icpu->auto_demotion_disable_flags);
+	wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+}
+
+static void c1e_promotion_disable(void)
+{
+	unsigned long long msr_bits;
+
+	rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+	msr_bits &= ~0x2;
+	wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
+
 /*
 * intel_idle_cpu_init()
@@ -1397,6 +1523,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
 		return -EIO;
 	}
 
+	if (!icpu)
+		return 0;
+
 	if (icpu->auto_demotion_disable_flags)
 		auto_demotion_disable();
 
@@ -1411,7 +1540,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
 	struct cpuidle_device *dev;
 
 	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
-		__setup_broadcast_timer(true);
+		tick_broadcast_enable();
 
 	/*
 	 * Some systems can hotplug a cpu at runtime after
@@ -1425,23 +1554,74 @@ static int intel_idle_cpu_online(unsigned int cpu)
 	return 0;
 }
 
+/**
+ * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
+ */
+static void __init intel_idle_cpuidle_devices_uninit(void)
+{
+	int i;
+
+	for_each_online_cpu(i)
+		cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
+}
+
 static int __init intel_idle_init(void)
 {
+	const struct x86_cpu_id *id;
+	unsigned int eax, ebx, ecx;
 	int retval;
 
 	/* Do not load intel_idle at all for now if idle= is passed */
 	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
 		return -ENODEV;
 
-	retval = intel_idle_probe();
-	if (retval)
-		return retval;
+	if (max_cstate == 0) {
+		pr_debug("disabled\n");
+		return -EPERM;
+	}
+
+	id = x86_match_cpu(intel_idle_ids);
+	if (id) {
+		if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+			pr_debug("Please enable MWAIT in BIOS SETUP\n");
+			return -ENODEV;
+		}
+	} else {
+		id = x86_match_cpu(intel_mwait_ids);
+		if (!id)
+			return -ENODEV;
+	}
+
+	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+		return -ENODEV;
+
+	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+	    !mwait_substates)
+			return -ENODEV;
+
+	pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+
+	icpu = (const struct idle_cpu *)id->driver_data;
+	if (icpu) {
+		cpuidle_state_table = icpu->state_table;
+		if (icpu->use_acpi)
+			intel_idle_acpi_cst_extract();
+	} else if (!intel_idle_acpi_cst_extract()) {
+		return -ENODEV;
+	}
+
+	pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+		 boot_cpu_data.x86_model);
 
 	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-	if (intel_idle_cpuidle_devices == NULL)
+	if (!intel_idle_cpuidle_devices)
 		return -ENOMEM;
 
-	intel_idle_cpuidle_driver_init();
+	intel_idle_cpuidle_driver_init(&intel_idle_driver);
+
 	retval = cpuidle_register_driver(&intel_idle_driver);
 	if (retval) {
 		struct cpuidle_driver *drv = cpuidle_get_driver();
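A few notes on the logic introduced above, with small standalone C sketches; these are illustrative restatements, not code from the commit.

First, the behavioral core of the change: on models whose idle_cpu entry sets use_acpi, states from the static tables now start out disabled unless the firmware's _CST lists their MWAIT hint or the state carries the new CPUIDLE_FLAG_ALWAYS_ENABLE tag (which every C1E entry above now does). Because intel_idle is built in, the whole ACPI path can be vetoed at boot with intel_idle.no_acpi=1. A minimal sketch of the default-enablement rule (the helper name and bool parameters are illustrative):

#include <stdbool.h>

#define CPUIDLE_FLAG_ALWAYS_ENABLE	(1U << 15)	/* BIT(15) in the patch */

/*
 * Restatement of the check at the bottom of the state loop in
 * intel_idle_init_cstates_icpu(): a state starts out disabled
 * (CPUIDLE_FLAG_OFF) only if the CPU model opted in to ACPI, its MWAIT
 * hint is missing from _CST, and the state is not "always enable".
 */
bool state_starts_disabled(bool use_acpi, bool hint_in_cst, unsigned int flags)
{
	return use_acpi && !hint_in_cst && !(flags & CPUIDLE_FLAG_ALWAYS_ENABLE);
}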
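Second, when the states come from _CST instead of a static table, intel_idle_init_cstates_acpi() derives the tunables as described in its long comment. A compact sketch of that heuristic (the helper is illustrative; ACPI_STATE_C1 is 1 in the ACPI headers):

/*
 * C1-type states reuse the _CST exit latency as the target residency;
 * deeper states get 3 times the exit latency, which the patch calls a
 * reasonable balance between energy-efficiency and performance.
 */
unsigned int acpi_target_residency(unsigned int exit_latency_us, int acpi_type)
{
	return acpi_type > 1 /* ACPI_STATE_C1 */ ? 3 * exit_latency_us : exit_latency_us;
}

So a _CST C3 entry reporting a 40 us exit latency would be registered with a 120 us target residency, while a C1 entry keeps latency equal to residency, matching the static tables above.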
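Third, intel_idle_verify_cstate() factors out the CPUID.MWAIT sub-state check that the driver-init loop used to do inline, keeping the TSC-stability side effect with it. A sketch of the arithmetic, with the macros written out per their definitions in arch/x86/include/asm/mwait.h (the helper itself is illustrative):

/*
 * CPUID leaf 5 returns, in EDX (the driver's mwait_substates), a 4-bit
 * count of MWAIT sub-states for each C-state; the C-state index is the
 * high nibble of the MWAIT hint. A state with a zero count is ignored.
 */
#define MWAIT_SUBSTATE_MASK	0xf
#define MWAIT_HINT2CSTATE(hint)	(((hint) >> 4) & MWAIT_SUBSTATE_MASK)

int cstate_has_substates(unsigned int mwait_substates, unsigned int mwait_hint)
{
	unsigned int cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;

	return ((mwait_substates >> (cstate * 4)) & MWAIT_SUBSTATE_MASK) != 0;
}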
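Finally, the irtl_2_usec() cleanup keeps the same IRTL decoding: bits 9:0 of the MSR hold a time value and bits 12:10 index the nanosecond-unit table, which is now function-local. A userspace-compilable sketch with a worked example (the 0x883 input value is made up for illustration):

#include <stdio.h>

static const unsigned int irtl_ns_units[] = {
	1, 32, 1024, 32768, 1048576, 33554432, 0, 0
};

static unsigned long long irtl_2_usec(unsigned long long irtl)
{
	unsigned long long ns;

	if (!irtl)
		return 0;

	/* Bits 12:10 select the unit, bits 9:0 the multiplier. */
	ns = irtl_ns_units[(irtl >> 10) & 0x7];

	return (irtl & 0x3FF) * ns / 1000;	/* 1000 ns per usec */
}

int main(void)
{
	/* 0x883: unit index 2 (1024 ns), value 0x83 = 131 -> 134 usec */
	printf("%llu\n", irtl_2_usec(0x883));
	return 0;
}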
