author     Daniel Vetter <daniel.vetter@ffwll.ch>  2021-12-14 09:43:25 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2021-12-14 10:24:28 +0100
commit     99b03ca651f1c409d296d6c6e9440d9b005c722f (patch)
tree       8773b8cbab631258b561ddb3d9b219072095bee4 /arch/x86/kernel/tsc_sync.c
parent     211b4dbc070090b4183d6f9db7dd3bd4e6170447 (diff)
parent     2585cf9dfaaddf00b069673f27bb3f8530e2039c (diff)
Merge v5.16-rc5 into drm-next
Thomas Zimmermann requested a fixes backmerge, specifically also for
96c5f82ef0a1 ("drm/vc4: fix error code in vc4_create_object()")
Just a bunch of conflicts from adjacent changes, even the big pile of
them in vc4.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'arch/x86/kernel/tsc_sync.c')
-rw-r--r--	arch/x86/kernel/tsc_sync.c	41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 50a4515fe0ad..9452dc9664b5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -30,6 +30,7 @@ struct tsc_adjust {
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
 
 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
 	}
 }
 
+/*
+ * Normally the tsc_sync will be checked every time system enters idle
+ * state, but there is still caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL	(HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+	int next_cpu;
+
+	tsc_verify_tsc_adjust(false);
+
+	/* Run the check for all onlined CPUs in turn */
+	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(cpu_online_mask);
+
+	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+	add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+		return 0;
+
+	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+	add_timer(&tsc_sync_check_timer);
+
+	return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
 				   unsigned int cpu, bool bootcpu)
 {
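For readers unfamiliar with the pattern the hunk above relies on: it is a
self-rearming kernel timer that, on each expiry, picks the next online CPU
and re-arms itself there, so over time the check runs on every CPU even if
none of them go idle. The standalone module below is a rough sketch of that
pattern only, not the patch itself; the module name, the pr_info() message
and the CHECK_INTERVAL constant are illustrative placeholders.

// Sketch: a periodic timer that rotates across online CPUs, in the style
// of the tsc_sync_check_timer added by the diff above.
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/jiffies.h>

#define CHECK_INTERVAL	(HZ * 600)	/* every 10 minutes, as in the patch */

static struct timer_list check_timer;

static void check_timer_fn(struct timer_list *unused)
{
	int next_cpu;

	/* Periodic work goes here; the patch calls tsc_verify_tsc_adjust(false). */
	pr_info("periodic check ran on CPU %d\n", raw_smp_processor_id());

	/* Pick the next online CPU, wrapping to the first one at the end. */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/* Re-arm the same timer on that CPU so every CPU gets a turn. */
	check_timer.expires += CHECK_INTERVAL;
	add_timer_on(&check_timer, next_cpu);
}

static int __init check_timer_init(void)
{
	timer_setup(&check_timer, check_timer_fn, 0);
	check_timer.expires = jiffies + CHECK_INTERVAL;
	add_timer(&check_timer);
	return 0;
}

static void __exit check_timer_exit(void)
{
	del_timer_sync(&check_timer);
}

module_init(check_timer_init);
module_exit(check_timer_exit);
MODULE_LICENSE("GPL");

The kernel patch uses late_initcall() instead of module_init() and never
tears the timer down, since the check is meant to run for the lifetime of
the system; the module form above only adds the exit path so the sketch is
loadable and unloadable on its own.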