author     Linus Torvalds <torvalds@linux-foundation.org>   2024-03-11 17:29:55 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-03-11 17:29:55 -0700
commit     2edfd1046f555db6456514bc8ffe0847537e54f4 (patch)
tree       177ed7d83825d1c6709f7609e87fff0b5e01fdbf /arch/x86/include/asm/resctrl.h
parent     bfdb395a7cde12d83a623949ed029b0ab38d765b (diff)
parent     c0d848fcb09d80a5f48b99f85e448185125ef59f (diff)
Merge tag 'x86_cache_for_v6.9_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull resource control updates from Borislav Petkov:
- Rework different aspects of the resctrl code, like adding
  arch-specific accessors and splitting the locking, in order to
  accommodate ARM's MPAM implementation of hw resource control and be
  able to use the same filesystem control interface as on x86 (see
  the sketch after this list). Work by James Morse
- Improve the memory bandwidth throttling heuristic to handle workloads
  with irregular load levels which would otherwise be penalized
  unnecessarily
- Use CPUID to detect the memory bandwidth enforcement limit on AMD
- The usual set of fixes
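
To make the arch/fs split concrete: filesystem code is expected to probe
capability and flip the static keys only through the resctrl_arch_*()
helpers added in the diff below, so an MPAM backend can supply its own
definitions. A minimal sketch of such a caller; the function name and
error code here are hypothetical, only the resctrl_arch_*() and
cpus_read_lock() calls are real:

/* Hypothetical fs-side caller; the helpers are from the diff below. */
static int example_enable_monitoring(void)
{
	if (!resctrl_arch_mon_capable())
		return -EINVAL;

	/*
	 * The arch helpers use the _cpuslocked static-branch variants,
	 * so the caller must hold cpus_read_lock().
	 */
	cpus_read_lock();
	resctrl_arch_enable_mon();
	cpus_read_unlock();

	return 0;
}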
* tag 'x86_cache_for_v6.9_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
x86/resctrl: Remove lockdep annotation that triggers false positive
x86/resctrl: Separate arch and fs resctrl locks
x86/resctrl: Move domain helper migration into resctrl_offline_cpu()
x86/resctrl: Add CPU offline callback for resctrl work
x86/resctrl: Allow overflow/limbo handlers to be scheduled on any-but CPU
x86/resctrl: Add CPU online callback for resctrl work
x86/resctrl: Add helpers for system wide mon/alloc capable
x86/resctrl: Make rdt_enable_key the arch's decision to switch
x86/resctrl: Move alloc/mon static keys into helpers
x86/resctrl: Make resctrl_mounted checks explicit
x86/resctrl: Allow arch to allocate memory needed in resctrl_arch_rmid_read()
x86/resctrl: Allow resctrl_arch_rmid_read() to sleep
x86/resctrl: Queue mon_event_read() instead of sending an IPI
x86/resctrl: Add cpumask_any_housekeeping() for limbo/overflow
x86/resctrl: Move CLOSID/RMID matching and setting to use helpers
x86/resctrl: Allocate the cleanest CLOSID by searching closid_num_dirty_rmid
x86/resctrl: Use __set_bit()/__clear_bit() instead of open coding
x86/resctrl: Track the number of dirty RMID a CLOSID has
x86/resctrl: Allow RMID allocation to be scoped by CLOSID
x86/resctrl: Access per-rmid structures by index
...
Diffstat (limited to 'arch/x86/include/asm/resctrl.h')
-rw-r--r--   arch/x86/include/asm/resctrl.h   90
1 file changed, 90 insertions, 0 deletions
diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
index 255a78d9d906..12dbd2588ca7 100644
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -7,6 +7,13 @@
 #include <linux/sched.h>
 #include <linux/jump_label.h>
 
+/*
+ * This value can never be a valid CLOSID, and is used when mapping a
+ * (closid, rmid) pair to an index and back. On x86 only the RMID is
+ * needed. The index is a software defined value.
+ */
+#define X86_RESCTRL_EMPTY_CLOSID  ((u32)~0)
+
 /**
  * struct resctrl_pqr_state - State cache for the PQR MSR
  * @cur_rmid:		The cached Resource Monitoring ID
@@ -31,10 +38,47 @@ struct resctrl_pqr_state {
 
 DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 
+extern bool rdt_alloc_capable;
+extern bool rdt_mon_capable;
+
 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 
+static inline bool resctrl_arch_alloc_capable(void)
+{
+	return rdt_alloc_capable;
+}
+
+static inline void resctrl_arch_enable_alloc(void)
+{
+	static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
+	static_branch_inc_cpuslocked(&rdt_enable_key);
+}
+
+static inline void resctrl_arch_disable_alloc(void)
+{
+	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
+	static_branch_dec_cpuslocked(&rdt_enable_key);
+}
+
+static inline bool resctrl_arch_mon_capable(void)
+{
+	return rdt_mon_capable;
+}
+
+static inline void resctrl_arch_enable_mon(void)
+{
+	static_branch_enable_cpuslocked(&rdt_mon_enable_key);
+	static_branch_inc_cpuslocked(&rdt_enable_key);
+}
+
+static inline void resctrl_arch_disable_mon(void)
+{
+	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
+	static_branch_dec_cpuslocked(&rdt_enable_key);
+}
+
 /*
  * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
  *
@@ -88,12 +132,58 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
 	return val * scale;
 }
 
+static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk,
+						u32 closid, u32 rmid)
+{
+	WRITE_ONCE(tsk->closid, closid);
+	WRITE_ONCE(tsk->rmid, rmid);
+}
+
+static inline bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid)
+{
+	return READ_ONCE(tsk->closid) == closid;
+}
+
+static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored,
+					   u32 rmid)
+{
+	return READ_ONCE(tsk->rmid) == rmid;
+}
+
 static inline void resctrl_sched_in(struct task_struct *tsk)
 {
 	if (static_branch_likely(&rdt_enable_key))
 		__resctrl_sched_in(tsk);
 }
 
+static inline u32 resctrl_arch_system_num_rmid_idx(void)
+{
+	/* RMID are independent numbers for x86. num_rmid_idx == num_rmid */
+	return boot_cpu_data.x86_cache_max_rmid + 1;
+}
+
+static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid)
+{
+	*rmid = idx;
+	*closid = X86_RESCTRL_EMPTY_CLOSID;
+}
+
+static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid)
+{
+	return rmid;
+}
+
+/* x86 can always read an rmid, nothing needs allocating */
+struct rdt_resource;
+static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid)
+{
+	might_sleep();
+	return NULL;
+};
+
+static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
+					     void *ctx) { };
+
 void resctrl_cpu_detect(struct cpuinfo_x86 *c);
 
 #else
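
To show how the new index helpers fit together: the filesystem side can
walk per-RMID state by a single index on both x86 and MPAM, decoding a
(closid, rmid) pair from it as needed. A short sketch under that
assumption; the walker function is hypothetical, while the helpers and
X86_RESCTRL_EMPTY_CLOSID come from the diff above:

/* Hypothetical walker; every helper it calls is defined above. */
static void example_scan_rmid_index_space(void)
{
	u32 idx, closid, rmid;

	for (idx = 0; idx < resctrl_arch_system_num_rmid_idx(); idx++) {
		resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);

		/*
		 * On x86, rmid == idx and closid comes back as
		 * X86_RESCTRL_EMPTY_CLOSID, because RMIDs form a single,
		 * CLOSID-independent number space. An MPAM implementation
		 * would decode both values from the combined index.
		 */
		WARN_ON_ONCE(resctrl_arch_rmid_idx_encode(closid, rmid) != idx);
	}
}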