Diffstat (limited to 'include/linux/mm_types.h')
| -rw-r--r-- | include/linux/mm_types.h | 82 | 
1 file changed, 74 insertions, 8 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3fc9e680f174..306a3d1a0fa6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -573,6 +573,13 @@ struct vm_area_struct {
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 } __randomize_layout;
 
+#ifdef CONFIG_SCHED_MM_CID
+struct mm_cid {
+	u64 time;
+	int cid;
+};
+#endif
+
 struct kioctx_table;
 struct mm_struct {
 	struct {
@@ -623,15 +630,19 @@ struct mm_struct {
 		atomic_t mm_count;
 #ifdef CONFIG_SCHED_MM_CID
 		/**
-		 * @cid_lock: Protect cid bitmap updates vs lookups.
+		 * @pcpu_cid: Per-cpu current cid.
 		 *
-		 * Prevent situations where updates to the cid bitmap happen
-		 * concurrently with lookups. Those can lead to situations
-		 * where a lookup cannot find a free bit simply because it was
-		 * unlucky enough to load, non-atomically, bitmap words as they
-		 * were being concurrently updated by the updaters.
+		 * Keep track of the currently allocated mm_cid for each cpu.
+		 * The per-cpu mm_cid values are serialized by their respective
+		 * runqueue locks.
 		 */
-		raw_spinlock_t cid_lock;
+		struct mm_cid __percpu *pcpu_cid;
+		/*
+		 * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
+		 *
+		 * When the next mm_cid scan is due (in jiffies).
+		 */
+		unsigned long mm_cid_next_scan;
 #endif
 #ifdef CONFIG_MMU
 		atomic_long_t pgtables_bytes;	/* size of all page tables */
@@ -899,6 +910,37 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
 }
 
 #ifdef CONFIG_SCHED_MM_CID
+
+enum mm_cid_state {
+	MM_CID_UNSET = -1U,		/* Unset state has lazy_put flag set. */
+	MM_CID_LAZY_PUT = (1U << 31),
+};
+
+static inline bool mm_cid_is_unset(int cid)
+{
+	return cid == MM_CID_UNSET;
+}
+
+static inline bool mm_cid_is_lazy_put(int cid)
+{
+	return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
+}
+
+static inline bool mm_cid_is_valid(int cid)
+{
+	return !(cid & MM_CID_LAZY_PUT);
+}
+
+static inline int mm_cid_set_lazy_put(int cid)
+{
+	return cid | MM_CID_LAZY_PUT;
+}
+
+static inline int mm_cid_clear_lazy_put(int cid)
+{
+	return cid & ~MM_CID_LAZY_PUT;
+}
+
 /* Accessor for struct mm_struct's cidmask. */
 static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
 {
@@ -912,16 +954,40 @@ static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
 
 static inline void mm_init_cid(struct mm_struct *mm)
 {
-	raw_spin_lock_init(&mm->cid_lock);
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
+
+		pcpu_cid->cid = MM_CID_UNSET;
+		pcpu_cid->time = 0;
+	}
 	cpumask_clear(mm_cidmask(mm));
 }
 
+static inline int mm_alloc_cid(struct mm_struct *mm)
+{
+	mm->pcpu_cid = alloc_percpu(struct mm_cid);
+	if (!mm->pcpu_cid)
+		return -ENOMEM;
+	mm_init_cid(mm);
+	return 0;
+}
+
+static inline void mm_destroy_cid(struct mm_struct *mm)
+{
+	free_percpu(mm->pcpu_cid);
+	mm->pcpu_cid = NULL;
+}
+
 static inline unsigned int mm_cid_size(void)
 {
 	return cpumask_size();
 }
 #else /* CONFIG_SCHED_MM_CID */
 static inline void mm_init_cid(struct mm_struct *mm) { }
+static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; }
+static inline void mm_destroy_cid(struct mm_struct *mm) { }
 static inline unsigned int mm_cid_size(void)
 {
 	return 0;
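
The helpers added in the third hunk pack two pieces of state into a single int: the low bits hold the concurrency ID and bit 31 doubles as a lazy-put flag, with MM_CID_UNSET defined as -1U so the unset value also carries that flag. The standalone userspace sketch below (not part of the patch; the file name, macros, and main() driver are purely illustrative) mirrors that encoding to show how the helpers classify a cid as valid, lazily released, or unset. The enum values are recast as macros here because enumerators above INT_MAX rely on compiler extensions outside the kernel build.

/* mm_cid_state_demo.c - illustrative sketch only, mirrors the helpers above.
 * Build: cc -Wall -o mm_cid_state_demo mm_cid_state_demo.c
 */
#include <stdbool.h>
#include <stdio.h>

#define MM_CID_UNSET	(-1U)		/* all bits set: the unset state also has the lazy-put bit set */
#define MM_CID_LAZY_PUT	(1U << 31)	/* bit 31 marks a cid as lazily released */

static bool mm_cid_is_unset(int cid)
{
	return cid == MM_CID_UNSET;
}

static bool mm_cid_is_lazy_put(int cid)
{
	return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
}

static bool mm_cid_is_valid(int cid)
{
	return !(cid & MM_CID_LAZY_PUT);
}

static int mm_cid_set_lazy_put(int cid)
{
	return cid | MM_CID_LAZY_PUT;
}

static int mm_cid_clear_lazy_put(int cid)
{
	return cid & ~MM_CID_LAZY_PUT;
}

static void show(const char *what, int cid)
{
	printf("%-10s cid=%#10x valid=%d lazy_put=%d unset=%d\n", what,
	       (unsigned int)cid, mm_cid_is_valid(cid),
	       mm_cid_is_lazy_put(cid), mm_cid_is_unset(cid));
}

int main(void)
{
	int cid = 3;			/* a hypothetical concurrency ID held in a per-cpu slot */

	show("owned:", cid);

	/* Flag the cid for deferred release instead of dropping it immediately. */
	cid = mm_cid_set_lazy_put(cid);
	show("lazy put:", cid);

	/* Clear the flag again, as done when the cid is kept after all. */
	cid = mm_cid_clear_lazy_put(cid);
	show("kept:", cid);

	/* A fully released per-cpu slot. */
	cid = (int)MM_CID_UNSET;
	show("unset:", cid);
	return 0;
}

Note that mm_cid_is_valid() also reports MM_CID_UNSET as invalid, since the all-ones pattern includes bit 31; that is why mm_cid_is_lazy_put() checks mm_cid_is_unset() first, matching the patch comment that the unset state has the lazy_put flag set.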
