Diffstat (limited to 'include/linux')
 include/linux/compiler-gcc.h | 15 ++++++++++++++-
 include/linux/compiler.h     |  5 +++++
 include/linux/kconfig.h      |  9 +++++++++
 include/linux/memcontrol.h   | 24 ++++++++++++++++--------
 include/linux/ptr_ring.h     |  2 +-
 include/linux/sched/mm.h     | 13 ++++++++++++-
 include/linux/sched/user.h   |  4 ++++
 include/linux/skbuff.h       |  2 +-
 include/linux/swap.h         |  2 --
 include/linux/workqueue.h    |  1 +
 10 files changed, 63 insertions(+), 14 deletions(-)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 73bc63e0a1c4..901c1ccb3374 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -208,6 +208,15 @@
#endif
/*
+ * calling noreturn functions, __builtin_unreachable() and __builtin_trap()
+ * confuse the stack allocation in gcc, leading to overly large stack
+ * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * Adding an empty inline assembly before it works around the problem
+ */
+#define barrier_before_unreachable() asm volatile("")
+
+/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
@@ -217,7 +226,11 @@
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() \
- do { annotate_unreachable(); __builtin_unreachable(); } while (0)
+ do { \
+ annotate_unreachable(); \
+ barrier_before_unreachable(); \
+ __builtin_unreachable(); \
+ } while (0)
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
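
To illustrate the problem this macro works around (a sketch, not part of the patch): when a call to a noreturn function is followed by __builtin_unreachable(), gcc versions affected by PR82365 may allocate an oversized stack frame for the caller; the empty asm between them avoids that. The helper die_now() below is hypothetical.

    /* Sketch of the PR82365 pattern; die_now() is a hypothetical noreturn helper. */
    extern void die_now(void) __attribute__((__noreturn__));

    static int pick(int mode)
    {
            switch (mode) {
            case 0:
                    return 10;
            case 1:
                    return 20;
            default:
                    die_now();
                    barrier_before_unreachable();   /* empty asm keeps gcc's
                                                     * stack allocation sane */
                    __builtin_unreachable();
            }
    }
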
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e835fc0423ec..ab4711c63601 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define barrier_data(ptr) barrier()
#endif
+/* workaround for GCC PR82365 if needed */
+#ifndef barrier_before_unreachable
+# define barrier_before_unreachable() do { } while (0)
+#endif
+
/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index fec5076eda91..dcde9471897d 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -4,6 +4,12 @@
#include <generated/autoconf.h>
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#else
+#define __LITTLE_ENDIAN 1234
+#endif
+
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
@@ -64,4 +70,7 @@
*/
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
+/* Make sure we always have all types and struct attributes defined. */
+#include <linux/compiler_types.h>
+
#endif /* __LINUX_KCONFIG_H */
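
For context, IS_ENABLED() expands to a compile-time 0 or 1 for both built-in (=y) and modular (=m) options, so it can sit in an ordinary C condition and let the compiler drop the dead branch. A small usage sketch; setup_numa() is a stand-in, not a real kernel function:

    if (IS_ENABLED(CONFIG_NUMA))
            setup_numa();   /* folded away entirely when CONFIG_NUMA is unset */

The 4321/1234 values added above match the conventional __BIG_ENDIAN/__LITTLE_ENDIAN constants from the byteorder headers.
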
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 882046863581..c46016bb25eb 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -523,9 +523,11 @@ static inline void __mod_memcg_state(struct mem_cgroup *memcg,
static inline void mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
{
- preempt_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
__mod_memcg_state(memcg, idx, val);
- preempt_enable();
+ local_irq_restore(flags);
}
/**
@@ -606,9 +608,11 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
- preempt_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
__mod_lruvec_state(lruvec, idx, val);
- preempt_enable();
+ local_irq_restore(flags);
}
static inline void __mod_lruvec_page_state(struct page *page,
@@ -630,9 +634,11 @@ static inline void __mod_lruvec_page_state(struct page *page,
static inline void mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
- preempt_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
__mod_lruvec_page_state(page, idx, val);
- preempt_enable();
+ local_irq_restore(flags);
}
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -659,9 +665,11 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
static inline void count_memcg_events(struct mem_cgroup *memcg,
int idx, unsigned long count)
{
- preempt_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
__count_memcg_events(memcg, idx, count);
- preempt_enable();
+ local_irq_restore(flags);
}
/* idx can be of type enum memcg_event_item or vm_event_item */
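
The switch from preempt_disable() to local_irq_save() reflects that these counters can also be updated from interrupt context: disabling preemption does not stop an IRQ on the same CPU from interleaving with the non-atomic __-prefixed update. The general shape of the pattern, with hypothetical names:

    struct my_stats;                                /* hypothetical */
    extern void __mod_my_counter(struct my_stats *s, int val);

    static void mod_my_counter(struct my_stats *s, int val)
    {
            unsigned long flags;

            local_irq_save(flags);          /* blocks IRQs (and preemption) here */
            __mod_my_counter(s, val);       /* unlocked per-cpu update */
            local_irq_restore(flags);
    }
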
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index b884b7794187..e6335227b844 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -469,7 +469,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
*/
static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
- if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
+ if (size > KMALLOC_MAX_SIZE / sizeof(void *))
return NULL;
return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
}
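
The old test could wrap: on 32-bit, size * sizeof(void *) overflows for large size values, and the wrapped product can compare below KMALLOC_MAX_SIZE, letting an oversized request through. Dividing the constant limit by the element size instead is wrap-free. The idiom in isolation, illustrative only:

    #include <stddef.h>

    /* True when n elements of esize bytes would exceed max bytes; no overflow. */
    static inline int would_exceed(size_t n, size_t esize, size_t max)
    {
            return n > max / esize;
    }
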
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 1149533aa2fa..9806184bb3d5 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -36,7 +36,18 @@ static inline void mmgrab(struct mm_struct *mm)
atomic_inc(&mm->mm_count);
}
-extern void mmdrop(struct mm_struct *mm);
+extern void __mmdrop(struct mm_struct *mm);
+
+static inline void mmdrop(struct mm_struct *mm)
+{
+ /*
+ * The implicit full barrier implied by atomic_dec_and_test() is
+ * required by the membarrier system call before returning to
+ * user-space, after storing to rq->curr.
+ */
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
+}
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
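
The inlined mmdrop() pairs with mmgrab() from the context above: mm_count pins the mm_struct itself (not the address space), and the last mmdrop() frees it through the out-of-line __mmdrop(). A hedged usage sketch with a hypothetical caller:

    static void inspect_mm(struct mm_struct *mm)
    {
            mmgrab(mm);     /* keep the mm_struct allocated */
            /* ... read mm fields; the address space may already be torn down ... */
            mmdrop(mm);     /* the last reference calls __mmdrop() and frees it */
    }
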
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 0dcf4e480ef7..96fe289c4c6e 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
#include <linux/uidgid.h>
#include <linux/atomic.h>
+#include <linux/ratelimit.h>
struct key;
@@ -41,6 +42,9 @@ struct user_struct {
defined(CONFIG_NET)
atomic_long_t locked_vm;
#endif
+
+ /* Miscellaneous per-user rate limit */
+ struct ratelimit_state ratelimit;
};
extern int uids_sysfs_init(void);
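
struct ratelimit_state is the standard <linux/ratelimit.h> throttle. A sketch of how such a per-user limit is typically initialized and consulted; the interval and burst values below are placeholders, not what this patch series chose:

    /* At user_struct setup time (placeholder interval/burst values): */
    ratelimit_state_init(&up->ratelimit, HZ, 100);

    /* At the throttled site: */
    if (!__ratelimit(&up->ratelimit))
            return;                 /* over the limit: drop the event */
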
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5ebc0f869720..c1e66bdcf583 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3646,7 +3646,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
return true;
}
-/* For small packets <= CHECKSUM_BREAK peform checksum complete directly
+/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
* in checksum_init.
*/
#define CHECKSUM_BREAK 76
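
CHECKSUM_BREAK is the cutover below which completing the checksum immediately is cheaper than validating lazily. A simplified rendering of the decision, not the exact kernel code:

    /* Simplified: small packets get a full checksum computed right away. */
    if (skb->len <= CHECKSUM_BREAK)
            return !__skb_checksum_complete(skb);   /* zero result == valid */
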
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7b6a59f722a3..a1a3f4ed94ce 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -337,8 +337,6 @@ extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
-extern void add_page_to_unevictable_list(struct page *page);
-
extern void lru_cache_add_active_or_unevictable(struct page *page,
struct vm_area_struct *vma);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4a54ef96aff5..bc0cda180c8b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -465,6 +465,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
+extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
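
The new current_work() returns the work item the calling task is currently executing, or NULL outside workqueue context. A typical self-check sketch; the wrapper name is hypothetical:

    /* Hypothetical guard: are we running inside this particular work item? */
    static bool running_in(struct work_struct *work)
    {
            return current_work() == work;  /* NULL when not on a workqueue */
    }
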