Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/btf.c  82
-rw-r--r--  kernel/bpf/core.c  55
-rw-r--r--  kernel/bpf/local_storage.c  3
-rw-r--r--  kernel/bpf/queue_stack_maps.c  16
-rw-r--r--  kernel/bpf/verifier.c  118
-rw-r--r--  kernel/cpu.c  15
-rw-r--r--  kernel/dma/direct.c  7
-rw-r--r--  kernel/dma/swiotlb.c  3
-rw-r--r--  kernel/events/uprobes.c  14
-rw-r--r--  kernel/fork.c  9
-rw-r--r--  kernel/futex.c  69
-rw-r--r--  kernel/irq/affinity.c  176
-rw-r--r--  kernel/irq/chip.c  2
-rw-r--r--  kernel/irq/devres.c  4
-rw-r--r--  kernel/irq/ipi.c  4
-rw-r--r--  kernel/irq/irq_sim.c  23
-rw-r--r--  kernel/irq/irqdesc.c  28
-rw-r--r--  kernel/irq/irqdomain.c  4
-rw-r--r--  kernel/irq/manage.c  2
-rw-r--r--  kernel/irq/matrix.c  34
-rw-r--r--  kernel/irq/msi.c  8
-rw-r--r--  kernel/irq/spurious.c  6
-rw-r--r--  kernel/kcov.c  4
-rw-r--r--  kernel/kexec_file.c  70
-rw-r--r--  kernel/power/main.c  15
-rw-r--r--  kernel/power/qos.c  15
-rw-r--r--  kernel/ptrace.c  10
-rw-r--r--  kernel/sched/core.c  19
-rw-r--r--  kernel/sched/cpufreq.c  5
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  5
-rw-r--r--  kernel/sched/psi.c  30
-rw-r--r--  kernel/sched/sched.h  4
-rw-r--r--  kernel/sched/stats.h  8
-rw-r--r--  kernel/stackleak.c  6
-rw-r--r--  kernel/sys.c  8
-rw-r--r--  kernel/time/alarmtimer.c  5
-rw-r--r--  kernel/time/clockevents.c  18
-rw-r--r--  kernel/time/clocksource.c  20
-rw-r--r--  kernel/time/hrtimer.c  19
-rw-r--r--  kernel/time/itimer.c  2
-rw-r--r--  kernel/time/jiffies.c  28
-rw-r--r--  kernel/time/ntp.c  1
-rw-r--r--  kernel/time/posix-clock.c  17
-rw-r--r--  kernel/time/posix-stubs.c  5
-rw-r--r--  kernel/time/posix-timers.c  30
-rw-r--r--  kernel/time/sched_clock.c  9
-rw-r--r--  kernel/time/test_udelay.c  10
-rw-r--r--  kernel/time/tick-broadcast-hrtimer.c  4
-rw-r--r--  kernel/time/tick-broadcast.c  6
-rw-r--r--  kernel/time/tick-common.c  6
-rw-r--r--  kernel/time/tick-oneshot.c  6
-rw-r--r--  kernel/time/tick-sched.c  5
-rw-r--r--  kernel/time/time.c  13
-rw-r--r--  kernel/time/timeconst.bc  2
-rw-r--r--  kernel/time/timeconv.c  1
-rw-r--r--  kernel/time/timecounter.c  17
-rw-r--r--  kernel/time/timekeeping.c  15
-rw-r--r--  kernel/time/timekeeping_debug.c  26
-rw-r--r--  kernel/time/timer.c  3
-rw-r--r--  kernel/time/timer_list.c  7
-rw-r--r--  kernel/trace/bpf_trace.c  8
-rw-r--r--  kernel/trace/ftrace.c  8
-rw-r--r--  kernel/trace/trace.h  57
-rw-r--r--  kernel/trace/trace_events_filter.c  5
-rw-r--r--  kernel/trace/trace_events_trigger.c  6
-rw-r--r--  kernel/trace/trace_functions_graph.c  53
-rw-r--r--  kernel/trace/trace_irqsoff.c  2
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  2
68 files changed, 833 insertions, 464 deletions
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index ee4c82667d65..4da543d6bea2 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -5,6 +5,7 @@
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
+#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
@@ -426,6 +427,30 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
offset < btf->hdr.str_len;
}
+/* Only C-style identifier is permitted. This can be relaxed if
+ * necessary.
+ */
+static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
+{
+ /* offset must be valid */
+ const char *src = &btf->strings[offset];
+ const char *src_limit;
+
+ if (!isalpha(*src) && *src != '_')
+ return false;
+
+ /* set a limit on identifier length */
+ src_limit = src + KSYM_NAME_LEN;
+ src++;
+ while (*src && src < src_limit) {
+ if (!isalnum(*src) && *src != '_')
+ return false;
+ src++;
+ }
+
+ return !*src;
+}
+
static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
if (!offset)
@@ -1143,6 +1168,22 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* typedef type must have a valid name, and other ref types,
+ * volatile, const, restrict, should have a null name.
+ */
+ if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
+ if (!t->name_off ||
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+ } else {
+ if (t->name_off) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+ }
+
btf_verifier_log_type(env, t, NULL);
return 0;
@@ -1300,6 +1341,13 @@ static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* fwd type must have a valid name */
+ if (!t->name_off ||
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
btf_verifier_log_type(env, t, NULL);
return 0;
@@ -1356,6 +1404,12 @@ static s32 btf_array_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* array type should not have a name */
+ if (t->name_off) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
if (btf_type_vlen(t)) {
btf_verifier_log_type(env, t, "vlen != 0");
return -EINVAL;
@@ -1532,6 +1586,13 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* struct type either no name or a valid one */
+ if (t->name_off &&
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
btf_verifier_log_type(env, t, NULL);
last_offset = 0;
@@ -1543,6 +1604,12 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* struct member either no name or a valid one */
+ if (member->name_off &&
+ !btf_name_valid_identifier(btf, member->name_off)) {
+ btf_verifier_log_member(env, t, member, "Invalid name");
+ return -EINVAL;
+ }
/* A member cannot be in type void */
if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
btf_verifier_log_member(env, t, member,
@@ -1730,6 +1797,13 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* enum type either no name or a valid one */
+ if (t->name_off &&
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
btf_verifier_log_type(env, t, NULL);
for (i = 0; i < nr_enums; i++) {
@@ -1739,6 +1813,14 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* enum member must have a valid name */
+ if (!enums[i].name_off ||
+ !btf_name_valid_identifier(btf, enums[i].name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
+
btf_verifier_log(env, "\t%s val=%d\n",
btf_name_by_offset(btf, enums[i].name_off),
enums[i].val);
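For illustration only, here is a rough user-space sketch of the C-identifier rule that the new btf_name_valid_identifier() enforces above; the helper name and the 128-byte cap (standing in for KSYM_NAME_LEN) are demo assumptions, not kernel API:

/* Illustrative sketch: first char must be a letter or '_', the rest
 * alphanumeric or '_', with an upper bound on length. Not kernel code.
 */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

static bool name_is_valid_identifier(const char *src)
{
	const char *limit = src + 128;	/* stand-in for KSYM_NAME_LEN */

	if (!isalpha((unsigned char)*src) && *src != '_')
		return false;
	for (src++; *src && src < limit; src++) {
		if (!isalnum((unsigned char)*src) && *src != '_')
			return false;
	}
	return !*src;	/* reject names that run past the length cap */
}

int main(void)
{
	printf("%d\n", name_is_valid_identifier("task_struct"));	/* 1 */
	printf("%d\n", name_is_valid_identifier("_anon"));		/* 1 */
	printf("%d\n", name_is_valid_identifier("2fast"));		/* 0: starts with a digit */
	printf("%d\n", name_is_valid_identifier("foo.bar"));		/* 0: '.' not allowed */
	return 0;
}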
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1a796e0799ec..b2890c268cb3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -365,13 +365,11 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
}
#ifdef CONFIG_BPF_JIT
-# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000)
-
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden __read_mostly;
int bpf_jit_kallsyms __read_mostly;
-int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT;
+long bpf_jit_limit __read_mostly;
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -580,16 +578,27 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
static atomic_long_t bpf_jit_current;
+/* Can be overridden by an arch's JIT compiler if it has a custom,
+ * dedicated BPF backend memory area, or if neither of the two
+ * below apply.
+ */
+u64 __weak bpf_jit_alloc_exec_limit(void)
+{
#if defined(MODULES_VADDR)
+ return MODULES_END - MODULES_VADDR;
+#else
+ return VMALLOC_END - VMALLOC_START;
+#endif
+}
+
static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
- bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
- PAGE_SIZE), INT_MAX);
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+ PAGE_SIZE), LONG_MAX);
return 0;
}
pure_initcall(bpf_jit_charge_init);
-#endif
static int bpf_jit_charge_modmem(u32 pages)
{
@@ -672,6 +681,40 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+ const struct bpf_insn *insn, bool extra_pass,
+ u64 *func_addr, bool *func_addr_fixed)
+{
+ s16 off = insn->off;
+ s32 imm = insn->imm;
+ u8 *addr;
+
+ *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
+ if (!*func_addr_fixed) {
+ /* Place-holder address till the last pass has collected
+ * all addresses for JITed subprograms in which case we
+ * can pick them up from prog->aux.
+ */
+ if (!extra_pass)
+ addr = NULL;
+ else if (prog->aux->func &&
+ off >= 0 && off < prog->aux->func_cnt)
+ addr = (u8 *)prog->aux->func[off]->bpf_func;
+ else
+ return -EINVAL;
+ } else {
+ /* Address of a BPF helper call. Since part of the core
+ * kernel, it's always at a fixed location. __bpf_call_base
+ * and the helper with imm relative to it are both in core
+ * kernel.
+ */
+ addr = (u8 *)__bpf_call_base + imm;
+ }
+
+ *func_addr = (unsigned long)addr;
+ return 0;
+}
+
static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux,
struct bpf_insn *to_buff)
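As a hedged illustration of the new default-limit derivation above (a quarter of the JIT executable area, rounded up to a page, capped at LONG_MAX), a small user-space sketch; the 512 MiB area and 4 KiB page size are made-up demo values:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096ULL
#define DEMO_EXEC_AREA	(512ULL << 20)	/* stand-in for MODULES_END - MODULES_VADDR */

static uint64_t round_up_u64(uint64_t x, uint64_t align)
{
	return (x + align - 1) / align * align;
}

int main(void)
{
	uint64_t limit = round_up_u64(DEMO_EXEC_AREA >> 2, DEMO_PAGE_SIZE);

	if (limit > (uint64_t)LONG_MAX)
		limit = LONG_MAX;
	/* For a 512 MiB area this prints 134217728 bytes (128 MiB). */
	printf("default jit limit: %llu bytes (%llu MiB)\n",
	       (unsigned long long)limit, (unsigned long long)(limit >> 20));
	return 0;
}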
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index c97a8f968638..bed9d48a7ae9 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -139,7 +139,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
return -ENOENT;
new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
- map->value_size, __GFP_ZERO | GFP_USER,
+ map->value_size,
+ __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
map->numa_node);
if (!new)
return -ENOMEM;
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 8bbd72d3a121..b384ea9f3254 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -7,6 +7,7 @@
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/capability.h>
#include "percpu_freelist.h"
#define QUEUE_STACK_CREATE_FLAG_MASK \
@@ -45,8 +46,12 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 0 ||
+ attr->value_size == 0 ||
attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
return -EINVAL;
@@ -63,15 +68,10 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
int ret, numa_node = bpf_map_attr_numa_node(attr);
struct bpf_queue_stack *qs;
- u32 size, value_size;
- u64 queue_size, cost;
-
- size = attr->max_entries + 1;
- value_size = attr->value_size;
-
- queue_size = sizeof(*qs) + (u64) value_size * size;
+ u64 size, queue_size, cost;
- cost = queue_size;
+ size = (u64) attr->max_entries + 1;
+ cost = queue_size = sizeof(*qs) + size * attr->value_size;
if (cost >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-E2BIG);
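A minimal user-space sketch of the overflow this change avoids, assuming a hypothetical map with max_entries == U32_MAX and an 8-byte value (numbers are for the demo only):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_entries = UINT32_MAX;
	uint32_t value_size = 8;

	uint32_t size32 = max_entries + 1;		/* wraps to 0 in 32-bit math */
	uint64_t size64 = (uint64_t)max_entries + 1;	/* correct: 2^32 */

	printf("u32 size: %" PRIu32 " -> bytes: %" PRIu64 "\n",
	       size32, (uint64_t)value_size * size32);
	printf("u64 size: %" PRIu64 " -> bytes: %" PRIu64 "\n",
	       size64, value_size * size64);
	return 0;
}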
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1971ca325fb4..51ba84d4d34a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -175,6 +175,7 @@ struct bpf_verifier_stack_elem {
#define BPF_COMPLEXITY_LIMIT_INSNS 131072
#define BPF_COMPLEXITY_LIMIT_STACK 1024
+#define BPF_COMPLEXITY_LIMIT_STATES 64
#define BPF_MAP_PTR_UNPRIV 1UL
#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
@@ -3751,6 +3752,79 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
}
}
+/* compute branch direction of the expression "if (reg opcode val) goto target;"
+ * and return:
+ * 1 - branch will be taken and "goto target" will be executed
+ * 0 - branch will not be taken and fall-through to next insn
+ * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
+ */
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+{
+ if (__is_pointer_value(false, reg))
+ return -1;
+
+ switch (opcode) {
+ case BPF_JEQ:
+ if (tnum_is_const(reg->var_off))
+ return !!tnum_equals_const(reg->var_off, val);
+ break;
+ case BPF_JNE:
+ if (tnum_is_const(reg->var_off))
+ return !tnum_equals_const(reg->var_off, val);
+ break;
+ case BPF_JGT:
+ if (reg->umin_value > val)
+ return 1;
+ else if (reg->umax_value <= val)
+ return 0;
+ break;
+ case BPF_JSGT:
+ if (reg->smin_value > (s64)val)
+ return 1;
+ else if (reg->smax_value < (s64)val)
+ return 0;
+ break;
+ case BPF_JLT:
+ if (reg->umax_value < val)
+ return 1;
+ else if (reg->umin_value >= val)
+ return 0;
+ break;
+ case BPF_JSLT:
+ if (reg->smax_value < (s64)val)
+ return 1;
+ else if (reg->smin_value >= (s64)val)
+ return 0;
+ break;
+ case BPF_JGE:
+ if (reg->umin_value >= val)
+ return 1;
+ else if (reg->umax_value < val)
+ return 0;
+ break;
+ case BPF_JSGE:
+ if (reg->smin_value >= (s64)val)
+ return 1;
+ else if (reg->smax_value < (s64)val)
+ return 0;
+ break;
+ case BPF_JLE:
+ if (reg->umax_value <= val)
+ return 1;
+ else if (reg->umin_value > val)
+ return 0;
+ break;
+ case BPF_JSLE:
+ if (reg->smax_value <= (s64)val)
+ return 1;
+ else if (reg->smin_value > (s64)val)
+ return 0;
+ break;
+ }
+
+ return -1;
+}
+
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
@@ -4152,21 +4226,15 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
dst_reg = &regs[insn->dst_reg];
- /* detect if R == 0 where R was initialized to zero earlier */
- if (BPF_SRC(insn->code) == BPF_K &&
- (opcode == BPF_JEQ || opcode == BPF_JNE) &&
- dst_reg->type == SCALAR_VALUE &&
- tnum_is_const(dst_reg->var_off)) {
- if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
- (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
- /* if (imm == imm) goto pc+off;
- * only follow the goto, ignore fall-through
- */
+ if (BPF_SRC(insn->code) == BPF_K) {
+ int pred = is_branch_taken(dst_reg, insn->imm, opcode);
+
+ if (pred == 1) {
+ /* only follow the goto, ignore fall-through */
*insn_idx += insn->off;
return 0;
- } else {
- /* if (imm != imm) goto pc+off;
- * only follow fall-through branch, since
+ } else if (pred == 0) {
+ /* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
@@ -4980,7 +5048,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state, *new;
- int i, j, err;
+ int i, j, err, states_cnt = 0;
sl = env->explored_states[insn_idx];
if (!sl)
@@ -5007,8 +5075,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
return 1;
}
sl = sl->next;
+ states_cnt++;
}
+ if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
+ return 0;
+
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
* but it will either reach outer most bpf_exit (which means it's safe)
@@ -5030,9 +5102,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
}
new_sl->next = env->explored_states[insn_idx];
env->explored_states[insn_idx] = new_sl;
- /* connect new state to parentage chain */
- for (i = 0; i < BPF_REG_FP; i++)
- cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
+ /* connect new state to parentage chain. Current frame needs all
+ * registers connected. Only r6 - r9 of the callers are alive (pushed
+ * to the stack implicitly by JITs) so in callers' frames connect just
+ * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
+ * the state of the call instruction (with WRITTEN set), and r0 comes
+ * from callee with its full parentage chain, anyway.
+ */
+ for (j = 0; j <= cur->curframe; j++)
+ for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
+ cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
/* clear write marks in current state: the writes we did are not writes
* our child did, so they don't screen off its reads from us.
* (There are no read marks in current state, because reads always mark
@@ -5148,6 +5227,9 @@ static int do_check(struct bpf_verifier_env *env)
goto process_bpf_exit;
}
+ if (signal_pending(current))
+ return -EAGAIN;
+
if (need_resched())
cond_resched();
@@ -5650,7 +5732,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
return;
/* NOTE: fake 'exit' subprog should be updated as well. */
for (i = 0; i <= env->subprog_cnt; i++) {
- if (env->subprog_info[i].start < off)
+ if (env->subprog_info[i].start <= off)
continue;
env->subprog_info[i].start += len - 1;
}
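To illustrate the range-based pruning that is_branch_taken() adds, a small user-space sketch of the BPF_JLT case for a register whose unsigned range is known to be [0, 10] (values are made up for the demo):

#include <stdint.h>
#include <stdio.h>

/* returns 1 = always taken, 0 = never taken, -1 = unknown */
static int branch_lt(uint64_t umin, uint64_t umax, uint64_t val)
{
	if (umax < val)		/* whole range below val: "reg < val" is always true */
		return 1;
	if (umin >= val)	/* whole range at or above val: never true */
		return 0;
	return -1;
}

int main(void)
{
	/* register known to lie in [0, 10] */
	printf("if (reg < 5):  %d\n", branch_lt(0, 10, 5));	/* -1: unknown */
	printf("if (reg < 11): %d\n", branch_lt(0, 10, 11));	/*  1: always taken */
	printf("if (reg < 0):  %d\n", branch_lt(0, 10, 0));	/*  0: never taken */
	return 0;
}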
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3c7f3b4c453c..91d5c38eb7e5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -10,6 +10,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
+#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
@@ -367,6 +368,12 @@ static void lockdep_release_cpus_lock(void)
#endif /* CONFIG_HOTPLUG_CPU */
+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override this.
+ */
+void __weak arch_smt_update(void) { }
+
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);
@@ -1011,6 +1018,7 @@ out:
* concurrent CPU hotplug via cpu_add_remove_lock.
*/
lockup_detector_cleanup();
+ arch_smt_update();
return ret;
}
@@ -1139,6 +1147,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
ret = cpuhp_up_callbacks(cpu, st, target);
out:
cpus_write_unlock();
+ arch_smt_update();
return ret;
}
@@ -2055,12 +2064,6 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
-/*
- * Architectures that need SMT-specific errata handling during SMT hotplug
- * should override this.
- */
-void __weak arch_smt_update(void) { };
-
static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 22a12ab5a5e9..375c77e8d52f 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -309,7 +309,12 @@ int dma_direct_supported(struct device *dev, u64 mask)
min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
- return mask >= phys_to_dma(dev, min_mask);
+ /*
+ * This check needs to be against the actual bit mask value, so
+ * use __phys_to_dma() here so that the SME encryption mask isn't
+ * part of the check.
+ */
+ return mask >= __phys_to_dma(dev, min_mask);
}
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 5731daa09a32..045930e32c0e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -679,7 +679,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
}
if (!dev_is_dma_coherent(dev) &&
- (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0 &&
+ dev_addr != DIRECT_MAPPING_ERROR)
arch_sync_dma_for_device(dev, phys, size, dir);
return dev_addr;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 96d4bee83489..abbd8da9ac21 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -572,7 +572,9 @@ static void put_uprobe(struct uprobe *uprobe)
* gets called, we don't get a chance to remove uprobe from
* delayed_uprobe_list from remove_breakpoint(). Do it here.
*/
+ mutex_lock(&delayed_uprobe_lock);
delayed_uprobe_remove(uprobe, NULL);
+ mutex_unlock(&delayed_uprobe_lock);
kfree(uprobe);
}
}
@@ -829,7 +831,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
BUG_ON((uprobe->offset & ~PAGE_MASK) +
UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
- smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
+ smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
set_bit(UPROBE_COPY_INSN, &uprobe->flags);
out:
@@ -2178,10 +2180,18 @@ static void handle_swbp(struct pt_regs *regs)
* After we hit the bp, _unregister + _register can install the
* new and not-yet-analyzed uprobe at the same address, restart.
*/
- smp_rmb(); /* pairs with wmb() in install_breakpoint() */
if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
goto out;
+ /*
+ * Pairs with the smp_wmb() in prepare_uprobe().
+ *
+ * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
+ * we must also see the stores to &uprobe->arch performed by the
+ * prepare_uprobe() call.
+ */
+ smp_rmb();
+
/* Tracing handlers use ->utask to communicate with fetch methods */
if (!get_utask())
goto out;
diff --git a/kernel/fork.c b/kernel/fork.c
index 07cddff89c7b..e2a5156bc9c3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -240,8 +240,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
* free_thread_stack() can be called in interrupt context,
* so cache the vm_struct.
*/
- if (stack)
+ if (stack) {
tsk->stack_vm_area = find_vm_area(stack);
+ tsk->stack = stack;
+ }
return stack;
#else
struct page *page = alloc_pages_node(node, THREADINFO_GFP,
@@ -288,7 +290,10 @@ static struct kmem_cache *thread_stack_cache;
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
- return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
+ unsigned long *stack;
+ stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
+ tsk->stack = stack;
+ return stack;
}
static void free_thread_stack(struct task_struct *tsk)
diff --git a/kernel/futex.c b/kernel/futex.c
index f423f9b6577e..5cc8083a4c89 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1148,11 +1148,65 @@ out_error:
return ret;
}
+static int handle_exit_race(u32 __user *uaddr, u32 uval,
+ struct task_struct *tsk)
+{
+ u32 uval2;
+
+ /*
+ * If PF_EXITPIDONE is not yet set, then try again.
+ */
+ if (tsk && !(tsk->flags & PF_EXITPIDONE))
+ return -EAGAIN;
+
+ /*
+ * Reread the user space value to handle the following situation:
+ *
+ * CPU0 CPU1
+ *
+ * sys_exit() sys_futex()
+ * do_exit() futex_lock_pi()
+ * futex_lock_pi_atomic()
+ * exit_signals(tsk) No waiters:
+ * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
+ * mm_release(tsk) Set waiter bit
+ * exit_robust_list(tsk) { *uaddr = 0x80000PID;
+ * Set owner died attach_to_pi_owner() {
+ * *uaddr = 0xC0000000; tsk = get_task(PID);
+ * } if (!tsk->flags & PF_EXITING) {
+ * ... attach();
+ * tsk->flags |= PF_EXITPIDONE; } else {
+ * if (!(tsk->flags & PF_EXITPIDONE))
+ * return -EAGAIN;
+ * return -ESRCH; <--- FAIL
+ * }
+ *
+ * Returning ESRCH unconditionally is wrong here because the
+ * user space value has been changed by the exiting task.
+ *
+ * The same logic applies to the case where the exiting task is
+ * already gone.
+ */
+ if (get_futex_value_locked(&uval2, uaddr))
+ return -EFAULT;
+
+ /* If the user space value has changed, try again. */
+ if (uval2 != uval)
+ return -EAGAIN;
+
+ /*
+ * The exiting task did not have a robust list, the robust list was
+ * corrupted or the user space value in *uaddr is simply bogus.
+ * Give up and tell user space.
+ */
+ return -ESRCH;
+}
+
/*
* Lookup the task for the TID provided from user space and attach to
* it after doing proper sanity checks.
*/
-static int attach_to_pi_owner(u32 uval, union futex_key *key,
+static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
struct futex_pi_state **ps)
{
pid_t pid = uval & FUTEX_TID_MASK;
@@ -1162,12 +1216,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
/*
* We are the first waiter - try to look up the real owner and attach
* the new pi_state to it, but bail out when TID = 0 [1]
+ *
+ * The !pid check is paranoid. None of the call sites should end up
+ * with pid == 0, but better safe than sorry. Let the caller retry
*/
if (!pid)
- return -ESRCH;
+ return -EAGAIN;
p = find_get_task_by_vpid(pid);
if (!p)
- return -ESRCH;
+ return handle_exit_race(uaddr, uval, NULL);
if (unlikely(p->flags & PF_KTHREAD)) {
put_task_struct(p);
@@ -1187,7 +1244,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
* set, we know that the task has finished the
* cleanup:
*/
- int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
+ int ret = handle_exit_race(uaddr, uval, p);
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
@@ -1244,7 +1301,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
* We are the first waiter - try to look up the owner based on
* @uval and attach to it.
*/
- return attach_to_pi_owner(uval, key, ps);
+ return attach_to_pi_owner(uaddr, uval, key, ps);
}
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
@@ -1352,7 +1409,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
* attach to the owner. If that fails, no harm done, we only
* set the FUTEX_WAITERS bit in the user space variable.
*/
- return attach_to_pi_owner(uval, key, ps);
+ return attach_to_pi_owner(uaddr, newval, key, ps);
}
/**
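A condensed user-space sketch of the decision handle_exit_race() encodes: only report ESRCH when the owner has really finished exiting and the futex word did not change underneath us. Plain ints stand in for the kernel's task flags and futex value; this is not kernel code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int demo_exit_race(int owner_exit_done, uint32_t uval_seen, uint32_t uval_now)
{
	if (!owner_exit_done)
		return -EAGAIN;		/* owner still in do_exit(), retry */
	if (uval_now != uval_seen)
		return -EAGAIN;		/* futex word changed (robust exit ran), retry */
	return -ESRCH;			/* no owner and a stable value: give up */
}

int main(void)
{
	printf("%d\n", demo_exit_race(0, 0x80000001, 0x80000001));	/* -EAGAIN */
	printf("%d\n", demo_exit_race(1, 0x80000001, 0xC0000000));	/* -EAGAIN */
	printf("%d\n", demo_exit_race(1, 0x80000001, 0x80000001));	/* -ESRCH */
	return 0;
}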
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index f4f29b9d90ee..45b68b4ea48b 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -94,15 +94,15 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
return nodes;
}
-static int irq_build_affinity_masks(const struct irq_affinity *affd,
- int startvec, int numvecs,
- cpumask_var_t *node_to_cpumask,
- const struct cpumask *cpu_mask,
- struct cpumask *nmsk,
- struct cpumask *masks)
+static int __irq_build_affinity_masks(const struct irq_affinity *affd,
+ int startvec, int numvecs, int firstvec,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ struct cpumask *nmsk,
+ struct irq_affinity_desc *masks)
{
int n, nodes, cpus_per_vec, extra_vecs, done = 0;
- int last_affv = affd->pre_vectors + numvecs;
+ int last_affv = firstvec + numvecs;
int curvec = startvec;
nodemask_t nodemsk = NODE_MASK_NONE;
@@ -117,12 +117,13 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
*/
if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) {
- cpumask_copy(masks + curvec, node_to_cpumask[n]);
- if (++done == numvecs)
- break;
+ cpumask_or(&masks[curvec].mask,
+ &masks[curvec].mask,
+ node_to_cpumask[n]);
if (++curvec == last_affv)
- curvec = affd->pre_vectors;
+ curvec = firstvec;
}
+ done = numvecs;
goto out;
}
@@ -130,7 +131,7 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
int ncpus, v, vecs_to_assign, vecs_per_node;
/* Spread the vectors per node */
- vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes;
+ vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
/* Get the cpus on this node which are in the mask */
cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
@@ -151,14 +152,15 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
cpus_per_vec++;
--extra_vecs;
}
- irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
+ irq_spread_init_one(&masks[curvec].mask, nmsk,
+ cpus_per_vec);
}
done += v;
if (done >= numvecs)
break;
if (curvec >= last_affv)
- curvec = affd->pre_vectors;
+ curvec = firstvec;
--nodes;
}
@@ -166,20 +168,77 @@ out:
return done;
}
+/*
+ * build affinity in two stages:
+ * 1) spread present CPU on these vectors
+ * 2) spread other possible CPUs on these vectors
+ */
+static int irq_build_affinity_masks(const struct irq_affinity *affd,
+ int startvec, int numvecs, int firstvec,
+ cpumask_var_t *node_to_cpumask,
+ struct irq_affinity_desc *masks)
+{
+ int curvec = startvec, nr_present, nr_others;
+ int ret = -ENOMEM;
+ cpumask_var_t nmsk, npresmsk;
+
+ if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ return ret;
+
+ if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+ goto fail;
+
+ ret = 0;
+ /* Stabilize the cpumasks */
+ get_online_cpus();
+ build_node_to_cpumask(node_to_cpumask);
+
+ /* Spread on present CPUs starting from affd->pre_vectors */
+ nr_present = __irq_build_affinity_masks(affd, curvec, numvecs,
+ firstvec, node_to_cpumask,
+ cpu_present_mask, nmsk, masks);
+
+ /*
+ * Spread on non present CPUs starting from the next vector to be
+ * handled. If the spreading of present CPUs already exhausted the
+ * vector space, assign the non present CPUs to the already spread
+ * out vectors.
+ */
+ if (nr_present >= numvecs)
+ curvec = firstvec;
+ else
+ curvec = firstvec + nr_present;
+ cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+ nr_others = __irq_build_affinity_masks(affd, curvec, numvecs,
+ firstvec, node_to_cpumask,
+ npresmsk, nmsk, masks);
+ put_online_cpus();
+
+ if (nr_present < numvecs)
+ WARN_ON(nr_present + nr_others < numvecs);
+
+ free_cpumask_var(npresmsk);
+
+ fail:
+ free_cpumask_var(nmsk);
+ return ret;
+}
+
/**
* irq_create_affinity_masks - Create affinity masks for multiqueue spreading
* @nvecs: The total number of vectors
* @affd: Description of the affinity requirements
*
- * Returns the masks pointer or NULL if allocation failed.
+ * Returns the irq_affinity_desc pointer or NULL if allocation failed.
*/
-struct cpumask *
+struct irq_affinity_desc *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
int curvec, usedvecs;
- cpumask_var_t nmsk, npresmsk, *node_to_cpumask;
- struct cpumask *masks = NULL;
+ cpumask_var_t *node_to_cpumask;
+ struct irq_affinity_desc *masks = NULL;
+ int i, nr_sets;
/*
* If there aren't any vectors left after applying the pre/post
@@ -188,15 +247,9 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
if (nvecs == affd->pre_vectors + affd->post_vectors)
return NULL;
- if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
- return NULL;
-
- if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
- goto outcpumsk;
-
node_to_cpumask = alloc_node_to_cpumask();
if (!node_to_cpumask)
- goto outnpresmsk;
+ return NULL;
masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
if (!masks)
@@ -204,32 +257,29 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
/* Fill out vectors at the beginning that don't need affinity */
for (curvec = 0; curvec < affd->pre_vectors; curvec++)
- cpumask_copy(masks + curvec, irq_default_affinity);
-
- /* Stabilize the cpumasks */
- get_online_cpus();
- build_node_to_cpumask(node_to_cpumask);
-
- /* Spread on present CPUs starting from affd->pre_vectors */
- usedvecs = irq_build_affinity_masks(affd, curvec, affvecs,
- node_to_cpumask, cpu_present_mask,
- nmsk, masks);
-
+ cpumask_copy(&masks[curvec].mask, irq_default_affinity);
/*
- * Spread on non present CPUs starting from the next vector to be
- * handled. If the spreading of present CPUs already exhausted the
- * vector space, assign the non present CPUs to the already spread
- * out vectors.
+ * Spread on present CPUs starting from affd->pre_vectors. If we
+ * have multiple sets, build each sets affinity mask separately.
*/
- if (usedvecs >= affvecs)
- curvec = affd->pre_vectors;
- else
- curvec = affd->pre_vectors + usedvecs;
- cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
- usedvecs += irq_build_affinity_masks(affd, curvec, affvecs,
- node_to_cpumask, npresmsk,
- nmsk, masks);
- put_online_cpus();
+ nr_sets = affd->nr_sets;
+ if (!nr_sets)
+ nr_sets = 1;
+
+ for (i = 0, usedvecs = 0; i < nr_sets; i++) {
+ int this_vecs = affd->sets ? affd->sets[i] : affvecs;
+ int ret;
+
+ ret = irq_build_affinity_masks(affd, curvec, this_vecs,
+ curvec, node_to_cpumask, masks);
+ if (ret) {
+ kfree(masks);
+ masks = NULL;
+ goto outnodemsk;
+ }
+ curvec += this_vecs;
+ usedvecs += this_vecs;
+ }
/* Fill out vectors at the end that don't need affinity */
if (usedvecs >= affvecs)
@@ -237,14 +287,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
else
curvec = affd->pre_vectors + usedvecs;
for (; curvec < nvecs; curvec++)
- cpumask_copy(masks + curvec, irq_default_affinity);
+ cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+
+ /* Mark the managed interrupts */
+ for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
+ masks[i].is_managed = 1;
outnodemsk:
free_node_to_cpumask(node_to_cpumask);
-outnpresmsk:
- free_cpumask_var(npresmsk);
-outcpumsk:
- free_cpumask_var(nmsk);
return masks;
}
@@ -258,13 +308,21 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
{
int resv = affd->pre_vectors + affd->post_vectors;
int vecs = maxvec - resv;
- int ret;
+ int set_vecs;
if (resv > minvec)
return 0;
- get_online_cpus();
- ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
- put_online_cpus();
- return ret;
+ if (affd->nr_sets) {
+ int i;
+
+ for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
+ set_vecs += affd->sets[i];
+ } else {
+ get_online_cpus();
+ set_vecs = cpumask_weight(cpu_possible_mask);
+ put_online_cpus();
+ }
+
+ return resv + min(set_vecs, vecs);
}
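To show how the reworked irq_calc_affinity_vectors() arithmetic plays out, a user-space sketch with made-up pre/post reservations and interrupt sets (all numbers are demo assumptions):

#include <stdio.h>

static int demo_calc_vectors(int minvec, int maxvec, int pre, int post,
			     int nr_sets, const int *sets, int nr_possible_cpus)
{
	int resv = pre + post;
	int vecs = maxvec - resv;
	int set_vecs = 0;
	int i;

	if (resv > minvec)
		return 0;

	if (nr_sets)
		for (i = 0; i < nr_sets; i++)
			set_vecs += sets[i];
	else
		set_vecs = nr_possible_cpus;

	return resv + (set_vecs < vecs ? set_vecs : vecs);
}

int main(void)
{
	int sets[] = { 16, 8 };	/* e.g. 16 I/O queues + 8 poll queues */

	/* pre=1, post=1, maxvec=32, sets sum to 24 -> 1 + 1 + min(24, 30) = 26 */
	printf("%d\n", demo_calc_vectors(2, 32, 1, 1, 2, sets, 64));
	/* no sets: clamp to possible CPUs -> 1 + 1 + min(64, 30) = 32 */
	printf("%d\n", demo_calc_vectors(2, 32, 1, 1, 0, NULL, 64));
	return 0;
}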
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a2b3d9de999c..34e969069488 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -929,7 +929,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
break;
/*
* Bail out if the outer chip is not set up
- * and the interrrupt supposed to be started
+ * and the interrupt supposed to be started
* right away.
*/
if (WARN_ON(is_chained))
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 6a682c229e10..5d5378ea0afe 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -169,7 +169,7 @@ static void devm_irq_desc_release(struct device *dev, void *res)
* @cnt: Number of consecutive irqs to allocate
* @node: Preferred node on which the irq descriptor should be allocated
* @owner: Owning module (can be NULL)
- * @affinity: Optional pointer to an affinity mask array of size @cnt
+ * @affinity: Optional pointer to an irq_affinity_desc array of size @cnt
* which hints where the irq descriptors should be allocated
* and which default affinities to use
*
@@ -179,7 +179,7 @@ static void devm_irq_desc_release(struct device *dev, void *res)
*/
int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
- const struct cpumask *affinity)
+ const struct irq_affinity_desc *affinity)
{
struct irq_desc_devres *dr;
int base;
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index 8b778e37dc6d..43e3d1be622c 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -56,7 +56,7 @@ int irq_reserve_ipi(struct irq_domain *domain,
unsigned int next;
/*
- * The IPI requires a seperate HW irq on each CPU. We require
+ * The IPI requires a separate HW irq on each CPU. We require
* that the destination mask is consecutive. If an
* implementation needs to support holes, it can reserve
* several IPI ranges.
@@ -172,7 +172,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
/*
* Get the real hardware irq number if the underlying implementation
- * uses a seperate irq per cpu. If the underlying implementation uses
+ * uses a separate irq per cpu. If the underlying implementation uses
* a single hardware irq for all cpus then the IPI send mechanism
* needs to take care of the cpu destinations.
*/
diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c
index dd20d0d528d4..98a20e1594ce 100644
--- a/kernel/irq/irq_sim.c
+++ b/kernel/irq/irq_sim.c
@@ -34,9 +34,20 @@ static struct irq_chip irq_sim_irqchip = {
static void irq_sim_handle_irq(struct irq_work *work)
{
struct irq_sim_work_ctx *work_ctx;
+ unsigned int offset = 0;
+ struct irq_sim *sim;
+ int irqnum;
work_ctx = container_of(work, struct irq_sim_work_ctx, work);
- handle_simple_irq(irq_to_desc(work_ctx->irq));
+ sim = container_of(work_ctx, struct irq_sim, work_ctx);
+
+ while (!bitmap_empty(work_ctx->pending, sim->irq_count)) {
+ offset = find_next_bit(work_ctx->pending,
+ sim->irq_count, offset);
+ clear_bit(offset, work_ctx->pending);
+ irqnum = irq_sim_irqnum(sim, offset);
+ handle_simple_irq(irq_to_desc(irqnum));
+ }
}
/**
@@ -63,6 +74,13 @@ int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs)
return sim->irq_base;
}
+ sim->work_ctx.pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
+ if (!sim->work_ctx.pending) {
+ kfree(sim->irqs);
+ irq_free_descs(sim->irq_base, num_irqs);
+ return -ENOMEM;
+ }
+
for (i = 0; i < num_irqs; i++) {
sim->irqs[i].irqnum = sim->irq_base + i;
sim->irqs[i].enabled = false;
@@ -89,6 +107,7 @@ EXPORT_SYMBOL_GPL(irq_sim_init);
void irq_sim_fini(struct irq_sim *sim)
{
irq_work_sync(&sim->work_ctx.work);
+ bitmap_free(sim->work_ctx.pending);
irq_free_descs(sim->irq_base, sim->irq_count);
kfree(sim->irqs);
}
@@ -143,7 +162,7 @@ EXPORT_SYMBOL_GPL(devm_irq_sim_init);
void irq_sim_fire(struct irq_sim *sim, unsigned int offset)
{
if (sim->irqs[offset].enabled) {
- sim->work_ctx.irq = irq_sim_irqnum(sim, offset);
+ set_bit(offset, sim->work_ctx.pending);
irq_work_queue(&sim->work_ctx.work);
}
}
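A user-space sketch of the pending-bitmap pattern the irq_sim rework adopts: "firing" records an offset in a bitmap and the work handler later drains every pending offset, instead of remembering only the last one. A plain unsigned long stands in for the kernel bitmap helpers.

#include <stdio.h>

static unsigned long pending;	/* demo: up to 8 * sizeof(long) simulated lines */

static void demo_fire(unsigned int offset)
{
	pending |= 1UL << offset;	/* like set_bit(offset, pending) */
}

static void demo_handle_work(void)
{
	unsigned int offset;

	/* drain all pending offsets, not just the most recent one */
	for (offset = 0; pending; offset++) {
		if (!(pending & (1UL << offset)))
			continue;
		pending &= ~(1UL << offset);	/* like clear_bit() */
		printf("handling simulated irq at offset %u\n", offset);
	}
}

int main(void)
{
	demo_fire(2);
	demo_fire(5);
	demo_fire(2);		/* firing twice before the handler runs loses nothing */
	demo_handle_work();	/* handles offsets 2 and 5 exactly once each */
	return 0;
}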
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c

index 578d0e5f1b5b..ee062b7939d3 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -449,30 +449,34 @@ static void free_desc(unsigned int irq)
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
- const struct cpumask *affinity, struct module *owner)
+ const struct irq_affinity_desc *affinity,
+ struct module *owner)
{
- const struct cpumask *mask = NULL;
struct irq_desc *desc;
- unsigned int flags;
int i;
/* Validate affinity mask(s) */
if (affinity) {
- for (i = 0, mask = affinity; i < cnt; i++, mask++) {
- if (cpumask_empty(mask))
+ for (i = 0; i < cnt; i++) {
+ if (cpumask_empty(&affinity[i].mask))
return -EINVAL;
}
}
- flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
- mask = NULL;
-
for (i = 0; i < cnt; i++) {
+ const struct cpumask *mask = NULL;
+ unsigned int flags = 0;
+
if (affinity) {
- node = cpu_to_node(cpumask_first(affinity));
- mask = affinity;
+ if (affinity->is_managed) {
+ flags = IRQD_AFFINITY_MANAGED |
+ IRQD_MANAGED_SHUTDOWN;
+ }
+ mask = &affinity->mask;
+ node = cpu_to_node(cpumask_first(mask));
affinity++;
}
+
desc = alloc_desc(start + i, node, flags, mask, owner);
if (!desc)
goto err;
@@ -575,7 +579,7 @@ static void free_desc(unsigned int irq)
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
- const struct cpumask *affinity,
+ const struct irq_affinity_desc *affinity,
struct module *owner)
{
u32 i;
@@ -705,7 +709,7 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
*/
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner, const struct cpumask *affinity)
+ struct module *owner, const struct irq_affinity_desc *affinity)
{
int start, ret;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3366d11c3e02..8b0be4bd6565 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -969,7 +969,7 @@ const struct irq_domain_ops irq_domain_simple_ops = {
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
- int node, const struct cpumask *affinity)
+ int node, const struct irq_affinity_desc *affinity)
{
unsigned int hint;
@@ -1281,7 +1281,7 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
*/
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
- bool realloc, const struct cpumask *affinity)
+ bool realloc, const struct irq_affinity_desc *affinity)
{
int i, ret, virq;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9dbdccab3b6a..a4888ce4667a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -915,7 +915,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
/*
- * Interrupts which are not explicitely requested as threaded
+ * Interrupts which are not explicitly requested as threaded
* interrupts rely on the implicit bh/preempt disable of the hard irq
* context. So we need to disable bh here to avoid deadlocks and other
* side effects.
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 1f0985adf193..30cc217b8631 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -14,6 +14,7 @@ struct cpumap {
unsigned int available;
unsigned int allocated;
unsigned int managed;
+ unsigned int managed_allocated;
bool initialized;
bool online;
unsigned long alloc_map[IRQ_MATRIX_SIZE];
@@ -145,6 +146,27 @@ static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
return best_cpu;
}
+/* Find the best CPU which has the lowest number of managed IRQs allocated */
+static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
+ const struct cpumask *msk)
+{
+ unsigned int cpu, best_cpu, allocated = UINT_MAX;
+ struct cpumap *cm;
+
+ best_cpu = UINT_MAX;
+
+ for_each_cpu(cpu, msk) {
+ cm = per_cpu_ptr(m->maps, cpu);
+
+ if (!cm->online || cm->managed_allocated > allocated)
+ continue;
+
+ best_cpu = cpu;
+ allocated = cm->managed_allocated;
+ }
+ return best_cpu;
+}
+
/**
* irq_matrix_assign_system - Assign system wide entry in the matrix
* @m: Matrix pointer
@@ -269,7 +291,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
if (cpumask_empty(msk))
return -EINVAL;
- cpu = matrix_find_best_cpu(m, msk);
+ cpu = matrix_find_best_cpu_managed(m, msk);
if (cpu == UINT_MAX)
return -ENOSPC;
@@ -282,6 +304,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
return -ENOSPC;
set_bit(bit, cm->alloc_map);
cm->allocated++;
+ cm->managed_allocated++;
m->total_allocated++;
*mapped_cpu = cpu;
trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
@@ -395,6 +418,8 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
clear_bit(bit, cm->alloc_map);
cm->allocated--;
+ if (managed)
+ cm->managed_allocated--;
if (cm->online)
m->total_allocated--;
@@ -464,13 +489,14 @@ void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
m->system_map);
- seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
+ seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
cpus_read_lock();
for_each_online_cpu(cpu) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
- seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
- cpu, cm->available, cm->managed, cm->allocated,
+ seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ",
+ cpu, cm->available, cm->managed,
+ cm->managed_allocated, cm->allocated,
m->matrix_bits, cm->alloc_map);
}
cpus_read_unlock();
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 4ca2fd46645d..ad26fbcfbfc8 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -23,11 +23,11 @@
* @nvec: The number of vectors used in this entry
* @affinity: Optional pointer to an affinity mask array size of @nvec
*
- * If @affinity is not NULL then a an affinity array[@nvec] is allocated
- * and the affinity masks from @affinity are copied.
+ * If @affinity is not NULL then an affinity array[@nvec] is allocated
+ * and the affinity masks and flags from @affinity are copied.
*/
-struct msi_desc *
-alloc_msi_entry(struct device *dev, int nvec, const struct cpumask *affinity)
+struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
+ const struct irq_affinity_desc *affinity)
{
struct msi_desc *desc;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index d867d6ddafdd..6d2fa6914b30 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -66,7 +66,7 @@ static int try_one_irq(struct irq_desc *desc, bool force)
raw_spin_lock(&desc->lock);
/*
- * PER_CPU, nested thread interrupts and interrupts explicitely
+ * PER_CPU, nested thread interrupts and interrupts explicitly
* marked polled are excluded from polling.
*/
if (irq_settings_is_per_cpu(desc) ||
@@ -76,7 +76,7 @@ static int try_one_irq(struct irq_desc *desc, bool force)
/*
* Do not poll disabled interrupts unless the spurious
- * disabled poller asks explicitely.
+ * disabled poller asks explicitly.
*/
if (irqd_irq_disabled(&desc->irq_data) && !force)
goto out;
@@ -292,7 +292,7 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
* So in case a thread is woken, we just note the fact and
* defer the analysis to the next hardware interrupt.
*
- * The threaded handlers store whether they sucessfully
+ * The threaded handlers store whether they successfully
* handled an interrupt and we check whether that number
* changed versus the last invocation.
*
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3ebd09efe72a..97959d7b77e2 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -56,7 +56,7 @@ struct kcov {
struct task_struct *t;
};
-static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
unsigned int mode;
@@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
return mode == needed_mode;
}
-static unsigned long canonicalize_ip(unsigned long ip)
+static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
ip -= kaslr_offset();
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 35cf0ad29718..f1d0e00a3971 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -16,6 +16,7 @@
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
+#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
@@ -76,7 +77,7 @@ void * __weak arch_kexec_kernel_image_load(struct kimage *image)
return kexec_image_load_default(image);
}
-static int kexec_image_post_load_cleanup_default(struct kimage *image)
+int kexec_image_post_load_cleanup_default(struct kimage *image)
{
if (!image->fops || !image->fops->cleanup)
return 0;
@@ -499,8 +500,60 @@ static int locate_mem_hole_callback(struct resource *res, void *arg)
return locate_mem_hole_bottom_up(start, end, kbuf);
}
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+static int kexec_walk_memblock(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
+{
+ return 0;
+}
+#else
+static int kexec_walk_memblock(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
+{
+ int ret = 0;
+ u64 i;
+ phys_addr_t mstart, mend;
+ struct resource res = { };
+
+ if (kbuf->image->type == KEXEC_TYPE_CRASH)
+ return func(&crashk_res, kbuf);
+
+ if (kbuf->top_down) {
+ for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &mstart, &mend, NULL) {
+ /*
+ * In memblock, end points to the first byte after the
+ * range while in kexec, end points to the last byte
+ * in the range.
+ */
+ res.start = mstart;
+ res.end = mend - 1;
+ ret = func(&res, kbuf);
+ if (ret)
+ break;
+ }
+ } else {
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &mstart, &mend, NULL) {
+ /*
+ * In memblock, end points to the first byte after the
+ * range while in kexec, end points to the last byte
+ * in the range.
+ */
+ res.start = mstart;
+ res.end = mend - 1;
+ ret = func(&res, kbuf);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+#endif
+
/**
- * arch_kexec_walk_mem - call func(data) on free memory regions
+ * kexec_walk_resources - call func(data) on free memory regions
* @kbuf: Context info for the search. Also passed to @func.
* @func: Function to call for each memory region.
*
@@ -508,8 +561,8 @@ static int locate_mem_hole_callback(struct resource *res, void *arg)
* and that value will be returned. If all free regions are visited without
* func returning non-zero, then zero will be returned.
*/
-int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(struct resource *, void *))
+static int kexec_walk_resources(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
{
if (kbuf->image->type == KEXEC_TYPE_CRASH)
return walk_iomem_res_desc(crashk_res.desc,
@@ -532,7 +585,14 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
int ret;
- ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);
+ /* Arch knows where to place */
+ if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_ARCH_DISCARD_MEMBLOCK))
+ ret = kexec_walk_resources(kbuf, locate_mem_hole_callback);
+ else
+ ret = kexec_walk_memblock(kbuf, locate_mem_hole_callback);
return ret == 1 ? 0 : -EADDRNOTAVAIL;
}
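A tiny illustration of the exclusive-to-inclusive end conversion done in kexec_walk_memblock() above, with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long long mstart = 0x100000ULL;	/* memblock range [mstart, mend) */
	unsigned long long mend   = 0x200000ULL;

	/* kexec resources are end-inclusive, so the last byte is mend - 1 */
	printf("res.start = %#llx, res.end = %#llx\n", mstart, mend - 1);
	return 0;
}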
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 35b50823d83b..98e76cad128b 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -318,23 +318,12 @@ static int suspend_stats_show(struct seq_file *s, void *unused)
return 0;
}
-
-static int suspend_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, suspend_stats_show, NULL);
-}
-
-static const struct file_operations suspend_stats_operations = {
- .open = suspend_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(suspend_stats);
static int __init pm_debugfs_init(void)
{
debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
- NULL, NULL, &suspend_stats_operations);
+ NULL, NULL, &suspend_stats_fops);
return 0;
}
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 86d72ffb811b..b7a82502857a 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -184,7 +184,7 @@ static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
c->target_value = value;
}
-static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
+static int pm_qos_debug_show(struct seq_file *s, void *unused)
{
struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
struct pm_qos_constraints *c;
@@ -245,18 +245,7 @@ out:
return 0;
}
-static int pm_qos_dbg_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pm_qos_dbg_show_requests,
- inode->i_private);
-}
-
-static const struct file_operations pm_qos_debug_fops = {
- .open = pm_qos_dbg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pm_qos_debug);
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 80b34dffdfb9..c2cee9db5204 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -261,9 +261,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
- if (mode & PTRACE_MODE_SCHED)
- return false;
-
if (mode & PTRACE_MODE_NOAUDIT)
return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
else
@@ -331,16 +328,9 @@ ok:
!ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;
- if (mode & PTRACE_MODE_SCHED)
- return 0;
return security_ptrace_access_check(task, mode);
}
-bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
-{
- return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
-}
-
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
int err;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 091e089063be..6fedf3a98581 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5738,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
#ifdef CONFIG_SCHED_SMT
/*
- * The sched_smt_present static key needs to be evaluated on every
- * hotplug event because at boot time SMT might be disabled when
- * the number of booted CPUs is limited.
- *
- * If then later a sibling gets hotplugged, then the key would stay
- * off and SMT scheduling would never be functional.
+ * When going up, increment the number of cores with SMT present.
*/
- if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
- static_branch_enable_cpuslocked(&sched_smt_present);
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ static_branch_inc_cpuslocked(&sched_smt_present);
#endif
set_cpu_active(cpu, true);
@@ -5790,6 +5785,14 @@ int sched_cpu_deactivate(unsigned int cpu)
*/
synchronize_rcu_mult(call_rcu, call_rcu_sched);
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going down, decrement the number of cores with SMT present.
+ */
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+
if (!sched_smp_initialized)
return 0;
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 5e54cbcae673..22bd8980f32f 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Scheduler code and data structures related to cpufreq.
*
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include "sched.h"
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3fffad3bc8a8..626ddd4ffa43 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CPUFreq governor based on scheduler-provided CPU utilization data.
*
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 3d7355d7c3e3..fe24de3fbc93 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -136,8 +136,18 @@
static int psi_bug __read_mostly;
-bool psi_disabled __read_mostly;
-core_param(psi_disabled, psi_disabled, bool, 0644);
+DEFINE_STATIC_KEY_FALSE(psi_disabled);
+
+#ifdef CONFIG_PSI_DEFAULT_DISABLED
+bool psi_enable;
+#else
+bool psi_enable = true;
+#endif
+static int __init setup_psi(char *str)
+{
+ return kstrtobool(str, &psi_enable) == 0;
+}
+__setup("psi=", setup_psi);
/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
@@ -169,8 +179,10 @@ static void group_init(struct psi_group *group)
void __init psi_init(void)
{
- if (psi_disabled)
+ if (!psi_enable) {
+ static_branch_enable(&psi_disabled);
return;
+ }
psi_period = jiffies_to_nsecs(PSI_FREQ);
group_init(&psi_system);
@@ -549,7 +561,7 @@ void psi_memstall_enter(unsigned long *flags)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
*flags = current->flags & PF_MEMSTALL;
@@ -579,7 +591,7 @@ void psi_memstall_leave(unsigned long *flags)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (*flags)
@@ -600,7 +612,7 @@ void psi_memstall_leave(unsigned long *flags)
#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return 0;
cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
@@ -612,7 +624,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
void psi_cgroup_free(struct cgroup *cgroup)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
cancel_delayed_work_sync(&cgroup->psi.clock_work);
@@ -637,7 +649,7 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled) {
+ if (static_branch_likely(&psi_disabled)) {
/*
* Lame to do this here, but the scheduler cannot be locked
* from the outside, so we move cgroups from inside sched/.
@@ -673,7 +685,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
int full;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return -EOPNOTSUPP;
update_stats(group);
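Usage note (an assumption based on the setup_psi()/kstrtobool handling shown above): with this change PSI can be toggled on the kernel command line, e.g. booting with "psi=0" should disable it and "psi=1" enable it, while CONFIG_PSI_DEFAULT_DISABLED only flips the built-in default.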
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 618577fc9aa8..4e524ab589c9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -23,6 +23,7 @@
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
+#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
@@ -936,9 +937,6 @@ static inline int cpu_of(struct rq *rq)
#ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
extern void __update_idle_core(struct rq *rq);
static inline void update_idle_core(struct rq *rq)
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 4904c4677000..aa0de240fb41 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -66,7 +66,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
int clear = 0, set = TSK_RUNNING;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (!wakeup || p->sched_psi_wake_requeue) {
@@ -86,7 +86,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
int clear = TSK_RUNNING, set = 0;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (!sleep) {
@@ -102,7 +102,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
static inline void psi_ttwu_dequeue(struct task_struct *p)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
/*
* Is the task being migrated during a wakeup? Make sure to
@@ -128,7 +128,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
static inline void psi_task_tick(struct rq *rq)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (unlikely(rq->curr->flags & PF_MEMSTALL))
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index e42892926244..b193a59fc05b 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -11,6 +11,7 @@
*/
#include <linux/stackleak.h>
+#include <linux/kprobes.h>
#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
#include <linux/jump_label.h>
@@ -47,7 +48,7 @@ int stack_erasing_sysctl(struct ctl_table *table, int write,
#define skip_erasing() false
#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
-asmlinkage void stackleak_erase(void)
+asmlinkage void notrace stackleak_erase(void)
{
/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
unsigned long kstack_ptr = current->lowest_stack;
@@ -101,8 +102,9 @@ asmlinkage void stackleak_erase(void)
/* Reset the 'lowest_stack' value for the next syscall */
current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
}
+NOKPROBE_SYMBOL(stackleak_erase);
-void __used stackleak_track_stack(void)
+void __used notrace stackleak_track_stack(void)
{
/*
* N.B. stackleak_erase() fills the kernel stack with the poison value,
diff --git a/kernel/sys.c b/kernel/sys.c
index 123bd73046ec..64b5a230f38d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -121,6 +121,9 @@
#ifndef SVE_GET_VL
# define SVE_GET_VL() (-EINVAL)
#endif
+#ifndef PAC_RESET_KEYS
+# define PAC_RESET_KEYS(a, b) (-EINVAL)
+#endif
/*
* this is where the system-wide overflow UID and GID are defined, for
@@ -2476,6 +2479,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
return -EINVAL;
error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
break;
+ case PR_PAC_RESET_KEYS:
+ if (arg3 || arg4 || arg5)
+ return -EINVAL;
+ error = PAC_RESET_KEYS(me, arg2);
+ break;
default:
error = -EINVAL;
break;
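
The sys.c hunk above routes PR_PAC_RESET_KEYS through prctl(), rejecting any non-zero arg3..arg5 before handing arg2 to the architecture hook. A rough userspace sketch of the call follows; the prctl number and the meaning of a zero key mask come from the arm64 pointer-authentication series and should be treated as assumptions here.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_PAC_RESET_KEYS
#define PR_PAC_RESET_KEYS 54	/* value used by the uapi headers; assumed here */
#endif

int main(void)
{
	/* arg3..arg5 must be zero, as enforced by the hunk above; a zero key
	 * mask is assumed to mean "reset all keys" on arm64 */
	if (prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0) != 0)
		perror("PR_PAC_RESET_KEYS");	/* EINVAL without pointer auth */
	return 0;
}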
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index fa5de5e8de61..2c97e8c2d29f 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Alarmtimer interface
*
@@ -10,10 +11,6 @@
 * Copyright (C) 2010 IBM Corporation
*
* Author: John Stultz <john.stultz@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/time.h>
#include <linux/hrtimer.h>
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 8c0e4092f661..5e77662dd2d9 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/clockevents.c
- *
* This file contains functions which manage clock event devices.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
*/
#include <linux/clockchips.h>
@@ -39,10 +35,8 @@ static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
u64 clc = (u64) latch << evt->shift;
u64 rnd;
- if (unlikely(!evt->mult)) {
+ if (WARN_ON(!evt->mult))
evt->mult = 1;
- WARN_ON(1);
- }
rnd = (u64) evt->mult - 1;
/*
@@ -164,10 +158,8 @@ void clockevents_switch_state(struct clock_event_device *dev,
* on it, so fix it up and emit a warning:
*/
if (clockevent_state_oneshot(dev)) {
- if (unlikely(!dev->mult)) {
+ if (WARN_ON(!dev->mult))
dev->mult = 1;
- WARN_ON(1);
- }
}
}
}
@@ -315,10 +307,8 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
int64_t delta;
int rc;
- if (unlikely(expires < 0)) {
- WARN_ON_ONCE(1);
+ if (WARN_ON_ONCE(expires < 0))
return -ETIME;
- }
dev->next_event = expires;
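
The three clockevents hunks above are the same mechanical cleanup: WARN_ON(cond) evaluates to cond, so the separate WARN_ON(1) inside the if-body can be folded into the test itself. A small illustrative sketch (not the kernel function):

#include <linux/bug.h>
#include <linux/clockchips.h>
#include <linux/types.h>

/* Warn on a zero mult and fix it up in one statement. */
static u64 sanitize_mult(struct clock_event_device *evt)
{
	if (WARN_ON(!evt->mult))	/* prints a backtrace and yields the condition */
		evt->mult = 1;

	return (u64)evt->mult - 1;
}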
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ffe081623aec..3bcc19ceb073 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -1,26 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * linux/kernel/time/clocksource.c
- *
* This file contains the functions which manage clocksource drivers.
*
* Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * TODO WishList:
- * o Allow clocksource drivers to be unregistered
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 9cdd74bd2d27..f5cfa1b73d6f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1,34 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/hrtimer.c
- *
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
*
* High-resolution kernel timers
*
- * In contrast to the low-resolution timeout API implemented in
- * kernel/timer.c, hrtimers provide finer resolution and accuracy
- * depending on system configuration and capabilities.
- *
- * These timers are currently used for:
- * - itimers
- * - POSIX timers
- * - nanosleep
- * - precise in-kernel timing
+ * In contrast to the low-resolution timeout API, aka timer wheel,
+ * hrtimers provide finer resolution and accuracy depending on system
+ * configuration and capabilities.
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* Credits:
- * based on kernel/timer.c
+ * Based on the original timer wheel code
*
* Help, testing, suggestions, bugfixes, improvements were
* provided by:
*
* George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
* et. al.
- *
- * For licencing details see kernel-base/COPYING
*/
#include <linux/cpu.h>
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 9a65713c8309..02068b2d5862 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/itimer.c
- *
* Copyright (C) 1992 Darren Senn
*/
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 497719127bf9..dc1b6f1929f9 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -1,25 +1,9 @@
-/***********************************************************************
-* linux/kernel/time/jiffies.c
-*
-* This file contains the jiffies based clocksource.
-*
-* Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*
-************************************************************************/
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This file contains the jiffies based clocksource.
+ *
+ * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
+ */
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/module.h>
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index c5e0cba3b39c..bc3a3c37ec9c 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtc.h>
-#include <linux/math64.h>
#include "ntp_internal.h"
#include "timekeeping_internal.h"
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index fe56c4e06c51..425bbfce6819 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * posix-clock.c - support for dynamic clock devices
+ * Support for dynamic clock devices
*
* Copyright (C) 2010 OMICRON electronics GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/device.h>
#include <linux/export.h>
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 989ccf028bde..a51895486e5e 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Dummy stubs used when CONFIG_POSIX_TIMERS=n
*
* Created by: Nicolas Pitre, July 2016
* Copyright: (C) 2016 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/linkage.h>
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index bd62b5eeb5a0..0e84bb72a3da 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -1,34 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * linux/kernel/posix-timers.c
- *
- *
* 2002-10-15 Posix Clocks & timers
* by George Anzinger george@mvista.com
- *
* Copyright (C) 2002 2003 by MontaVista Software.
*
* 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
* Copyright (C) 2004 Boris Hu
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
- */
-
-/* These are all the functions necessary to implement
- * POSIX clocks & timers
+ * These are all the functions necessary to implement POSIX clocks & timers
*/
#include <linux/mm.h>
#include <linux/interrupt.h>
@@ -289,9 +268,6 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
{
struct hrtimer *timer = &timr->it.real.timer;
- if (!timr->it_interval)
- return;
-
timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
timr->it_interval);
hrtimer_restart(timer);
@@ -317,7 +293,7 @@ void posixtimer_rearm(struct kernel_siginfo *info)
if (!timr)
return;
- if (timr->it_requeue_pending == info->si_sys_private) {
+ if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
timr->kclock->timer_rearm(timr);
timr->it_active = 1;
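
In the posix-timers hunk above, the it_interval check moves from common_hrtimer_rearm() to posixtimer_rearm(), so the kclock-specific rearm helper can assume a periodic timer. A sketch of that helper's shape, with hypothetical names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/*
 * Hypothetical rearm helper: forward the expiry by whole intervals past
 * "now" (accumulating overruns) and start the timer again. The caller is
 * expected to have verified that the timer is periodic.
 */
static void rearm_periodic(struct hrtimer *timer, ktime_t interval, u64 *overrun)
{
	*overrun += hrtimer_forward(timer, timer->base->get_time(), interval);
	hrtimer_restart(timer);
}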
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index cbc72c2c1fca..094b82ca95e5 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * sched_clock.c: Generic sched_clock() support, to extend low level
- * hardware time counters to full 64-bit ns values.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Generic sched_clock() support, to extend low level hardware time
+ * counters to full 64-bit ns values.
*/
#include <linux/clocksource.h>
#include <linux/init.h>
diff --git a/kernel/time/test_udelay.c b/kernel/time/test_udelay.c
index b0928ab3270f..77c63005dc4e 100644
--- a/kernel/time/test_udelay.c
+++ b/kernel/time/test_udelay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* udelay() test kernel module
*
@@ -7,15 +8,6 @@
 * Specifying usecs of 0 or negative values will run multiple tests.
*
* Copyright (C) 2014 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/debugfs.h>
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index a59641fb88b6..5be6154e2fd2 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-broadcast-hrtimer.c
- * This file emulates a local clock event device
- * via a pseudo clock device.
+ * Emulate a local clock event device via a pseudo clock device.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index be0aac2b4300..803fa67aace9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-broadcast.c
- *
* This file contains functions which emulate a local clock-event
* device via a broadcast event source.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 14de3727b18e..529143b4c8d2 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-common.c
- *
* This file contains the base functions to manage periodic tick
* related events.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 6fe615d57ebb..f9745d47425a 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-oneshot.c
- *
* This file contains functions which manage high resolution tick
* related events.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 69e673b88474..6fa52cd6df0b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-sched.c
- *
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
@@ -8,8 +7,6 @@
* No idle tick implementation for low and high resolution timers
*
* Started by: Thomas Gleixner and Ingo Molnar
- *
- * Distribute under GPLv2.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/time.c b/kernel/time/time.c
index ad204cf6d001..5aa0a156e331 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -1,14 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time.c
- *
* Copyright (C) 1991, 1992 Linus Torvalds
*
- * This file contains the interface functions for the various
- * time related system calls: time, stime, gettimeofday, settimeofday,
- * adjtime
- */
-/*
- * Modification history kernel/time.c
+ * This file contains the interface functions for the various time related
+ * system calls: time, stime, gettimeofday, settimeofday, adjtime
+ *
+ * Modification history:
*
* 1993-09-02 Philip Gladstone
* Created file with time related functions from sched/core.c and adjtimex()
diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
index f83bbb81600b..7ed0e0fb5831 100644
--- a/kernel/time/timeconst.bc
+++ b/kernel/time/timeconst.bc
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
scale=0
define gcd(a,b) {
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c
index 7142580ad94f..589e0a552129 100644
--- a/kernel/time/timeconv.c
+++ b/kernel/time/timeconv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.0+
/*
* Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
* This file is part of the GNU C Library.
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
index 8afd78932bdf..85b98e727306 100644
--- a/kernel/time/timecounter.c
+++ b/kernel/time/timecounter.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * linux/kernel/time/timecounter.c
- *
- * based on code that migrated away from
- * linux/kernel/time/clocksource.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Based on clocksource code. See commit 74d23cc704d1
*/
-
#include <linux/export.h>
#include <linux/timecounter.h>
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2d110c948805..c801e25875a3 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/timekeeping.c
- *
- * Kernel timekeeping code and accessor functions
- *
- * This code was moved from linux/kernel/timer.c.
- * Please see that file for copyright and history logs.
- *
+ * Kernel timekeeping code and accessor functions. Based on code from
+ * timer.c, moved in commit 8524070b7982.
*/
-
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -50,7 +45,9 @@ enum timekeeping_adv_mode {
static struct {
seqcount_t seq;
struct timekeeper timekeeper;
-} tk_core ____cacheline_aligned;
+} tk_core ____cacheline_aligned = {
+ .seq = SEQCNT_ZERO(tk_core.seq),
+};
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;
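
The timekeeping.c hunk above gives tk_core.seq a static SEQCNT_ZERO() initializer, so seqcount readers that run before timekeeping is fully set up still see a consistent (even) sequence value. A sketch of the read-side pattern that relies on this, with stand-in names for tk_core and its payload:

#include <linux/cache.h>
#include <linux/seqlock.h>
#include <linux/types.h>

static struct {
	seqcount_t seq;
	u64 data;
} my_core ____cacheline_aligned = {
	.seq = SEQCNT_ZERO(my_core.seq),	/* valid before the first write */
};

static u64 my_read(void)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&my_core.seq);	/* snapshot */
		val = my_core.data;
	} while (read_seqcount_retry(&my_core.seq, seq));	/* retry if a writer ran */

	return val;
}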
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index 238e4be60229..86489950d690 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* debugfs file to track time spent in suspend
*
* Copyright (c) 2011, Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/debugfs.h>
@@ -28,7 +19,7 @@
static unsigned int sleep_time_bin[NUM_BINS] = {0};
-static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
+static int tk_debug_sleep_time_show(struct seq_file *s, void *data)
{
unsigned int bin;
seq_puts(s, " time (secs) count\n");
@@ -42,18 +33,7 @@ static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
}
return 0;
}
-
-static int tk_debug_sleep_time_open(struct inode *inode, struct file *file)
-{
- return single_open(file, tk_debug_show_sleep_time, NULL);
-}
-
-static const struct file_operations tk_debug_sleep_time_fops = {
- .open = tk_debug_sleep_time_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(tk_debug_sleep_time);
static int __init tk_debug_sleep_time_init(void)
{
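
The timekeeping_debug.c hunk renames the show function to tk_debug_sleep_time_show() so the open handler and file_operations boilerplate can come from DEFINE_SHOW_ATTRIBUTE(). Roughly, and in simplified form, the macro supplies something like the code it replaces; the authoritative definition lives in include/linux/seq_file.h and may differ in detail.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

/* tk_debug_sleep_time_show() is the function renamed in the hunk above. */
static int tk_debug_sleep_time_open(struct inode *inode, struct file *file)
{
	return single_open(file, tk_debug_sleep_time_show, inode->i_private);
}

static const struct file_operations tk_debug_sleep_time_fops = {
	.owner		= THIS_MODULE,
	.open		= tk_debug_sleep_time_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};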
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index fa49cd753dea..444156debfa0 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/timer.c
- *
* Kernel internal timers
*
* Copyright (C) 1991, 1992 Linus Torvalds
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index d647dabdac97..98ba50dcb1b2 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * kernel/time/timer_list.c
- *
* List pending timers
*
* Copyright(C) 2006, Red Hat, Inc., Ingo Molnar
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/proc_fs.h>
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 08fcfe440c63..9864a35c8bb5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -196,11 +196,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
i++;
} else if (fmt[i] == 'p' || fmt[i] == 's') {
mod[fmt_cnt]++;
- i++;
- if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+ /* disallow any further format extensions */
+ if (fmt[i + 1] != 0 &&
+ !isspace(fmt[i + 1]) &&
+ !ispunct(fmt[i + 1]))
return -EINVAL;
fmt_cnt++;
- if (fmt[i - 1] == 's') {
+ if (fmt[i] == 's') {
if (str_seen)
/* allow only one '%s' per fmt string */
return -EINVAL;
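
The bpf_trace.c fix above stops advancing i before the whitespace/punctuation check and peeks at fmt[i + 1] instead, so the later fmt[i] == 's' test still looks at the conversion character rather than the byte after it. A standalone sketch of the corrected check (a hypothetical helper, not the kernel function):

#include <ctype.h>
#include <stdbool.h>

static bool fmt_conversion_ok(const char *fmt, int i, bool *str_seen)
{
	if (fmt[i] != 'p' && fmt[i] != 's')
		return false;
	/* disallow any further format extensions, e.g. "%pK" or "%sx" */
	if (fmt[i + 1] != '\0' && !isspace((unsigned char)fmt[i + 1]) &&
	    !ispunct((unsigned char)fmt[i + 1]))
		return false;
	if (fmt[i] == 's') {
		if (*str_seen)		/* allow only one '%s' per fmt string */
			return false;
		*str_seen = true;
	}
	return true;
}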
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f536f601bd46..e23eb9fc77aa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -817,7 +817,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
- int index = trace->depth;
+ int index = current->curr_ret_stack;
function_profile_call(trace->func, 0, NULL, NULL);
@@ -852,7 +852,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
if (!fgraph_graph_time) {
int index;
- index = trace->depth;
+ index = current->curr_ret_stack;
/* Append this call time to the parent time to subtract */
if (index)
@@ -5460,6 +5460,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
if (ops->flags & FTRACE_OPS_FL_ENABLED)
ftrace_shutdown(ops, 0);
ops->flags |= FTRACE_OPS_FL_DELETED;
+ ftrace_free_filter(ops);
mutex_unlock(&ftrace_lock);
}
@@ -6814,6 +6815,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->curr_ret_stack = -1;
+ t->curr_ret_depth = -1;
/* Make sure the tasks see the -1 first: */
smp_wmb();
t->ret_stack = ret_stack_list[start++];
@@ -7038,6 +7040,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
t->curr_ret_stack = -1;
+ t->curr_ret_depth = -1;
/*
* The idle task has no parent, it either has its own
* stack or no stack at all.
@@ -7068,6 +7071,7 @@ void ftrace_graph_init_task(struct task_struct *t)
/* Make sure we do not use the parent ret_stack */
t->ret_stack = NULL;
t->curr_ret_stack = -1;
+ t->curr_ret_depth = -1;
if (ftrace_graph_active) {
struct ftrace_ret_stack *ret_stack;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3b8c0e24ab30..447bd96ee658 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -512,12 +512,44 @@ enum {
* can only be modified by current, we can reuse trace_recursion.
*/
TRACE_IRQ_BIT,
+
+ /* Set if the function is in the set_graph_function file */
+ TRACE_GRAPH_BIT,
+
+ /*
+ * In the very unlikely case that an interrupt came in
+ * at a start of graph tracing, and we want to trace
+ * the function in that interrupt, the depth can be greater
+ * than zero, because of the preempted start of a previous
+ * trace. In an even more unlikely case, depth could be 2
+ * if a softirq interrupted the start of graph tracing,
+ * followed by an interrupt preempting a start of graph
+ * tracing in the softirq, and depth can even be 3
+ * if an NMI came in at the start of an interrupt function
+ * that preempted a softirq start of a function that
+ * preempted normal context!!!! Luckily, it can't be
+ * greater than 3, so the next two bits are a mask
+ * of what the depth is when we set TRACE_GRAPH_BIT
+ */
+
+ TRACE_GRAPH_DEPTH_START_BIT,
+ TRACE_GRAPH_DEPTH_END_BIT,
};
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
+#define trace_recursion_depth() \
+ (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
+#define trace_recursion_set_depth(depth) \
+ do { \
+ current->trace_recursion &= \
+ ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
+ current->trace_recursion |= \
+ ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
+ } while (0)
+
#define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
@@ -843,8 +875,9 @@ extern void __trace_graph_return(struct trace_array *tr,
extern struct ftrace_hash *ftrace_graph_hash;
extern struct ftrace_hash *ftrace_graph_notrace_hash;
-static inline int ftrace_graph_addr(unsigned long addr)
+static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
+ unsigned long addr = trace->func;
int ret = 0;
preempt_disable_notrace();
@@ -855,6 +888,14 @@ static inline int ftrace_graph_addr(unsigned long addr)
}
if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+
+ /*
+ * This needs to be cleared on the return functions
+ * when the depth is zero.
+ */
+ trace_recursion_set(TRACE_GRAPH_BIT);
+ trace_recursion_set_depth(trace->depth);
+
/*
* If no irqs are to be traced, but a set_graph_function
* is set, and called by an interrupt handler, we still
@@ -872,6 +913,13 @@ out:
return ret;
}
+static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
+{
+ if (trace_recursion_test(TRACE_GRAPH_BIT) &&
+ trace->depth == trace_recursion_depth())
+ trace_recursion_clear(TRACE_GRAPH_BIT);
+}
+
static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
int ret = 0;
@@ -885,7 +933,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
return ret;
}
#else
-static inline int ftrace_graph_addr(unsigned long addr)
+static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
return 1;
}
@@ -894,6 +942,8 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
return 0;
}
+static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
+{ }
#endif /* CONFIG_DYNAMIC_FTRACE */
extern unsigned int fgraph_max_depth;
@@ -901,7 +951,8 @@ extern unsigned int fgraph_max_depth;
static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
/* trace it when it is-nested-in or is a function enabled. */
- return !(trace->depth || ftrace_graph_addr(trace->func)) ||
+ return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
+ ftrace_graph_addr(trace)) ||
(trace->depth < 0) ||
(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}
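
The trace.h hunk above packs the graph-entry depth (at most 3, per the comment) into two bits of current->trace_recursion. A standalone sketch of that mask-and-shift packing, with hypothetical names and an arbitrary bit position:

#include <stdio.h>

#define DEPTH_SHIFT	5	/* arbitrary bit position for the sketch */
#define DEPTH_MASK	(3u << DEPTH_SHIFT)

static unsigned long set_depth(unsigned long flags, unsigned int depth)
{
	flags &= ~DEPTH_MASK;			/* clear the old value */
	flags |= (depth & 3u) << DEPTH_SHIFT;	/* store the new one (0..3) */
	return flags;
}

static unsigned int get_depth(unsigned long flags)
{
	return (flags >> DEPTH_SHIFT) & 3u;
}

int main(void)
{
	unsigned long flags = 0;

	flags = set_depth(flags, 2);
	printf("depth = %u\n", get_depth(flags));	/* prints 2 */
	return 0;
}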
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 84a65173b1e9..5574e862de8d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -570,11 +570,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
}
}
+ kfree(op_stack);
+ kfree(inverts);
return prog;
out_free:
kfree(op_stack);
- kfree(prog_stack);
kfree(inverts);
+ kfree(prog_stack);
return ERR_PTR(ret);
}
@@ -1718,6 +1720,7 @@ static int create_filter(struct trace_event_call *call,
err = process_preds(call, filter_string, *filterp, pe);
if (err && set_str)
append_filter_err(pe, *filterp);
+ create_filter_finish(pe);
return err;
}
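
The predicate_parse() hunk plugs a leak: the temporary op_stack and inverts arrays were only freed on the error path, so the success path now frees them too. A simplified skeleton of that allocate/build/free-on-both-paths shape, with hypothetical helpers:

#include <linux/err.h>
#include <linux/slab.h>

struct prog;							/* stand-in for the predicate program */
struct prog *build_prog(const char *str, int *ops, int *inv);	/* hypothetical */

static struct prog *parse(const char *str)
{
	int *op_stack, *inverts;
	struct prog *prog;

	op_stack = kcalloc(16, sizeof(*op_stack), GFP_KERNEL);
	inverts = kcalloc(16, sizeof(*inverts), GFP_KERNEL);
	if (!op_stack || !inverts) {
		prog = ERR_PTR(-ENOMEM);
		goto out;
	}

	prog = build_prog(str, op_stack, inverts);
out:
	/* scratch state is freed on success and failure alike */
	kfree(op_stack);
	kfree(inverts);
	return prog;
}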
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 2152d1e530cb..cd12ecb66eb9 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -732,8 +732,10 @@ int set_trigger_filter(char *filter_str,
/* The filter is for the 'trigger' event, not the triggered event */
ret = create_event_filter(file->event_call, filter_str, false, &filter);
- if (ret)
- goto out;
+ /*
+ * If create_event_filter() fails, filter still needs to be freed.
+ * Which the calling code will do with data->filter.
+ */
assign:
tmp = rcu_access_pointer(data->filter);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 169b3c44ee97..086af4f5c3e8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -118,8 +118,8 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
struct trace_seq *s, u32 flags);
/* Add a function return address to the trace stack on thread info.*/
-int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+static int
+ftrace_push_return_trace(unsigned long ret, unsigned long func,
unsigned long frame_pointer, unsigned long *retp)
{
unsigned long long calltime;
@@ -177,9 +177,31 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
current->ret_stack[index].retp = retp;
#endif
- *depth = current->curr_ret_stack;
+ return 0;
+}
+
+int function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp)
+{
+ struct ftrace_graph_ent trace;
+
+ trace.func = func;
+ trace.depth = ++current->curr_ret_depth;
+
+ if (ftrace_push_return_trace(ret, func,
+ frame_pointer, retp))
+ goto out;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace))
+ goto out_ret;
return 0;
+ out_ret:
+ current->curr_ret_stack--;
+ out:
+ current->curr_ret_depth--;
+ return -EBUSY;
}
/* Retrieve a function return address to the trace stack on thread info.*/
@@ -241,7 +263,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
trace->func = current->ret_stack[index].func;
trace->calltime = current->ret_stack[index].calltime;
trace->overrun = atomic_read(&current->trace_overrun);
- trace->depth = index;
+ trace->depth = current->curr_ret_depth--;
+ /*
+ * We still want to trace interrupts coming in if
+ * max_depth is set to 1. Make sure the decrement is
+ * seen before ftrace_graph_return.
+ */
+ barrier();
}
/*
@@ -255,6 +283,12 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
ftrace_pop_return_trace(&trace, &ret, frame_pointer);
trace.rettime = trace_clock_local();
+ ftrace_graph_return(&trace);
+ /*
+ * The ftrace_graph_return() may still access the current
+ * ret_stack structure, we need to make sure the update of
+ * curr_ret_stack is after that.
+ */
barrier();
current->curr_ret_stack--;
/*
@@ -267,13 +301,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
return ret;
}
- /*
- * The trace should run after decrementing the ret counter
- * in case an interrupt were to come in. We don't want to
- * lose the interrupt if max_depth is set.
- */
- ftrace_graph_return(&trace);
-
if (unlikely(!ret)) {
ftrace_graph_stop();
WARN_ON(1);
@@ -482,6 +509,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
int cpu;
int pc;
+ ftrace_graph_addr_finish(trace);
+
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -505,6 +534,8 @@ void set_graph_array(struct trace_array *tr)
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
+ ftrace_graph_addr_finish(trace);
+
if (tracing_thresh &&
(trace->rettime - trace->calltime < tracing_thresh))
return;
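
The trace_functions_graph.c changes above (together with the curr_ret_depth initialization in ftrace.c and the ftrace_graph_addr_finish() calls added to the return handlers) keep a per-task depth counter in lockstep with the shadow return stack: it is bumped before the push, unwound on every failure path in function_graph_enter(), and decremented when the return trace is popped. A minimal standalone model of that pairing, not the kernel code:

#include <stdbool.h>

#define STACK_MAX 32

struct task_model {
	int curr_ret_stack;	/* index into the shadow return stack */
	int curr_ret_depth;	/* nesting depth reported to tracers */
	unsigned long ret_stack[STACK_MAX];
};

static void task_model_init(struct task_model *t)
{
	t->curr_ret_stack = -1;	/* matches the -1 initialization in the hunks above */
	t->curr_ret_depth = -1;
}

static bool push_ret(struct task_model *t, unsigned long ret)
{
	if (t->curr_ret_stack + 1 >= STACK_MAX)
		return false;
	t->ret_stack[++t->curr_ret_stack] = ret;
	return true;
}

static int graph_enter(struct task_model *t, unsigned long ret)
{
	t->curr_ret_depth++;		/* depth handed to the entry tracer */
	if (!push_ret(t, ret)) {
		t->curr_ret_depth--;	/* unwind on overflow, as in the hunk above */
		return -1;
	}
	return 0;
}

static unsigned long graph_return(struct task_model *t)
{
	unsigned long ret = t->ret_stack[t->curr_ret_stack];

	t->curr_ret_depth--;		/* report the depth before popping */
	t->curr_ret_stack--;
	return ret;
}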
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b7357f9f82a3..98ea6d28df15 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -208,6 +208,8 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
unsigned long flags;
int pc;
+ ftrace_graph_addr_finish(trace);
+
if (!func_prolog_dec(tr, &data, &flags))
return;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index a86b303e6c67..7d04b9890755 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -270,6 +270,8 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
unsigned long flags;
int pc;
+ ftrace_graph_addr_finish(trace);
+
if (!func_prolog_preempt_disable(tr, &data, &pc))
return;