Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cpu.c             |  2 +-
-rw-r--r--   kernel/events/uprobes.c  |  6 ++++--
-rw-r--r--   kernel/irq/manage.c      |  1 +
-rw-r--r--   kernel/printk/printk.c   |  4 ++++
-rw-r--r--   kernel/ptrace.c          | 16 ++++++++++------
-rw-r--r--   kernel/sched/fair.c      | 20 ++++++++++++++------
-rw-r--r--   kernel/time/alarmtimer.c |  2 --
7 files changed, 36 insertions(+), 15 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5df20d6d1520..29de1a9352c0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -228,7 +228,7 @@ static struct {
 	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
 	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-	.dep_map = {.name = "cpu_hotplug.lock" },
+	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
 #endif
 };
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d4129bb05e5d..f9ec9add2164 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
 retry:
 	/* Read the page with vaddr into memory */
-	ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
+	ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
+			&vma);
 	if (ret <= 0)
 		return ret;
@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
 	 * but we treat this as a 'remote' access since it is
 	 * essentially a kernel access to the memory.
 	 */
-	result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
+			NULL);
 	if (result < 0)
 		return result;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0c5f1a5db654..9c4d30483264 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -721,6 +721,7 @@ int irq_set_parent(int irq, int parent_irq)
 	irq_put_desc_unlock(desc, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(irq_set_parent);
 #endif
 /*
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index d5e397315473..de08fc90baaf 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1769,6 +1769,10 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c
 		cont_flush();
 	}
+	/* Skip empty continuation lines that couldn't be added - they just flush */
+	if (!text_len && (lflags & LOG_CONT))
+		return 0;
+
 	/* If it doesn't end in a newline, try to buffer the current line */
 	if (!(lflags & LOG_NEWLINE)) {
 		if (cont_add(facility, level, lflags, text, text_len))
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2a99027312a6..e6474f7272ec 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
 		int this_len, retval;
 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
-		retval = access_process_vm(tsk, src, buf, this_len, 0);
+		retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
 		if (!retval) {
 			if (copied)
 				break;
@@ -564,7 +564,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 		if (copy_from_user(buf, src, this_len))
 			return -EFAULT;
-		retval = access_process_vm(tsk, dst, buf, this_len, 1);
+		retval = access_process_vm(tsk, dst, buf, this_len,
+				FOLL_FORCE | FOLL_WRITE);
 		if (!retval) {
 			if (copied)
 				break;
@@ -1127,7 +1128,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
 	unsigned long tmp;
 	int copied;
-	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 	if (copied != sizeof(tmp))
 		return -EIO;
 	return put_user(tmp, (unsigned long __user *)data);
@@ -1138,7 +1139,8 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
 {
 	int copied;
-	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+	copied = access_process_vm(tsk, addr, &data, sizeof(data),
+			FOLL_FORCE | FOLL_WRITE);
 	return (copied == sizeof(data)) ? 0 : -EIO;
 }
@@ -1155,7 +1157,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
 	switch (request) {
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
-		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
+		ret = access_process_vm(child, addr, &word, sizeof(word),
+				FOLL_FORCE);
 		if (ret != sizeof(word))
 			ret = -EIO;
 		else
@@ -1164,7 +1167,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
 	case PTRACE_POKETEXT:
 	case PTRACE_POKEDATA:
-		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
+		ret = access_process_vm(child, addr, &data, sizeof(data),
+				FOLL_FORCE | FOLL_WRITE);
 		ret = (ret != sizeof(data) ? -EIO : 0);
 		break;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2d4ad72f8f3c..d941c97dfbc3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,7 +690,14 @@ void init_entity_runnable_average(struct sched_entity *se)
 	 * will definitely be update (after enqueue).
 	 */
 	sa->period_contrib = 1023;
-	sa->load_avg = scale_load_down(se->load.weight);
+	/*
+	 * Tasks are intialized with full load to be seen as heavy tasks until
+	 * they get a chance to stabilize to their real load level.
+	 * Group entities are intialized with zero load to reflect the fact that
+	 * nothing has been attached to the task group yet.
+	 */
+	if (entity_is_task(se))
+		sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
 	/*
 	 * At this point, util_avg won't be used in select_task_rq_fair anyway
@@ -5471,13 +5478,18 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
  */
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
 {
-	struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
-	u64 avg_idle = this_rq()->avg_idle;
-	u64 avg_cost = this_sd->avg_scan_cost;
+	struct sched_domain *this_sd;
+	u64 avg_cost, avg_idle = this_rq()->avg_idle;
 	u64 time, cost;
 	s64 delta;
 	int cpu, wrap;
+	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+	if (!this_sd)
+		return -1;
+
+	avg_cost = this_sd->avg_scan_cost;
+
 	/*
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particularly is sensitive here.
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c3aad685bbc0..12dd190634ab 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -542,7 +542,6 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
 static int alarm_timer_create(struct k_itimer *new_timer)
 {
 	enum  alarmtimer_type type;
-	struct alarm_base *base;
 	if (!alarmtimer_get_rtcdev())
 		return -ENOTSUPP;
@@ -551,7 +550,6 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 		return -EPERM;
 	type = clock2alarm(new_timer->it_clock);
-	base = &alarm_bases[type];
 	alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);
 	return 0;
 }
