diff options
| author | Arnd Bergmann <arnd@arndb.de> | 2014-03-17 15:17:07 +0100 |
|---|---|---|
| committer | Arnd Bergmann <arnd@arndb.de> | 2014-03-17 15:17:07 +0100 |
| commit | 38edc2da5014e70e46a724d97c3ef3dde106331b | (patch) |
| tree | 590499bacd062e8dd74c6f05a7d811dd714a2d70 | /kernel |
| parent | 937b5991ca7717ceba99014f2ad3f51c85cdb9ad | (diff) |
| parent | 28b191118c11719bb27db621425a70be28a40e08 | (diff) |
Merge tag 'omap-for-v3.15/dt-overo-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/dt
Updates to the .dts files to support more Gumstix boards.
These are sent separately from the rest of the .dts changes
as these depend on the fixes merged into v3.14-rc4, and
needed a bit more time to get updated on the fixes.
* tag 'omap-for-v3.15/dt-overo-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
  ARM: dts: Add support for the Overo Summit
  ARM: dts: Add support for the Overo Chestnut43
  ARM: dts: Add support for the Overo Alto35
  ARM: dts: Add support for the Overo Gallop43
  ARM: dts: Add support for the Overo Palo43
  ARM: dts: overo: Add LIS33DE accelerometer
  ARM: dts: overo: Create a file for common Gumstix peripherals
  ARM: dts: overo: Push uart3 pinmux down to expansion board
  ARM: dts: omap3-tobi: Add AT24C01 EEPROM
  ARM: dts: omap3-tobi: Use include file omap-gpmc-smsc9221
  ARM: dts: omap: Add common file for SMSC9221
  ARM: dts: omap3-overo: Add HSUSB PHY
  ARM: dts: omap3-overo: Enable WiFi/BT combo
  ARM: dts: omap3-overo: Add missing pinctrl
  ARM: dts: omap3-tobi: Add missing pinctrl
  ARM: dts: overo: reorganize include files
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Conflicts:
	arch/arm/boot/dts/omap3-overo.dtsi
Diffstat (limited to 'kernel')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | kernel/cgroup.c | 60 |
| -rw-r--r-- | kernel/power/console.c | 1 |
| -rw-r--r-- | kernel/printk/printk.c | 2 |
| -rw-r--r-- | kernel/sched/core.c | 28 |
| -rw-r--r-- | kernel/sched/cpudeadline.c | 6 |
| -rw-r--r-- | kernel/sched/deadline.c | 10 |
| -rw-r--r-- | kernel/sched/fair.c | 2 |
| -rw-r--r-- | kernel/sched/sched.h | 1 |
| -rw-r--r-- | kernel/time/sched_clock.c | 46 |
| -rw-r--r-- | kernel/user_namespace.c | 2 |
| -rw-r--r-- | kernel/workqueue.c | 7 |
11 files changed, 94 insertions, 71 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e2f46ba37f72..105f273b6f86 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -886,7 +886,9 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)  		 * per-subsystem and moved to css->id so that lookups are  		 * successful until the target css is released.  		 */ +		mutex_lock(&cgroup_mutex);  		idr_remove(&cgrp->root->cgroup_idr, cgrp->id); +		mutex_unlock(&cgroup_mutex);  		cgrp->id = -1;  		call_rcu(&cgrp->rcu_head, cgroup_free_rcu); @@ -1566,10 +1568,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,  		mutex_lock(&cgroup_mutex);  		mutex_lock(&cgroup_root_mutex); -		root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp, -					   0, 1, GFP_KERNEL); -		if (root_cgrp->id < 0) +		ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); +		if (ret < 0)  			goto unlock_drop; +		root_cgrp->id = ret;  		/* Check for name clashes with existing mounts */  		ret = -EBUSY; @@ -2763,10 +2765,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)  	 */  	update_before = cgroup_serial_nr_next; -	mutex_unlock(&cgroup_mutex); -  	/* add/rm files for all cgroups created before */ -	rcu_read_lock();  	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {  		struct cgroup *cgrp = css->cgroup; @@ -2775,23 +2774,19 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)  		inode = cgrp->dentry->d_inode;  		dget(cgrp->dentry); -		rcu_read_unlock(); -  		dput(prev);  		prev = cgrp->dentry; +		mutex_unlock(&cgroup_mutex);  		mutex_lock(&inode->i_mutex);  		mutex_lock(&cgroup_mutex);  		if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))  			ret = cgroup_addrm_files(cgrp, cfts, is_add); -		mutex_unlock(&cgroup_mutex);  		mutex_unlock(&inode->i_mutex); - -		rcu_read_lock();  		if (ret)  			break;  	} -	rcu_read_unlock(); +	mutex_unlock(&cgroup_mutex);  	dput(prev);  	deactivate_super(sb);  	return ret; @@ -2910,9 +2905,14 @@ static void 
cgroup_enable_task_cg_lists(void)  		 * We should check if the process is exiting, otherwise  		 * it will race with cgroup_exit() in that the list  		 * entry won't be deleted though the process has exited. +		 * Do it while holding siglock so that we don't end up +		 * racing against cgroup_exit().  		 */ +		spin_lock_irq(&p->sighand->siglock);  		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))  			list_add(&p->cg_list, &task_css_set(p)->tasks); +		spin_unlock_irq(&p->sighand->siglock); +  		task_unlock(p);  	} while_each_thread(g, p);  	read_unlock(&tasklist_lock); @@ -4158,7 +4158,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,  	struct cgroup *cgrp;  	struct cgroup_name *name;  	struct cgroupfs_root *root = parent->root; -	int ssid, err = 0; +	int ssid, err;  	struct cgroup_subsys *ss;  	struct super_block *sb = root->sb; @@ -4168,19 +4168,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,  		return -ENOMEM;  	name = cgroup_alloc_name(dentry); -	if (!name) +	if (!name) { +		err = -ENOMEM;  		goto err_free_cgrp; +	}  	rcu_assign_pointer(cgrp->name, name);  	/* -	 * Temporarily set the pointer to NULL, so idr_find() won't return -	 * a half-baked cgroup. -	 */ -	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL); -	if (cgrp->id < 0) -		goto err_free_name; - -	/*  	 * Only live parents can have children.  Note that the liveliness  	 * check isn't strictly necessary because cgroup_mkdir() and  	 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it @@ -4189,7 +4183,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,  	 */  	if (!cgroup_lock_live_group(parent)) {  		err = -ENODEV; -		goto err_free_id; +		goto err_free_name; +	} + +	/* +	 * Temporarily set the pointer to NULL, so idr_find() won't return +	 * a half-baked cgroup. 
+	 */ +	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL); +	if (cgrp->id < 0) { +		err = -ENOMEM; +		goto err_unlock;  	}  	/* Grab a reference on the superblock so the hierarchy doesn't @@ -4221,7 +4225,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,  	 */  	err = cgroup_create_file(dentry, S_IFDIR | mode, sb);  	if (err < 0) -		goto err_unlock; +		goto err_free_id;  	lockdep_assert_held(&dentry->d_inode->i_mutex);  	cgrp->serial_nr = cgroup_serial_nr_next++; @@ -4257,12 +4261,12 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,  	return 0; -err_unlock: -	mutex_unlock(&cgroup_mutex); -	/* Release the reference count that we took on the superblock */ -	deactivate_super(sb);  err_free_id:  	idr_remove(&root->cgroup_idr, cgrp->id); +	/* Release the reference count that we took on the superblock */ +	deactivate_super(sb); +err_unlock: +	mutex_unlock(&cgroup_mutex);  err_free_name:  	kfree(rcu_dereference_raw(cgrp->name));  err_free_cgrp: diff --git a/kernel/power/console.c b/kernel/power/console.c index eacb8bd8cab4..aba9c545a0e3 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c @@ -9,6 +9,7 @@  #include <linux/kbd_kern.h>  #include <linux/vt.h>  #include <linux/module.h> +#include <linux/slab.h>  #include "power.h"  #define SUSPEND_CONSOLE	(MAX_NR_CONSOLES-1) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index b1d255f04135..4dae9cbe9259 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1076,7 +1076,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)  		next_seq = log_next_seq;  		len = 0; -		prev = 0;  		while (len >= 0 && seq < next_seq) {  			struct printk_log *msg = log_from_idx(idx);  			int textlen; @@ -2788,7 +2787,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,  	next_idx = idx;  	l = 0; -	prev = 0;  	while (seq < dumper->next_seq) {  		struct printk_log *msg = log_from_idx(idx); diff --git 
a/kernel/sched/core.c b/kernel/sched/core.c index b46131ef6aab..6edbef296ece 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1952,7 +1952,7 @@ static int dl_overflow(struct task_struct *p, int policy,  {  	struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); -	u64 period = attr->sched_period; +	u64 period = attr->sched_period ?: attr->sched_deadline;  	u64 runtime = attr->sched_runtime;  	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;  	int cpus, err = -1; @@ -3661,13 +3661,14 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)   * @pid: the pid in question.   * @uattr: structure containing the extended parameters.   */ -SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr) +SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, +			       unsigned int, flags)  {  	struct sched_attr attr;  	struct task_struct *p;  	int retval; -	if (!uattr || pid < 0) +	if (!uattr || pid < 0 || flags)  		return -EINVAL;  	if (sched_copy_attr(uattr, &attr)) @@ -3786,7 +3787,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,  		attr->size = usize;  	} -	ret = copy_to_user(uattr, attr, usize); +	ret = copy_to_user(uattr, attr, attr->size);  	if (ret)  		return -EFAULT; @@ -3804,8 +3805,8 @@ err_size:   * @uattr: structure containing the extended parameters.   * @size: sizeof(attr) for fwd/bwd comp.   
*/ -SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, -		unsigned int, size) +SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, +		unsigned int, size, unsigned int, flags)  {  	struct sched_attr attr = {  		.size = sizeof(struct sched_attr), @@ -3814,7 +3815,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,  	int retval;  	if (!uattr || pid < 0 || size > PAGE_SIZE || -	    size < SCHED_ATTR_SIZE_VER0) +	    size < SCHED_ATTR_SIZE_VER0 || flags)  		return -EINVAL;  	rcu_read_lock(); @@ -7422,6 +7423,7 @@ static int sched_dl_global_constraints(void)  	u64 period = global_rt_period();  	u64 new_bw = to_ratio(period, runtime);  	int cpu, ret = 0; +	unsigned long flags;  	/*  	 * Here we want to check the bandwidth not being set to some @@ -7435,10 +7437,10 @@ static int sched_dl_global_constraints(void)  	for_each_possible_cpu(cpu) {  		struct dl_bw *dl_b = dl_bw_of(cpu); -		raw_spin_lock(&dl_b->lock); +		raw_spin_lock_irqsave(&dl_b->lock, flags);  		if (new_bw < dl_b->total_bw)  			ret = -EBUSY; -		raw_spin_unlock(&dl_b->lock); +		raw_spin_unlock_irqrestore(&dl_b->lock, flags);  		if (ret)  			break; @@ -7451,6 +7453,7 @@ static void sched_dl_do_global(void)  {  	u64 new_bw = -1;  	int cpu; +	unsigned long flags;  	def_dl_bandwidth.dl_period = global_rt_period();  	def_dl_bandwidth.dl_runtime = global_rt_runtime(); @@ -7464,9 +7467,9 @@ static void sched_dl_do_global(void)  	for_each_possible_cpu(cpu) {  		struct dl_bw *dl_b = dl_bw_of(cpu); -		raw_spin_lock(&dl_b->lock); +		raw_spin_lock_irqsave(&dl_b->lock, flags);  		dl_b->bw = new_bw; -		raw_spin_unlock(&dl_b->lock); +		raw_spin_unlock_irqrestore(&dl_b->lock, flags);  	}  } @@ -7475,7 +7478,8 @@ static int sched_rt_global_validate(void)  	if (sysctl_sched_rt_period <= 0)  		return -EINVAL; -	if (sysctl_sched_rt_runtime > sysctl_sched_rt_period) +	if ((sysctl_sched_rt_runtime != RUNTIME_INF) && +		(sysctl_sched_rt_runtime > 
sysctl_sched_rt_period))  		return -EINVAL;  	return 0; diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 045fc74e3f09..5b8838b56d1c 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)  static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)  { -	WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID); +	WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);  	if (dl_time_before(new_dl, cp->elements[idx].dl)) {  		cp->elements[idx].dl = new_dl; @@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,  	}  out: -	WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1); +	WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);  	return best_cpu;  } @@ -137,7 +137,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)  	int old_idx, new_cpu;  	unsigned long flags; -	WARN_ON(cpu > num_present_cpus()); +	WARN_ON(!cpu_present(cpu));  	raw_spin_lock_irqsave(&cp->lock, flags);  	old_idx = cp->cpu_to_idx[cpu]; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 0dd5e0971a07..15cbc17fbf84 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq)  static void update_dl_migration(struct dl_rq *dl_rq)  { -	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) { +	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {  		if (!dl_rq->overloaded) {  			dl_set_overload(rq_of_dl_rq(dl_rq));  			dl_rq->overloaded = 1; @@ -137,7 +137,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)  	struct task_struct *p = dl_task_of(dl_se);  	dl_rq = &rq_of_dl_rq(dl_rq)->dl; -	dl_rq->dl_nr_total++;  	if (p->nr_cpus_allowed > 1)  		dl_rq->dl_nr_migratory++; @@ -149,7 +148,6 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)  	struct task_struct *p = dl_task_of(dl_se);  	dl_rq = 
&rq_of_dl_rq(dl_rq)->dl; -	dl_rq->dl_nr_total--;  	if (p->nr_cpus_allowed > 1)  		dl_rq->dl_nr_migratory--; @@ -717,6 +715,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)  	WARN_ON(!dl_prio(prio));  	dl_rq->dl_nr_running++; +	inc_nr_running(rq_of_dl_rq(dl_rq));  	inc_dl_deadline(dl_rq, deadline);  	inc_dl_migration(dl_se, dl_rq); @@ -730,6 +729,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)  	WARN_ON(!dl_prio(prio));  	WARN_ON(!dl_rq->dl_nr_running);  	dl_rq->dl_nr_running--; +	dec_nr_running(rq_of_dl_rq(dl_rq));  	dec_dl_deadline(dl_rq, dl_se->deadline);  	dec_dl_migration(dl_se, dl_rq); @@ -836,8 +836,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)  	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)  		enqueue_pushable_dl_task(rq, p); - -	inc_nr_running(rq);  }  static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) @@ -850,8 +848,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)  {  	update_curr_dl(rq);  	__dequeue_task_dl(rq, p, flags); - -	dec_nr_running(rq);  }  /* diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 966cc2bfcb77..78157099b167 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1757,6 +1757,8 @@ void task_numa_work(struct callback_head *work)  			start = end;  			if (pages <= 0)  				goto out; + +			cond_resched();  		} while (end != vma->vm_end);  	} diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c2119fd20f8b..f964add50f38 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -462,7 +462,6 @@ struct dl_rq {  	} earliest_dl;  	unsigned long dl_nr_migratory; -	unsigned long dl_nr_total;  	int overloaded;  	/* diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index 0abb36464281..4d23dc4d8139 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -116,20 +116,42 @@ static enum hrtimer_restart sched_clock_poll(struct 
hrtimer *hrt)  void __init sched_clock_register(u64 (*read)(void), int bits,  				 unsigned long rate)  { +	u64 res, wrap, new_mask, new_epoch, cyc, ns; +	u32 new_mult, new_shift; +	ktime_t new_wrap_kt;  	unsigned long r; -	u64 res, wrap;  	char r_unit;  	if (cd.rate > rate)  		return;  	WARN_ON(!irqs_disabled()); -	read_sched_clock = read; -	sched_clock_mask = CLOCKSOURCE_MASK(bits); -	cd.rate = rate;  	/* calculate the mult/shift to convert counter ticks to ns. */ -	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600); +	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600); + +	new_mask = CLOCKSOURCE_MASK(bits); + +	/* calculate how many ns until we wrap */ +	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask); +	new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); + +	/* update epoch for new counter and update epoch_ns from old counter*/ +	new_epoch = read(); +	cyc = read_sched_clock(); +	ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, +			  cd.mult, cd.shift); + +	raw_write_seqcount_begin(&cd.seq); +	read_sched_clock = read; +	sched_clock_mask = new_mask; +	cd.rate = rate; +	cd.wrap_kt = new_wrap_kt; +	cd.mult = new_mult; +	cd.shift = new_shift; +	cd.epoch_cyc = new_epoch; +	cd.epoch_ns = ns; +	raw_write_seqcount_end(&cd.seq);  	r = rate;  	if (r >= 4000000) { @@ -141,22 +163,12 @@ void __init sched_clock_register(u64 (*read)(void), int bits,  	} else  		r_unit = ' '; -	/* calculate how many ns until we wrap */ -	wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask); -	cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); -  	/* calculate the ns resolution of this counter */ -	res = cyc_to_ns(1ULL, cd.mult, cd.shift); +	res = cyc_to_ns(1ULL, new_mult, new_shift); +  	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",  		bits, r, r_unit, res, wrap); -	update_sched_clock(); - -	/* -	 * Ensure that sched_clock() starts off at 0ns -	 */ -	cd.epoch_ns = 0; -  	/* 
Enable IRQ time accounting if we have a fast enough sched_clock */  	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))  		enable_sched_clock_irqtime(); diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 240fb62cf394..dd06439b9c84 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -225,7 +225,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)   *   *	When there is no mapping defined for the user-namespace uid   *	pair INVALID_UID is returned.  Callers are expected to test - *	for and handle handle INVALID_UID being returned.  INVALID_UID + *	for and handle INVALID_UID being returned.  INVALID_UID   *	may be tested for using uid_valid().   */  kuid_t make_kuid(struct user_namespace *ns, uid_t uid) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 82ef9f3b7473..193e977a10ea 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1851,6 +1851,12 @@ static void destroy_worker(struct worker *worker)  	if (worker->flags & WORKER_IDLE)  		pool->nr_idle--; +	/* +	 * Once WORKER_DIE is set, the kworker may destroy itself at any +	 * point.  Pin to ensure the task stays until we're done with it. +	 */ +	get_task_struct(worker->task); +  	list_del_init(&worker->entry);  	worker->flags |= WORKER_DIE; @@ -1859,6 +1865,7 @@ static void destroy_worker(struct worker *worker)  	spin_unlock_irq(&pool->lock);  	kthread_stop(worker->task); +	put_task_struct(worker->task);  	kfree(worker);  	spin_lock_irq(&pool->lock);  | 
