| author | Tejun Heo <tj@kernel.org> | 2015-12-07 10:09:03 -0500 | 
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2015-12-07 10:09:03 -0500 | 
| commit | 0b98f0c04245877ae0b625a7f0aa55b8ff98e0c4 (patch) | |
| tree | 486ebe0d76217a4f7781e28fbd96facb0b66f9da /include/linux/blkdev.h | |
| parent | 67cde9c4938945b9510730c64e68d2f1dd7bc0aa (diff) | |
| parent | 527e9316f8ec44bd53d90fb9f611fa7ffff52bb9 (diff) | |
Merge branch 'master' into for-4.4-fixes
The following commit, which went into mainline through the networking tree,
  3b13758f51de ("cgroups: Allow dynamically changing net_classid")
conflicts in net/core/netclassid_cgroup.c with the following pending
fix in cgroup/for-4.4-fixes:
  1f7dd3e5a6e4 ("cgroup: fix handling of multi-destination migration from subtree_control enabling")
The former separates out update_classid() from cgrp_attach() and
updates it to walk all fds of all tasks in the target css so that it
can be used from both migration and config change paths.  The latter
drops @css from cgrp_attach().
Resolve the conflict by making cgrp_attach() call update_classid()
with the css from the first task.  The @tset walk in cgrp_attach()
could be revived, but as net_cls is v1 only, where there is always
exactly one target css during migration, this is fine.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Nina Schiff <ninasc@fb.com>
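For reference, a minimal sketch of the resolved cgrp_attach() in
net/core/netclassid_cgroup.c, assuming the post-1f7dd3e5a6e4
cgroup_taskset_first() signature and the update_classid() helper
introduced by 3b13758f51de:

```c
static void cgrp_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;

	/*
	 * net_cls is v1 only, so every task in @tset migrates to the
	 * same css; the css from the first task covers all of them.
	 */
	cgroup_taskset_first(tset, &css);
	update_classid(css,
		       (void *)(unsigned long)css_cls_state(css)->classid);
}
```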
Diffstat (limited to 'include/linux/blkdev.h')
| -rw-r--r-- | include/linux/blkdev.h | 5 | 
1 file changed, 3 insertions, 2 deletions
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3fe27f8d91f0..0169ba2e2e64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -254,6 +254,7 @@ struct queue_limits {
 	unsigned long		virt_boundary_mask;
 
 	unsigned int		max_hw_sectors;
+	unsigned int		max_dev_sectors;
 	unsigned int		chunk_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
@@ -773,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 			     struct bio_set *bs, gfp_t gfp_mask,
@@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);
 
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
@@ -958,7 +960,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
```
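Among the declarations the merge brings in is the newly exported
blk_queue_enter()/blk_queue_exit() pair, which pins a queue's usage
reference across an operation. A hypothetical caller (the function name
and the elided work are illustrative, not from this commit) would
bracket its use of the queue like this:

```c
#include <linux/blkdev.h>

/*
 * Illustrative only: take a usage reference on @q so it cannot be
 * torn down while we operate on it, then drop the reference.
 */
static int do_work_on_queue(struct request_queue *q)
{
	int ret;

	/*
	 * Fails if the queue is dying (or, for non-blocking gfp masks,
	 * temporarily unavailable).
	 */
	ret = blk_queue_enter(q, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... operate on @q while it is guaranteed to stay alive ... */

	blk_queue_exit(q);
	return 0;
}
```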
