Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--   include/linux/blkdev.h | 134
1 file changed, 61 insertions, 73 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 60d016138997..5bdf2ac9142c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -176,6 +176,21 @@ static inline bool disk_live(struct gendisk *disk)
 	return !inode_unhashed(disk->part0->bd_inode);
 }
 
+/**
+ * disk_openers - returns how many openers are there for a disk
+ * @disk: disk to check
+ *
+ * This returns the number of openers for a disk.  Note that this value is only
+ * stable if disk->open_mutex is held.
+ *
+ * Note: Due to a quirk in the block layer open code, each open partition is
+ * only counted once even if there are multiple openers.
+ */
+static inline unsigned int disk_openers(struct gendisk *disk)
+{
+	return atomic_read(&disk->part0->bd_openers);
+}
+
 /*
  * The gendisk is refcounted by the part0 block_device, and the bd_device
  * therein is also used for device model presentation in sysfs.
@@ -248,6 +263,7 @@ struct queue_limits {
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
 	unsigned int		max_hw_discard_sectors;
+	unsigned int		max_secure_erase_sectors;
 	unsigned int		max_write_zeroes_sectors;
 	unsigned int		max_zone_append_sectors;
 	unsigned int		discard_granularity;
@@ -540,10 +556,8 @@ struct request_queue {
 #define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
-#define QUEUE_FLAG_DISCARD	8	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE	11	/* supports secure erase */
 #define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD		13	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
@@ -582,11 +596,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 #define blk_queue_zone_resetall(q)	\
 	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
-#define blk_queue_secure_erase(q) \
-	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 #define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 #define blk_queue_pci_p2pdma(q)	\
 	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
@@ -602,7 +613,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 			     REQ_FAILFAST_DRIVER))
 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
-#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
 #define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
 
@@ -950,6 +960,8 @@ extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_discard_segments(struct request_queue *,
 		unsigned short);
+void blk_queue_max_secure_erase_sectors(struct request_queue *q,
+		unsigned int max_sectors);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
@@ -1090,13 +1102,12 @@ static inline long nr_blockdev_pages(void)
 
 extern void blk_io_schedule(void);
 
-#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
-
-extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, int flags,
-		struct bio **biop);
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask);
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp);
 
 #define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
 #define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
@@ -1115,7 +1126,7 @@ static inline int sb_issue_discard(struct super_block *sb, sector_t block,
 					      SECTOR_SHIFT),
 				    nr_blocks << (sb->s_blocksize_bits -
 						  SECTOR_SHIFT),
-				    gfp_mask, flags);
+				    gfp_mask);
 }
 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
 		sector_t nr_blocks, gfp_t gfp_mask)
@@ -1189,6 +1200,12 @@ static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
 	return min(l->max_zone_append_sectors, l->max_sectors);
 }
 
+static inline unsigned int
+bdev_max_zone_append_sectors(struct block_device *bdev)
+{
+	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
+}
+
 static inline unsigned queue_logical_block_size(const struct request_queue *q)
 {
 	int retval = 512;
@@ -1246,84 +1263,54 @@ bdev_zone_write_granularity(struct block_device *bdev)
 	return queue_zone_write_granularity(bdev_get_queue(bdev));
 }
 
-static inline int queue_alignment_offset(const struct request_queue *q)
-{
-	if (q->limits.misaligned)
-		return -1;
+int bdev_alignment_offset(struct block_device *bdev);
+unsigned int bdev_discard_alignment(struct block_device *bdev);
 
-	return q->limits.alignment_offset;
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
+{
+	return bdev_get_queue(bdev)->limits.max_discard_sectors;
 }
 
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
 {
-	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
-	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
-		<< SECTOR_SHIFT;
+	return bdev_get_queue(bdev)->limits.discard_granularity;
+}
 
-	return (granularity + lim->alignment_offset - alignment) % granularity;
+static inline unsigned int
+bdev_max_secure_erase_sectors(struct block_device *bdev)
+{
+	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
 }
 
-static inline int bdev_alignment_offset(struct block_device *bdev)
+static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 
-	if (q->limits.misaligned)
-		return -1;
-	if (bdev_is_partition(bdev))
-		return queue_limit_alignment_offset(&q->limits,
-				bdev->bd_start_sect);
-	return q->limits.alignment_offset;
+	if (q)
+		return q->limits.max_write_zeroes_sectors;
+
+	return 0;
 }
 
-static inline int queue_discard_alignment(const struct request_queue *q)
+static inline bool bdev_nonrot(struct block_device *bdev)
 {
-	if (q->limits.discard_misaligned)
-		return -1;
-
-	return q->limits.discard_alignment;
+	return blk_queue_nonrot(bdev_get_queue(bdev));
}
 
-static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+static inline bool bdev_stable_writes(struct block_device *bdev)
 {
-	unsigned int alignment, granularity, offset;
-
-	if (!lim->max_discard_sectors)
-		return 0;
-
-	/* Why are these in bytes, not sectors? */
-	alignment = lim->discard_alignment >> SECTOR_SHIFT;
-	granularity = lim->discard_granularity >> SECTOR_SHIFT;
-	if (!granularity)
-		return 0;
-
-	/* Offset of the partition start in 'granularity' sectors */
-	offset = sector_div(sector, granularity);
-
-	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
-	offset = (granularity + alignment - offset) % granularity;
-
-	/* Turn it back into bytes, gaah */
-	return offset << SECTOR_SHIFT;
+	return test_bit(QUEUE_FLAG_STABLE_WRITES,
+			&bdev_get_queue(bdev)->queue_flags);
 }
 
-static inline int bdev_discard_alignment(struct block_device *bdev)
+static inline bool bdev_write_cache(struct block_device *bdev)
 {
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	if (bdev_is_partition(bdev))
-		return queue_limit_discard_alignment(&q->limits,
-				bdev->bd_start_sect);
-	return q->limits.discard_alignment;
+	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
 }
 
-static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
+static inline bool bdev_fua(struct block_device *bdev)
 {
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	if (q)
-		return q->limits.max_write_zeroes_sectors;
-
-	return 0;
+	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
 }
 
 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
@@ -1491,9 +1478,10 @@ static inline void blk_wake_io_task(struct task_struct *waiter)
 		wake_up_process(waiter);
 }
 
-unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
-		unsigned int op);
-void disk_end_io_acct(struct gendisk *disk, unsigned int op,
+unsigned long bdev_start_io_acct(struct block_device *bdev,
+				 unsigned int sectors, unsigned int op,
+				 unsigned long start_time);
+void bdev_end_io_acct(struct block_device *bdev, unsigned int op,
 		unsigned long start_time);
 void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
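
Conversion sketches for the API changes above. With QUEUE_FLAG_DISCARD and the blk_queue_discard() macro gone, discard support is now signalled by a non-zero max_discard_sectors limit, and blkdev_issue_discard() loses its flags argument. A minimal caller-side sketch; my_trim_range() is a hypothetical helper, not part of this patch:

/* Hypothetical caller showing the blk_queue_discard() ->
 * bdev_max_discard_sectors() conversion. */
static int my_trim_range(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects)
{
	/* a zero limit now means "discard not supported" */
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	/* no flags argument anymore; secure erase is a separate helper */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
}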
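
Secure erase likewise moves from a discard flag (BLKDEV_DISCARD_SECURE) to its own queue limit and its own helper. A sketch of both sides; my_setup_queue(), my_secure_erase() and hw_max_sectors are made-up names for illustration:

/* Driver side: advertise support via the limit instead of setting
 * QUEUE_FLAG_SECERASE; hw_max_sectors is a hypothetical device cap. */
static void my_setup_queue(struct request_queue *q,
			   unsigned int hw_max_sectors)
{
	blk_queue_max_secure_erase_sectors(q, hw_max_sectors);
}

/* Caller side: probe the limit, then use the dedicated helper. */
static int my_secure_erase(struct block_device *bdev, sector_t sector,
			   sector_t nr_sects)
{
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;
	return blkdev_issue_secure_erase(bdev, sector, nr_sects, GFP_KERNEL);
}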
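
disk_openers() gives drivers a supported way to refuse reconfiguration or teardown while the device is in use; per its kerneldoc the count is only stable under disk->open_mutex. A sketch assuming the caller itself holds one open reference (my_reconfigure() is hypothetical):

static int my_reconfigure(struct gendisk *disk)
{
	int ret = 0;

	mutex_lock(&disk->open_mutex);
	/* > 1: somebody besides our own reference has the disk open */
	if (disk_openers(disk) > 1)
		ret = -EBUSY;
	/* otherwise reconfigure here, while the count cannot change */
	mutex_unlock(&disk->open_mutex);
	return ret;
}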
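
The new bdev_nonrot(), bdev_stable_writes(), bdev_write_cache() and bdev_fua() predicates replace open-coded queue-flag tests on bdev_get_queue(), including the removed blk_queue_fua() macro. A sketch of mount-time feature probing; struct my_mount_info and my_probe_bdev() are hypothetical:

struct my_mount_info {		/* hypothetical per-mount state */
	bool ssd;
	bool stable_writes;
	bool write_cache;
	bool fua;
};

static void my_probe_bdev(struct my_mount_info *mi, struct block_device *bdev)
{
	mi->ssd = bdev_nonrot(bdev);		  /* was blk_queue_nonrot(q) */
	mi->stable_writes = bdev_stable_writes(bdev);
	mi->write_cache = bdev_write_cache(bdev); /* was QUEUE_FLAG_WC test */
	mi->fua = bdev_fua(bdev);		  /* was blk_queue_fua(q) */
}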
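
bdev_max_zone_append_sectors() spares zoned callers the bdev_get_queue() step when sizing REQ_OP_ZONE_APPEND I/O. A one-line illustration; my_zone_append_max() is a hypothetical helper:

/* Illustrative clamp of an append-sized I/O to the device limit. */
static unsigned int my_zone_append_max(struct block_device *bdev,
				       unsigned int nr_sectors)
{
	return min(nr_sectors, bdev_max_zone_append_sectors(bdev));
}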
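
Finally, the I/O accounting helpers now take a block_device instead of a gendisk, and the start helper takes its start time explicitly. A sketch of a bio-based driver's accounting, assuming the I/O completes synchronously in the same function (my_account_bio() is hypothetical):

static void my_account_bio(struct block_device *bdev, struct bio *bio)
{
	unsigned long start;

	/* was: disk_start_io_acct(disk, sectors, op) */
	start = bdev_start_io_acct(bdev, bio_sectors(bio), bio_op(bio),
				   jiffies);

	/* ... submit and wait for the I/O ... */

	/* was: disk_end_io_acct(disk, op, start) */
	bdev_end_io_acct(bdev, bio_op(bio), start);
}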
