| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-11-25 11:15:41 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-11-25 11:15:41 -0800 |
| commit | 2d53943090c336c9d298638bad292be349e1b9c4 (patch) | |
| tree | 18fc9bbc1ff13fe031a922cf2efc22a903257caf /drivers/block | |
| parent | ff6814b078e33a4d26fee9ea80779c81a6744cd8 (diff) | |
| parent | 00b89892c869f34528deca957b10d1468c4e8b38 (diff) | |
Merge tag 'for-5.5/drivers-20191121' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
"Here are the main block driver updates for 5.5. Nothing major in here,
mostly just fixes. This contains:
- a set of bcache changes via Coly
- MD changes from Song
- loop unmap write-zeroes fix (Darrick)
- spelling fixes (Geert)
- zoned additions cleanups to null_blk/dm (Ajay)
- allow null_blk online submit queue changes (Bart)
- NVMe changes via Keith, nothing major here either"
* tag 'for-5.5/drivers-20191121' of git://git.kernel.dk/linux-block: (56 commits)
Revert "bcache: fix fifo index swapping condition in journal_pin_cmp()"
drivers/md/raid5-ppl.c: use the new spelling of RWH_WRITE_LIFE_NOT_SET
drivers/md/raid5.c: use the new spelling of RWH_WRITE_LIFE_NOT_SET
bcache: don't export symbols
bcache: remove the extra cflags for request.o
bcache: at least try to shrink 1 node in bch_mca_scan()
bcache: add idle_max_writeback_rate sysfs interface
bcache: add code comments in bch_btree_leaf_dirty()
bcache: fix deadlock in bcache_allocator
bcache: add code comment bch_keylist_pop() and bch_keylist_pop_front()
bcache: deleted code comments for dead code in bch_data_insert_keys()
bcache: add more accurate error messages in read_super()
bcache: fix static checker warning in bcache_device_free()
bcache: fix a lost wake-up problem caused by mca_cannibalize_lock
bcache: fix fifo index swapping condition in journal_pin_cmp()
md/raid10: prevent access of uninitialized resync_pages offset
md: avoid invalid memory access for array sb->dev_roles
md/raid1: avoid soft lockup under high load
null_blk: add zone open, close, and finish support
dm: add zone open, close and finish support
...
Diffstat (limited to 'drivers/block')
| -rw-r--r-- | drivers/block/loop.c | 26 |
| -rw-r--r-- | drivers/block/mtip32xx/mtip32xx.c | 2 |
| -rw-r--r-- | drivers/block/null_blk.h | 8 |
| -rw-r--r-- | drivers/block/null_blk_main.c | 104 |
| -rw-r--r-- | drivers/block/null_blk_zoned.c | 54 |

5 files changed, 153 insertions(+), 41 deletions(-)
```diff
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f6f77eaa7217..ef6e251857c8 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -417,18 +417,20 @@ out_free_page:
 	return ret;
 }
 
-static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
+static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
+			int mode)
 {
 	/*
-	 * We use punch hole to reclaim the free space used by the
-	 * image a.k.a. discard. However we do not support discard if
-	 * encryption is enabled, because it may give an attacker
-	 * useful information.
+	 * We use fallocate to manipulate the space mappings used by the image
+	 * a.k.a. discard/zerorange. However we do not support this if
+	 * encryption is enabled, because it may give an attacker useful
+	 * information.
 	 */
 	struct file *file = lo->lo_backing_file;
-	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
 	int ret;
 
+	mode |= FALLOC_FL_KEEP_SIZE;
+
 	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
 		ret = -EOPNOTSUPP;
 		goto out;
@@ -596,9 +598,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 	switch (req_op(rq)) {
 	case REQ_OP_FLUSH:
 		return lo_req_flush(lo, rq);
-	case REQ_OP_DISCARD:
 	case REQ_OP_WRITE_ZEROES:
-		return lo_discard(lo, rq, pos);
+		/*
+		 * If the caller doesn't want deallocation, call zeroout to
+		 * write zeroes the range.  Otherwise, punch them out.
+		 */
+		return lo_fallocate(lo, rq, pos,
+			(rq->cmd_flags & REQ_NOUNMAP) ?
+				FALLOC_FL_ZERO_RANGE :
+				FALLOC_FL_PUNCH_HOLE);
+	case REQ_OP_DISCARD:
+		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
 	case REQ_OP_WRITE:
 		if (lo->transfer)
 			return lo_write_transfer(lo, rq, pos);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 964f78cfffa0..f6bafa9a68b9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -129,7 +129,7 @@ struct mtip_compat_ide_task_request_s {
 /*
  * This function check_for_surprise_removal is called
  * while card is removed from the system and it will
- * read the vendor id from the configration space
+ * read the vendor id from the configuration space
  *
  * @pdev Pointer to the pci_dev structure.
  *
```
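For context on the loop change above: from userspace, the two fallocate modes that the new lo_fallocate() helper selects between are observable on an ordinary file. Below is a minimal, hypothetical sketch (the temporary file path and sizes are made up, not from this commit, and zero-range needs a filesystem such as ext4 or XFS) showing FALLOC_FL_PUNCH_HOLE versus FALLOC_FL_ZERO_RANGE, each combined with FALLOC_FL_KEEP_SIZE just as the driver does.

```c
/*
 * Hypothetical userspace sketch (not from this commit): the two fallocate
 * modes lo_fallocate() chooses between, applied to an ordinary file.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        char path[] = "/tmp/loop-falloc-XXXXXX";       /* made-up temp file */
        char buf[4096];
        struct stat st;
        int fd = mkstemp(path);

        if (fd < 0) {
                perror("mkstemp");
                return 1;
        }

        memset(buf, 0xab, sizeof(buf));
        for (int i = 0; i < 16; i++)                    /* 64 KiB of data */
                if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
                        perror("write");

        /* REQ_OP_DISCARD path: deallocate blocks, keep the file size. */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 32768))
                perror("punch hole");

        /*
         * REQ_OP_WRITE_ZEROES with REQ_NOUNMAP: zero the range but keep it
         * allocated, so later writes cannot fail for lack of space.
         */
        if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, 32768, 32768))
                perror("zero range");

        fstat(fd, &st);
        printf("size=%lld blocks=%lld\n",
               (long long)st.st_size, (long long)st.st_blocks);

        close(fd);
        unlink(path);
        return 0;
}
```

Punching the hole releases backing blocks (st_blocks drops), while the zero-range call keeps the allocation; that is the distinction the driver now preserves for REQ_OP_WRITE_ZEROES requests flagged REQ_NOUNMAP.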
```diff
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index a235c45e22a7..93c2a3d403da 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -96,6 +96,8 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
 		     struct blk_zone *zones, unsigned int *nr_zones);
 blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
 				sector_t sector, sector_t nr_sectors);
+size_t null_zone_valid_read_len(struct nullb *nullb,
+				sector_t sector, unsigned int len);
 #else
 static inline int null_zone_init(struct nullb_device *dev)
 {
@@ -115,5 +117,11 @@ static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
 {
 	return BLK_STS_NOTSUPP;
 }
+static inline size_t null_zone_valid_read_len(struct nullb *nullb,
+					      sector_t sector,
+					      unsigned int len)
+{
+	return len;
+}
 #endif /* CONFIG_BLK_DEV_ZONED */
 #endif /* __NULL_BLK_H */
```
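The null_zone_valid_read_len() helper declared in the header change above (with a pass-through stub for non-zoned builds) encodes one rule: on a sequential zone only data below the write pointer is valid, so a read that crosses it is served partially and the driver fills the rest. The sketch below restates that rule with simplified stand-in types; demo_zone and its fields are illustrative, not the driver's structures.

```c
/*
 * Standalone sketch of the rule null_zone_valid_read_len() implements.
 * demo_zone and its fields are simplified stand-ins, not driver types.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

struct demo_zone {
        bool conventional;      /* conventional zones have no write pointer */
        uint64_t wp;            /* write pointer, in 512-byte sectors */
};

/* How many of 'len' bytes starting at 'sector' are backed by written data? */
static size_t demo_zone_valid_read_len(const struct demo_zone *zone,
                                       uint64_t sector, size_t len)
{
        uint64_t nr_sectors = len >> SECTOR_SHIFT;

        if (zone->conventional || sector + nr_sectors <= zone->wp)
                return len;                             /* fully below the wp */
        if (sector > zone->wp)
                return 0;                               /* entirely unwritten */
        return (zone->wp - sector) << SECTOR_SHIFT;     /* partial overlap */
}

int main(void)
{
        struct demo_zone z = { .conventional = false, .wp = 100 };

        assert(demo_zone_valid_read_len(&z, 0, 8 << SECTOR_SHIFT) == (8 << SECTOR_SHIFT));
        assert(demo_zone_valid_read_len(&z, 200, 8 << SECTOR_SHIFT) == 0);
        assert(demo_zone_valid_read_len(&z, 96, 8 << SECTOR_SHIFT) == (4 << SECTOR_SHIFT));
        return 0;
}
```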
```diff
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 0e7da5015ccd..ea7a4d6b7848 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -227,7 +227,7 @@ static ssize_t nullb_device_uint_attr_store(unsigned int *val,
 	int result;
 
 	result = kstrtouint(page, 0, &tmp);
-	if (result)
+	if (result < 0)
 		return result;
 
 	*val = tmp;
@@ -241,7 +241,7 @@ static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
 	unsigned long tmp;
 
 	result = kstrtoul(page, 0, &tmp);
-	if (result)
+	if (result < 0)
 		return result;
 
 	*val = tmp;
@@ -255,7 +255,7 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
 	int result;
 
 	result = kstrtobool(page, &tmp);
-	if (result)
+	if (result < 0)
 		return result;
 
 	*val = tmp;
@@ -263,7 +263,7 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
 }
 
 /* The following macro should only be used with TYPE = {uint, ulong, bool}. */
-#define NULLB_DEVICE_ATTR(NAME, TYPE) \
+#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
 static ssize_t \
 nullb_device_##NAME##_show(struct config_item *item, char *page) \
 { \
@@ -274,31 +274,57 @@ static ssize_t \
 nullb_device_##NAME##_store(struct config_item *item, const char *page, \
 			    size_t count) \
 { \
-	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
-		return -EBUSY; \
-	return nullb_device_##TYPE##_attr_store( \
-			&to_nullb_device(item)->NAME, page, count); \
+	int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY; \
+	struct nullb_device *dev = to_nullb_device(item); \
+	TYPE new_value; \
+	int ret; \
+	\
+	ret = nullb_device_##TYPE##_attr_store(&new_value, page, count); \
+	if (ret < 0) \
+		return ret; \
+	if (apply_fn) \
+		ret = apply_fn(dev, new_value); \
+	else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
+		ret = -EBUSY; \
+	if (ret < 0) \
+		return ret; \
+	dev->NAME = new_value; \
+	return count; \
 } \
 CONFIGFS_ATTR(nullb_device_, NAME);
 
-NULLB_DEVICE_ATTR(size, ulong);
-NULLB_DEVICE_ATTR(completion_nsec, ulong);
-NULLB_DEVICE_ATTR(submit_queues, uint);
-NULLB_DEVICE_ATTR(home_node, uint);
-NULLB_DEVICE_ATTR(queue_mode, uint);
-NULLB_DEVICE_ATTR(blocksize, uint);
-NULLB_DEVICE_ATTR(irqmode, uint);
-NULLB_DEVICE_ATTR(hw_queue_depth, uint);
-NULLB_DEVICE_ATTR(index, uint);
-NULLB_DEVICE_ATTR(blocking, bool);
-NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
-NULLB_DEVICE_ATTR(memory_backed, bool);
-NULLB_DEVICE_ATTR(discard, bool);
-NULLB_DEVICE_ATTR(mbps, uint);
-NULLB_DEVICE_ATTR(cache_size, ulong);
-NULLB_DEVICE_ATTR(zoned, bool);
-NULLB_DEVICE_ATTR(zone_size, ulong);
-NULLB_DEVICE_ATTR(zone_nr_conv, uint);
+static int nullb_apply_submit_queues(struct nullb_device *dev,
+				     unsigned int submit_queues)
+{
+	struct nullb *nullb = dev->nullb;
+	struct blk_mq_tag_set *set;
+
+	if (!nullb)
+		return 0;
+
+	set = nullb->tag_set;
+	blk_mq_update_nr_hw_queues(set, submit_queues);
+	return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
+}
+
+NULLB_DEVICE_ATTR(size, ulong, NULL);
+NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
+NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
+NULLB_DEVICE_ATTR(home_node, uint, NULL);
+NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
+NULLB_DEVICE_ATTR(blocksize, uint, NULL);
+NULLB_DEVICE_ATTR(irqmode, uint, NULL);
+NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
+NULLB_DEVICE_ATTR(index, uint, NULL);
+NULLB_DEVICE_ATTR(blocking, bool, NULL);
+NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
+NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
+NULLB_DEVICE_ATTR(discard, bool, NULL);
+NULLB_DEVICE_ATTR(mbps, uint, NULL);
+NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
+NULLB_DEVICE_ATTR(zoned, bool, NULL);
+NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
+NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
 
 static ssize_t nullb_device_power_show(struct config_item *item, char *page)
 {
@@ -996,6 +1022,16 @@ next:
 	return 0;
 }
 
+static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
+			       unsigned int len, unsigned int off)
+{
+	void *dst;
+
+	dst = kmap_atomic(page);
+	memset(dst + off, 0xFF, len);
+	kunmap_atomic(dst);
+}
+
 static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
 {
 	size_t temp;
@@ -1036,10 +1072,24 @@ static int null_transfer(struct nullb *nullb, struct page *page,
 	unsigned int len, unsigned int off, bool is_write, sector_t sector,
 	bool is_fua)
 {
+	struct nullb_device *dev = nullb->dev;
+	unsigned int valid_len = len;
 	int err = 0;
 
 	if (!is_write) {
-		err = copy_from_nullb(nullb, page, off, sector, len);
+		if (dev->zoned)
+			valid_len = null_zone_valid_read_len(nullb,
+				sector, len);
+
+		if (valid_len) {
+			err = copy_from_nullb(nullb, page, off,
+				sector, valid_len);
+			off += valid_len;
+			len -= valid_len;
+		}
+
+		if (len)
+			nullb_fill_pattern(nullb, page, len, off);
 		flush_dcache_page(page);
 	} else {
 		flush_dcache_page(page);
```
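The apply callback added to NULLB_DEVICE_ATTR() is what lets submit_queues change while a device is powered on; previously the store handler returned -EBUSY as soon as the device was configured. Below is a hedged userspace sketch of exercising that path: the configfs mount point and the nullb0 name are assumptions about a typical setup, the device directory is assumed to exist already, and the write needs root with null_blk loaded.

```c
/*
 * Sketch: changing submit_queues on an already configured null_blk
 * configfs device.  Paths and the device name are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);
        ssize_t n;

        if (fd < 0) {
                perror(path);
                return -1;
        }
        n = write(fd, val, strlen(val));
        close(fd);
        return n < 0 ? -1 : 0;
}

int main(void)
{
        /* Assumed device directory, created beforehand with mkdir(2). */
        const char *dir = "/sys/kernel/config/nullb/nullb0";
        char path[256];

        /*
         * With this series, the store handler calls
         * nullb_apply_submit_queues(), which resizes the tag set via
         * blk_mq_update_nr_hw_queues() instead of returning -EBUSY.
         */
        snprintf(path, sizeof(path), "%s/submit_queues", dir);
        return write_attr(path, "8") ? 1 : 0;
}
```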
```diff
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 3d7fdea872f8..02f41a3bc4cb 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -84,6 +84,24 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
 	return 0;
 }
 
+size_t null_zone_valid_read_len(struct nullb *nullb,
+				sector_t sector, unsigned int len)
+{
+	struct nullb_device *dev = nullb->dev;
+	struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
+	unsigned int nr_sectors = len >> SECTOR_SHIFT;
+
+	/* Read must be below the write pointer position */
+	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
+	    sector + nr_sectors <= zone->wp)
+		return len;
+
+	if (sector > zone->wp)
+		return 0;
+
+	return (zone->wp - sector) << SECTOR_SHIFT;
+}
+
 static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		     unsigned int nr_sectors)
 {
@@ -118,14 +136,14 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	return BLK_STS_OK;
 }
 
-static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
+static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
+				   sector_t sector)
 {
 	struct nullb_device *dev = cmd->nq->dev;
-	unsigned int zno = null_zone_no(dev, sector);
-	struct blk_zone *zone = &dev->zones[zno];
+	struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
 	size_t i;
 
-	switch (req_op(cmd->rq)) {
+	switch (op) {
 	case REQ_OP_ZONE_RESET_ALL:
 		for (i = 0; i < dev->nr_zones; i++) {
 			if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
@@ -141,6 +159,29 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
 		zone->cond = BLK_ZONE_COND_EMPTY;
 		zone->wp = zone->start;
 		break;
+	case REQ_OP_ZONE_OPEN:
+		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+			return BLK_STS_IOERR;
+		if (zone->cond == BLK_ZONE_COND_FULL)
+			return BLK_STS_IOERR;
+
+		zone->cond = BLK_ZONE_COND_EXP_OPEN;
+		break;
+	case REQ_OP_ZONE_CLOSE:
+		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+			return BLK_STS_IOERR;
+		if (zone->cond == BLK_ZONE_COND_FULL)
+			return BLK_STS_IOERR;
+
+		zone->cond = BLK_ZONE_COND_CLOSED;
+		break;
+	case REQ_OP_ZONE_FINISH:
+		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+			return BLK_STS_IOERR;
+
+		zone->cond = BLK_ZONE_COND_FULL;
+		zone->wp = zone->start + zone->len;
+		break;
 	default:
 		return BLK_STS_NOTSUPP;
 	}
@@ -155,7 +196,10 @@ blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
 		return null_zone_write(cmd, sector, nr_sectors);
 	case REQ_OP_ZONE_RESET:
 	case REQ_OP_ZONE_RESET_ALL:
-		return null_zone_reset(cmd, sector);
+	case REQ_OP_ZONE_OPEN:
+	case REQ_OP_ZONE_CLOSE:
+	case REQ_OP_ZONE_FINISH:
+		return null_zone_mgmt(cmd, op, sector);
 	default:
 		return BLK_STS_OK;
 	}
```
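With open, close, and finish wired into null_zone_mgmt(), a zoned null_blk instance can be driven through its zone conditions from userspace. The sketch below uses the BLKOPENZONE, BLKCLOSEZONE, and BLKFINISHZONE ioctls added on the block-core side in the same 5.5 cycle; the device node and zone size are assumptions about a locally created zoned null_blk device (zoned=1 via configfs), not values from this commit.

```c
/*
 * Sketch: zone management ioctls against a zoned block device.
 * /dev/nullb0 and the 256 MiB zone size are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>

int main(void)
{
        const char *dev = "/dev/nullb0";        /* assumed device node */
        struct blk_zone_range range = {
                .sector = 0,                    /* first zone */
                .nr_sectors = 524288,           /* assumed 256 MiB zone */
        };
        int fd = open(dev, O_RDWR);

        if (fd < 0) {
                perror(dev);
                return 1;
        }

        /* REQ_OP_ZONE_OPEN -> BLK_ZONE_COND_EXP_OPEN in null_zone_mgmt() */
        if (ioctl(fd, BLKOPENZONE, &range))
                perror("BLKOPENZONE");

        /* REQ_OP_ZONE_CLOSE -> BLK_ZONE_COND_CLOSED */
        if (ioctl(fd, BLKCLOSEZONE, &range))
                perror("BLKCLOSEZONE");

        /* REQ_OP_ZONE_FINISH -> BLK_ZONE_COND_FULL, wp moved to zone end */
        if (ioctl(fd, BLKFINISHZONE, &range))
                perror("BLKFINISHZONE");

        close(fd);
        return 0;
}
```

Recent util-linux releases expose the same operations through `blkzone open|close|finish`, which is a quicker way to hit these code paths.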