Diffstat (limited to 'block')
 block/Kconfig            |  2 +-
 block/bio.c              |  3 ++-
 block/blk-mq-tag.c       | 29 -----------------------------
 block/blk-mq.c           | 24 ++++++++++++++----------
 block/blk-tag.c          | 26 ++------------------------
 block/blk-zoned.c        |  4 ++--
 block/bsg.c              | 22 +++++++++++-----------
 block/partitions/check.c |  2 +-
 block/partitions/ldm.c   |  2 +-
 9 files changed, 34 insertions(+), 80 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 28ec55752b68..eb50fd4977c2 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -114,7 +114,7 @@ config BLK_DEV_THROTTLING
one needs to mount and use blkio cgroup controller for creating
cgroups and specifying per device IO rate policies.
- See Documentation/cgroups/blkio-controller.txt for more information.
+ See Documentation/cgroup-v1/blkio-controller.txt for more information.
config BLK_DEV_THROTTLING_LOW
bool "Block throttling .low limit interface support (EXPERIMENTAL)"
diff --git a/block/bio.c b/block/bio.c
index db9a40e9a136..9710e275f230 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -2091,7 +2091,8 @@ static int __init init_bio(void)
{
bio_slab_max = 2;
bio_slab_nr = 0;
- bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+ bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
+ GFP_KERNEL);
if (!bio_slabs)
panic("bio: can't allocate bios\n");
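
The bio.c hunk above is part of a treewide cleanup: open-coded kzalloc(n * size) calls become kcalloc(n, size), which rejects a multiplication that would overflow instead of silently allocating an undersized (but zeroed) buffer. A minimal userspace sketch of the same guard, assuming nothing beyond libc (xcalloc_checked is our name, not a kernel or libc symbol):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace analogue of kcalloc(): refuse n * size if the product
 * would overflow, and zero the memory on success. */
static void *xcalloc_checked(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size) {
		errno = ENOMEM;	/* overflow: fail, don't under-allocate */
		return NULL;
	}
	return calloc(n, size);
}

int main(void)
{
	/* Sane request: two zeroed slots, as in init_bio(). */
	void *ok = xcalloc_checked(2, 128);
	/* Overflowing request: kzalloc(n * size, ...) would wrap here. */
	void *bad = xcalloc_checked(SIZE_MAX / 2, 16);

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}

Modern libc calloc() performs the same check itself; spelling it out here just mirrors what kcalloc() does inside the kernel.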
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 70356a2a11ab..09b2ee6694fb 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -311,35 +311,6 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
-int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
- int (fn)(void *, struct request *))
-{
- int i, j, ret = 0;
-
- if (WARN_ON_ONCE(!fn))
- goto out;
-
- for (i = 0; i < set->nr_hw_queues; i++) {
- struct blk_mq_tags *tags = set->tags[i];
-
- if (!tags)
- continue;
-
- for (j = 0; j < tags->nr_tags; j++) {
- if (!tags->static_rqs[j])
- continue;
-
- ret = fn(data, tags->static_rqs[j]);
- if (ret)
- goto out;
- }
- }
-
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
-
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
{
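
blk_mq_tagset_iter() walked every statically allocated request of every hardware queue in a tag set and aborted on the first nonzero return from the callback; with its last user gone, the export is removed. For reference, a self-contained sketch of that walk-with-early-exit shape over a two-level table (all types and names here are illustrative, not kernel API):

#include <stddef.h>
#include <stdio.h>

struct item { int id; };

struct table {
	struct item **slots;	/* some entries may be NULL */
	size_t nr_slots;
};

/* Visit every non-NULL item across all tables; stop on the first
 * error, mirroring the control flow of the removed iterator. */
static int table_set_iter(struct table *tables, size_t nr_tables,
			  void *data, int (*fn)(void *, struct item *))
{
	for (size_t i = 0; i < nr_tables; i++) {
		for (size_t j = 0; j < tables[i].nr_slots; j++) {
			int ret;

			if (!tables[i].slots[j])
				continue;
			ret = fn(data, tables[i].slots[j]);
			if (ret)
				return ret;
		}
	}
	return 0;
}

static int print_item(void *data, struct item *it)
{
	(void)data;
	printf("item %d\n", it->id);
	return 0;	/* nonzero would abort the walk */
}

int main(void)
{
	struct item a = { 1 }, b = { 2 };
	struct item *slots[] = { &a, NULL, &b };
	struct table t = { slots, 3 };

	return table_set_iter(&t, 1, NULL, print_item);
}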
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d2de0a719ab8..70c65bb6c013 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -671,6 +671,7 @@ static void __blk_mq_requeue_request(struct request *rq)
if (blk_mq_request_started(rq)) {
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
+ rq->rq_flags &= ~RQF_TIMED_OUT;
if (q->dma_drain_size && blk_rq_bytes(rq))
rq->nr_phys_segments--;
}
@@ -770,6 +771,7 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);
static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
+ req->rq_flags |= RQF_TIMED_OUT;
if (req->q->mq_ops->timeout) {
enum blk_eh_timer_return ret;
@@ -779,6 +781,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
}
+ req->rq_flags &= ~RQF_TIMED_OUT;
blk_add_timer(req);
}
@@ -788,6 +791,8 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
return false;
+ if (rq->rq_flags & RQF_TIMED_OUT)
+ return false;
deadline = blk_rq_deadline(rq);
if (time_after_eq(jiffies, deadline))
@@ -1903,7 +1908,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
if (!tags)
return NULL;
- tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
+ tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
node);
if (!tags->rqs) {
@@ -1911,9 +1916,9 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
return NULL;
}
- tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
- node);
+ tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+ node);
if (!tags->static_rqs) {
kfree(tags->rqs);
blk_mq_free_tags(tags);
@@ -2349,7 +2354,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
mutex_lock(&set->tag_list_lock);
list_del_rcu(&q->tag_set_list);
- INIT_LIST_HEAD(&q->tag_set_list);
if (list_is_singular(&set->tag_list)) {
/* just transitioned to unshared */
set->flags &= ~BLK_MQ_F_TAG_SHARED;
@@ -2357,8 +2361,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
blk_mq_update_tag_set_depth(set, false);
}
mutex_unlock(&set->tag_list_lock);
-
synchronize_rcu();
+ INIT_LIST_HEAD(&q->tag_set_list);
}
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@ -2522,7 +2526,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
- q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
+ q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
GFP_KERNEL, set->numa_node);
if (!q->queue_hw_ctx)
goto err_percpu;
@@ -2741,14 +2745,14 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->nr_hw_queues > nr_cpu_ids)
set->nr_hw_queues = nr_cpu_ids;
- set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
+ set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
if (!set->tags)
return -ENOMEM;
ret = -ENOMEM;
- set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
- GFP_KERNEL, set->numa_node);
+ set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
+ GFP_KERNEL, set->numa_node);
if (!set->mq_map)
goto out_free_tags;
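
Three separate fixes share the blk-mq.c diff. The kcalloc_node() conversions are the same overflow-safe allocation change as in bio.c. The tag_set_list hunk delays INIT_LIST_HEAD() until after synchronize_rcu(), so RCU readers still walking the list cannot observe the entry's pointers being rewritten. Finally, the new RQF_TIMED_OUT flag keeps the timeout scan from firing twice for one request: it is set before the driver's ->timeout() handler runs, blk_mq_req_expired() treats a flagged request as not expired, and the flag is cleared only when the timer is re-armed or the request is requeued. A self-contained simulation of that latch-while-handling pattern, with all structures and names our own:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define F_TIMED_OUT	(1u << 0)	/* analogue of RQF_TIMED_OUT */

struct req {
	unsigned int flags;
	time_t deadline;
	bool in_flight;
};

/* Analogue of blk_mq_req_expired(): a request already being handled
 * (F_TIMED_OUT set) must not be reported as expired again. */
static bool req_expired(struct req *rq, time_t now)
{
	if (!rq->in_flight)
		return false;
	if (rq->flags & F_TIMED_OUT)
		return false;
	return now >= rq->deadline;
}

/* Analogue of blk_mq_rq_timed_out(): latch the flag across the
 * handler, clear it only when re-arming the timer. */
static void req_timed_out(struct req *rq, time_t now)
{
	rq->flags |= F_TIMED_OUT;
	printf("handler runs; rescan sees expired=%d\n",
	       req_expired(rq, now));	/* 0: no double fire */
	rq->flags &= ~F_TIMED_OUT;
	rq->deadline = now + 30;	/* blk_add_timer() */
}

int main(void)
{
	time_t now = time(NULL);
	struct req rq = { .flags = 0, .deadline = now - 1, .in_flight = true };

	if (req_expired(&rq, now))
		req_timed_out(&rq, now);
	printf("expired after re-arm: %d\n", req_expired(&rq, now));
	return 0;
}

Clearing the flag only on the re-arm or requeue path is the point: between the handler starting and blk_add_timer() running, a concurrent scan sees the latch and backs off.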
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 09f19c6c52ce..fbc153aef166 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -99,12 +99,12 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
__func__, depth);
}
- tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+ tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
if (!tag_index)
goto fail;
nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
- tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+ tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
if (!tag_map)
goto fail;
@@ -188,7 +188,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
*/
q->queue_tags = tags;
queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
- INIT_LIST_HEAD(&q->tag_busy_list);
return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
@@ -374,27 +373,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
rq->tag = tag;
bqt->tag_index[tag] = rq;
blk_start_request(rq);
- list_add(&rq->queuelist, &q->tag_busy_list);
return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
-
-/**
- * blk_queue_invalidate_tags - invalidate all pending tags
- * @q: the request queue for the device
- *
- * Description:
- * Hardware conditions may dictate a need to stop all pending requests.
- * In this case, we will safely clear the block side of the tag queue and
- * readd all requests to the request queue in the right order.
- **/
-void blk_queue_invalidate_tags(struct request_queue *q)
-{
- struct list_head *tmp, *n;
-
- lockdep_assert_held(q->queue_lock);
-
- list_for_each_safe(tmp, n, &q->tag_busy_list)
- blk_requeue_request(q, list_entry_rq(tmp));
-}
-EXPORT_SYMBOL(blk_queue_invalidate_tags);
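
With blk_queue_invalidate_tags() and its q->tag_busy_list bookkeeping gone, what remains of legacy block-layer tagging is the structure init_tag_map() sizes above: one request pointer per tag, plus a bitmap rounded up to whole unsigned longs (the nr_ulongs computation). A runnable userspace analogue of that allocate-and-find-first-zero pattern, with all names ours:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_ULONG	(sizeof(unsigned long) * CHAR_BIT)

struct tag_map {
	void **tag_index;	/* tag -> owner, like bqt->tag_index */
	unsigned long *map;	/* one bit per tag, like bqt->tag_map */
	int depth;
};

/* Mirror init_tag_map(): one pointer slot per tag plus a bitmap
 * rounded up to whole unsigned longs (the nr_ulongs computation). */
static int tag_map_init(struct tag_map *tm, int depth)
{
	size_t nr_ulongs = (depth + BITS_PER_ULONG - 1) / BITS_PER_ULONG;

	tm->tag_index = calloc(depth, sizeof(void *));
	tm->map = calloc(nr_ulongs, sizeof(unsigned long));
	if (!tm->tag_index || !tm->map)
		return -1;
	tm->depth = depth;
	return 0;
}

/* Grab the first free tag: find-first-zero over the bitmap. */
static int tag_map_get(struct tag_map *tm, void *owner)
{
	for (int tag = 0; tag < tm->depth; tag++) {
		unsigned long *word = &tm->map[tag / BITS_PER_ULONG];
		unsigned long bit = 1ul << (tag % BITS_PER_ULONG);

		if (!(*word & bit)) {
			*word |= bit;
			tm->tag_index[tag] = owner;
			return tag;
		}
	}
	return -1;	/* queue full */
}

int main(void)
{
	struct tag_map tm;
	int x = 42;

	if (tag_map_init(&tm, 70))	/* 70 tags -> 2 ulongs on 64-bit */
		return 1;
	printf("first tag: %d\n", tag_map_get(&tm, &x));
	printf("second tag: %d\n", tag_map_get(&tm, &x));
	return 0;
}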
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 3d08dc84db16..51000914e23f 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -331,8 +331,8 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
return -ERANGE;
- zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone),
- GFP_KERNEL | __GFP_ZERO);
+ zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
+ GFP_KERNEL | __GFP_ZERO);
if (!zones)
return -ENOMEM;
diff --git a/block/bsg.c b/block/bsg.c
index 132e657e2d91..66602c489956 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -693,6 +693,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
struct bsg_device *bd;
unsigned char buf[32];
+ lockdep_assert_held(&bsg_mutex);
+
if (!blk_get_queue(rq))
return ERR_PTR(-ENXIO);
@@ -707,14 +709,12 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
bsg_set_block(bd, file);
atomic_set(&bd->ref_count, 1);
- mutex_lock(&bsg_mutex);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
bsg_dbg(bd, "bound to <%s>, max queue %d\n",
format_dev_t(buf, inode->i_rdev), bd->max_queue);
- mutex_unlock(&bsg_mutex);
return bd;
}
@@ -722,7 +722,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
struct bsg_device *bd;
- mutex_lock(&bsg_mutex);
+ lockdep_assert_held(&bsg_mutex);
hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
@@ -732,7 +732,6 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
}
bd = NULL;
found:
- mutex_unlock(&bsg_mutex);
return bd;
}
@@ -746,17 +745,18 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
*/
mutex_lock(&bsg_mutex);
bcd = idr_find(&bsg_minor_idr, iminor(inode));
- mutex_unlock(&bsg_mutex);
- if (!bcd)
- return ERR_PTR(-ENODEV);
+ if (!bcd) {
+ bd = ERR_PTR(-ENODEV);
+ goto out_unlock;
+ }
bd = __bsg_get_device(iminor(inode), bcd->queue);
- if (bd)
- return bd;
-
- bd = bsg_add_device(inode, bcd->queue, file);
+ if (!bd)
+ bd = bsg_add_device(inode, bcd->queue, file);
+out_unlock:
+ mutex_unlock(&bsg_mutex);
return bd;
}
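
The bsg.c rework hoists the locking one level up: bsg_get_device() now holds bsg_mutex across the idr lookup, the hash-list search in __bsg_get_device() and, if needed, device creation in bsg_add_device(), so two racing opens can no longer both miss the lookup and insert the same device twice. The helpers document the new contract with lockdep_assert_held() rather than taking the mutex themselves. A userspace sketch of that lookup-or-create-under-one-lock shape, with a pthread mutex standing in for bsg_mutex (all names ours):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t registry_mutex = PTHREAD_MUTEX_INITIALIZER;

struct dev {
	int minor;
	struct dev *next;
};

static struct dev *devs;

/* Like __bsg_get_device() after the patch: caller holds the lock. */
static struct dev *__dev_lookup(int minor)
{
	for (struct dev *d = devs; d; d = d->next)
		if (d->minor == minor)
			return d;
	return NULL;
}

/* Like bsg_add_device(): also runs under the caller's lock. */
static struct dev *__dev_add(int minor)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->minor = minor;
	d->next = devs;
	devs = d;
	return d;
}

/* Like bsg_get_device(): one critical section covers lookup and
 * creation, so racing callers cannot both insert the same minor. */
static struct dev *dev_get(int minor)
{
	struct dev *d;

	pthread_mutex_lock(&registry_mutex);
	d = __dev_lookup(minor);
	if (!d)
		d = __dev_add(minor);
	pthread_mutex_unlock(&registry_mutex);
	return d;
}

int main(void)
{
	struct dev *a = dev_get(5);
	struct dev *b = dev_get(5);

	printf("same device: %s\n", a == b ? "yes" : "no");
	return 0;
}

The double-underscore prefix on the helpers follows the kernel convention the diff relies on: callers, not the helpers, own the lock.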
diff --git a/block/partitions/check.c b/block/partitions/check.c
index 720145c49066..ffe408fead0c 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -122,7 +122,7 @@ static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
return NULL;
nr = disk_max_parts(hd);
- state->parts = vzalloc(nr * sizeof(state->parts[0]));
+ state->parts = vzalloc(array_size(nr, sizeof(state->parts[0])));
if (!state->parts) {
kfree(state);
return NULL;
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 2a365c756648..0417937dfe99 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -378,7 +378,7 @@ static bool ldm_validate_tocblocks(struct parsed_partitions *state,
BUG_ON(!state || !ldb);
ph = &ldb->ph;
tb[0] = &ldb->toc;
- tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL);
+ tb[1] = kmalloc_array(3, sizeof(*tb[1]), GFP_KERNEL);
if (!tb[1]) {
ldm_crit("Out of memory.");
goto err;
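
The last three files get the same multiplication-overflow treatment in different spellings: blk-zoned.c switches to kvmalloc_array() (keeping its explicit INT_MAX pre-check), partitions/check.c wraps the vzalloc() size in array_size(), and partitions/ldm.c uses kmalloc_array(). array_size() saturates to SIZE_MAX on overflow, so the allocation fails outright rather than returning an undersized buffer. A minimal userspace analogue of that saturating helper (array_size_sat is our name, not a kernel symbol):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace analogue of the kernel's array_size(): saturate to
 * SIZE_MAX on overflow so the allocator is guaranteed to fail. */
static size_t array_size_sat(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return SIZE_MAX;
	return n * size;
}

int main(void)
{
	/* Normal case: 128 slots of 16 bytes each. */
	void *p = malloc(array_size_sat(128, 16));

	/* Overflow case: the saturated size makes malloc() fail
	 * cleanly rather than returning an undersized buffer. */
	void *q = malloc(array_size_sat(SIZE_MAX / 8, 16));

	printf("p=%p q=%p\n", p, q);
	free(p);
	free(q);
	return 0;
}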