author    Linus Torvalds <torvalds@linux-foundation.org>  2023-04-27 17:27:06 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-04-27 17:27:06 -0700
commit    35fab9271b7e6d193b47005c4d07369714db4fd1 (patch)
tree      fd004adde50462d752dd90ca3e1c6c664f8f5a1c /drivers/block
parent    da46b58ff884146f6153064f18d276806f3c114c (diff)
parent    cbfac7707ba16619006a4fd60faac46303fd2f3e (diff)
Merge tag 'for-linus-6.4-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross:

 - some cleanups in the Xen blkback driver

 - fix potential sleeps under lock in various Xen drivers

* tag 'for-linus-6.4-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/blkback: move blkif_get_x86_*_req() into blkback.c
  xen/blkback: simplify free_persistent_gnts() interface
  xen/blkback: remove stale prototype
  xen/blkback: fix white space code style issues
  xen/pvcalls: don't call bind_evtchn_to_irqhandler() under lock
  xen/scsiback: don't call scsiback_free_translation_entry() under lock
  xen/pciback: don't call pcistub_device_put() under lock
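The "don't call X under lock" patches all share one shape: a call that can sleep (an allocation with GFP_KERNEL, binding an event-channel IRQ handler, a teardown path that may block) was being made while a spinlock was held. A minimal sketch of the fix pattern, with hypothetical names standing in for the driver-specific code:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct entry {
            struct list_head node;
            /* ... driver-specific state ... */
    };

    static DEFINE_SPINLOCK(tbl_lock);
    static LIST_HEAD(tbl);

    /* release_entry() stands in for any teardown that may sleep */
    void release_entry(struct entry *e);

    void remove_entry(struct entry *e)
    {
            spin_lock(&tbl_lock);
            list_del(&e->node);     /* unlink while the lock is held */
            spin_unlock(&tbl_lock);

            /*
             * The potentially sleeping call happens only after the
             * spinlock is dropped; sleeping with a spinlock held can
             * deadlock or trigger "scheduling while atomic".
             */
            release_entry(e);
    }

Under a spinlock only non-sleeping work is allowed, so anything that may schedule has to be moved before the lock is taken or after it is released, as above.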
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/xen-blkback/blkback.c  126
-rw-r--r--  drivers/block/xen-blkback/common.h   103
2 files changed, 118 insertions(+), 111 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index a5cf7f1e871c..c362f4ad80ab 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -239,9 +239,9 @@ static void put_persistent_gnt(struct xen_blkif_ring *ring,
atomic_dec(&ring->persistent_gnt_in_use);
}
-static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
- unsigned int num)
+static void free_persistent_gnts(struct xen_blkif_ring *ring)
{
+ struct rb_root *root = &ring->persistent_gnts;
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
@@ -249,6 +249,9 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
int segs_to_unmap = 0;
struct gntab_unmap_queue_data unmap_data;
+ if (RB_EMPTY_ROOT(root))
+ return;
+
unmap_data.pages = pages;
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
@@ -277,9 +280,11 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
rb_erase(&persistent_gnt->node, root);
kfree(persistent_gnt);
- num--;
+ ring->persistent_gnt_c--;
}
- BUG_ON(num != 0);
+
+ BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
+ BUG_ON(ring->persistent_gnt_c != 0);
}
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
@@ -631,12 +636,7 @@ purge_gnt_list:
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
/* Free all persistent grant pages */
- if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
- free_persistent_gnts(ring, &ring->persistent_gnts,
- ring->persistent_gnt_c);
-
- BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
- ring->persistent_gnt_c = 0;
+ free_persistent_gnts(ring);
/* Since we are shutting down remove all pages from the buffer */
gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
@@ -891,7 +891,7 @@ next:
out:
for (i = last_map; i < num; i++) {
/* Don't zap current batch's valid persistent grants. */
- if(i >= map_until)
+ if (i >= map_until)
pages[i]->persistent_gnt = NULL;
pages[i]->handle = BLKBACK_INVALID_HANDLE;
}
@@ -1072,7 +1072,111 @@ static void end_block_io_op(struct bio *bio)
bio_put(bio);
}
+static void blkif_get_x86_32_req(struct blkif_request *dst,
+ const struct blkif_x86_32_request *src)
+{
+ unsigned int i, n;
+
+ dst->operation = READ_ONCE(src->operation);
+
+ switch (dst->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+ dst->u.rw.nr_segments);
+ for (i = 0; i < n; i++)
+ dst->u.rw.seg[i] = src->u.rw.seg[i];
+ break;
+
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+
+ case BLKIF_OP_INDIRECT:
+ dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+ dst->u.indirect.nr_segments =
+ READ_ONCE(src->u.indirect.nr_segments);
+ dst->u.indirect.handle = src->u.indirect.handle;
+ dst->u.indirect.id = src->u.indirect.id;
+ dst->u.indirect.sector_number = src->u.indirect.sector_number;
+ n = min(MAX_INDIRECT_PAGES,
+ INDIRECT_PAGES(dst->u.indirect.nr_segments));
+ for (i = 0; i < n; i++)
+ dst->u.indirect.indirect_grefs[i] =
+ src->u.indirect.indirect_grefs[i];
+ break;
+
+ default:
+ /*
+ * Don't know how to translate this op. Only get the
+ * ID so failure can be reported to the frontend.
+ */
+ dst->u.other.id = src->u.other.id;
+ break;
+ }
+}
+
+static void blkif_get_x86_64_req(struct blkif_request *dst,
+ const struct blkif_x86_64_request *src)
+{
+ unsigned int i, n;
+
+ dst->operation = READ_ONCE(src->operation);
+
+ switch (dst->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+ dst->u.rw.nr_segments);
+ for (i = 0; i < n; i++)
+ dst->u.rw.seg[i] = src->u.rw.seg[i];
+ break;
+
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+
+ case BLKIF_OP_INDIRECT:
+ dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+ dst->u.indirect.nr_segments =
+ READ_ONCE(src->u.indirect.nr_segments);
+ dst->u.indirect.handle = src->u.indirect.handle;
+ dst->u.indirect.id = src->u.indirect.id;
+ dst->u.indirect.sector_number = src->u.indirect.sector_number;
+ n = min(MAX_INDIRECT_PAGES,
+ INDIRECT_PAGES(dst->u.indirect.nr_segments));
+ for (i = 0; i < n; i++)
+ dst->u.indirect.indirect_grefs[i] =
+ src->u.indirect.indirect_grefs[i];
+ break;
+ default:
+ /*
+ * Don't know how to translate this op. Only get the
+ * ID so failure can be reported to the frontend.
+ */
+ dst->u.other.id = src->u.other.id;
+ break;
+ }
+}
/*
* Function to copy the from the ring buffer the 'struct blkif_request'
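Both translators added above follow the same hardening pattern: fields the frontend can rewrite concurrently in the shared ring are read exactly once with READ_ONCE() and clamped before they drive a copy loop, replacing the plain reads plus barrier() of the old inline versions. A minimal, self-contained sketch of that pattern (the struct and names here are hypothetical, not the blkif layout):

    #include <linux/compiler.h>   /* READ_ONCE() */
    #include <linux/minmax.h>     /* min_t() */
    #include <linux/types.h>

    #define MAX_SEGS 16

    struct shared_req {
            u8  nr_segments;              /* written by the untrusted frontend */
            u64 seg[MAX_SEGS];
    };

    static void copy_req(struct shared_req *dst, const struct shared_req *src)
    {
            unsigned int i, n;

            /*
             * Snapshot the guest-controlled count once and clamp it;
             * re-reading src->nr_segments later could observe a larger
             * value and overrun seg[].
             */
            n = min_t(unsigned int, MAX_SEGS, READ_ONCE(src->nr_segments));
            dst->nr_segments = n;

            for (i = 0; i < n; i++)
                    dst->seg[i] = src->seg[i];
    }
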
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index a28473470e66..40f67bfc052d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -296,7 +296,7 @@ struct xen_blkif_ring {
struct work_struct free_work;
/* Thread shutdown wait queue. */
wait_queue_head_t shutdown_wq;
- struct xen_blkif *blkif;
+ struct xen_blkif *blkif;
};
struct xen_blkif {
@@ -315,7 +315,7 @@ struct xen_blkif {
atomic_t drain;
struct work_struct free_work;
- unsigned int nr_ring_pages;
+ unsigned int nr_ring_pages;
bool multi_ref;
/* All rings for this device. */
struct xen_blkif_ring *rings;
@@ -329,7 +329,7 @@ struct seg_buf {
};
struct grant_page {
- struct page *page;
+ struct page *page;
struct persistent_gnt *persistent_gnt;
grant_handle_t handle;
grant_ref_t gref;
@@ -384,7 +384,6 @@ void xen_blkif_xenbus_fini(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
-int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
@@ -395,100 +394,4 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);
-static inline void blkif_get_x86_32_req(struct blkif_request *dst,
- struct blkif_x86_32_request *src)
-{
- int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = READ_ONCE(src->operation);
- switch (dst->operation) {
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- case BLKIF_OP_WRITE_BARRIER:
- case BLKIF_OP_FLUSH_DISKCACHE:
- dst->u.rw.nr_segments = src->u.rw.nr_segments;
- dst->u.rw.handle = src->u.rw.handle;
- dst->u.rw.id = src->u.rw.id;
- dst->u.rw.sector_number = src->u.rw.sector_number;
- barrier();
- if (n > dst->u.rw.nr_segments)
- n = dst->u.rw.nr_segments;
- for (i = 0; i < n; i++)
- dst->u.rw.seg[i] = src->u.rw.seg[i];
- break;
- case BLKIF_OP_DISCARD:
- dst->u.discard.flag = src->u.discard.flag;
- dst->u.discard.id = src->u.discard.id;
- dst->u.discard.sector_number = src->u.discard.sector_number;
- dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
- break;
- case BLKIF_OP_INDIRECT:
- dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
- dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
- dst->u.indirect.handle = src->u.indirect.handle;
- dst->u.indirect.id = src->u.indirect.id;
- dst->u.indirect.sector_number = src->u.indirect.sector_number;
- barrier();
- j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
- for (i = 0; i < j; i++)
- dst->u.indirect.indirect_grefs[i] =
- src->u.indirect.indirect_grefs[i];
- break;
- default:
- /*
- * Don't know how to translate this op. Only get the
- * ID so failure can be reported to the frontend.
- */
- dst->u.other.id = src->u.other.id;
- break;
- }
-}
-
-static inline void blkif_get_x86_64_req(struct blkif_request *dst,
- struct blkif_x86_64_request *src)
-{
- int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = READ_ONCE(src->operation);
- switch (dst->operation) {
- case BLKIF_OP_READ:
- case BLKIF_OP_WRITE:
- case BLKIF_OP_WRITE_BARRIER:
- case BLKIF_OP_FLUSH_DISKCACHE:
- dst->u.rw.nr_segments = src->u.rw.nr_segments;
- dst->u.rw.handle = src->u.rw.handle;
- dst->u.rw.id = src->u.rw.id;
- dst->u.rw.sector_number = src->u.rw.sector_number;
- barrier();
- if (n > dst->u.rw.nr_segments)
- n = dst->u.rw.nr_segments;
- for (i = 0; i < n; i++)
- dst->u.rw.seg[i] = src->u.rw.seg[i];
- break;
- case BLKIF_OP_DISCARD:
- dst->u.discard.flag = src->u.discard.flag;
- dst->u.discard.id = src->u.discard.id;
- dst->u.discard.sector_number = src->u.discard.sector_number;
- dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
- break;
- case BLKIF_OP_INDIRECT:
- dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
- dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
- dst->u.indirect.handle = src->u.indirect.handle;
- dst->u.indirect.id = src->u.indirect.id;
- dst->u.indirect.sector_number = src->u.indirect.sector_number;
- barrier();
- j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
- for (i = 0; i < j; i++)
- dst->u.indirect.indirect_grefs[i] =
- src->u.indirect.indirect_grefs[i];
- break;
- default:
- /*
- * Don't know how to translate this op. Only get the
- * ID so failure can be reported to the frontend.
- */
- dst->u.other.id = src->u.other.id;
- break;
- }
-}
-
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
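
The header-to-.c move shown above leaves common.h with shared declarations only, while blkback.c, the sole caller, keeps the translators as static functions. A sketch of the resulting split (illustrative layout, simplified from the actual files):

    /* common.h: shared types and prototypes only */
    void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

    /* blkback.c: translators stay private to the one file that uses them */
    static void blkif_get_x86_32_req(struct blkif_request *dst,
                                     const struct blkif_x86_32_request *src);
    static void blkif_get_x86_64_req(struct blkif_request *dst,
                                     const struct blkif_x86_64_request *src);

Besides shrinking the header (and dropping the stale xen_blkif_purge_persistent() prototype), this means files that include common.h no longer carry two large static inline bodies they never use.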