author	Olivier Langlois <olivier@trillion01.com>	2024-10-13 14:29:02 -0400
committer	Jens Axboe <axboe@kernel.dk>	2024-11-06 13:55:38 -0700
commit	db1e1adf6f993b1c2cef605d86eff709a8db5052 (patch)
tree	cbf343b72dad1745cff701fb9c954acb0eab3e5b /io_uring
parent	a5e26f49fef9485bc4ae24666d984a6de11e058c (diff)
io_uring/napi: Use lock guards
Convert napi locks to use the shiny new Scope-Based Resource Management
machinery.

Signed-off-by: Olivier Langlois <olivier@trillion01.com>
Link: https://lore.kernel.org/r/2680ca47ee183cfdb89d1a40c84d349edeb620ab.1728828877.git.olivier@trillion01.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
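For readers unfamiliar with the Scope-Based Resource Management (SBRM) helpers: guard() and scoped_guard() come from <linux/cleanup.h> and are built on the compiler's __attribute__((cleanup)) extension, which runs a callback whenever the annotated variable leaves scope, on every exit path. A minimal userspace sketch of that underlying mechanism follows; DEMO_GUARD, demo_unlock and bump are invented names for illustration, not kernel API:

#include <pthread.h>
#include <stdio.h>

/* Cleanup handler: the compiler calls this automatically when the
 * annotated variable goes out of scope, on every exit path. */
static void demo_unlock(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
}

/* Rough userspace analogue of the kernel's guard(spinlock)(&lock):
 * take the lock now, release it when the hidden variable dies. */
#define DEMO_GUARD(lock)						\
	pthread_mutex_t *__guard __attribute__((cleanup(demo_unlock))) = \
		(pthread_mutex_lock(lock), (lock))

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static int bump(void)
{
	DEMO_GUARD(&demo_lock);

	if (counter >= 10)
		return -1;	/* early return: unlock still runs */
	return ++counter;	/* normal return: unlock runs here too */
}

int main(void)
{
	printf("bump() = %d\n", bump());
	return 0;
}

The point of the pattern shows in bump(): both return paths release the mutex with no explicit unlock call, which is exactly what the conversions in this patch rely on.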
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/napi.c	40
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 5e2299e7ff8e..6d5fdd397f2f 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -49,14 +49,13 @@ int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
 	hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];

-	rcu_read_lock();
-	e = io_napi_hash_find(hash_list, napi_id);
-	if (e) {
-		WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
-		rcu_read_unlock();
-		return -EEXIST;
+	scoped_guard(rcu) {
+		e = io_napi_hash_find(hash_list, napi_id);
+		if (e) {
+			WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
+			return -EEXIST;
+		}
 	}
-	rcu_read_unlock();

 	e = kmalloc(sizeof(*e), GFP_NOWAIT);
 	if (!e)
 		return -ENOMEM;
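The hunk above shows the main payoff of the conversion: in the open-coded version, rcu_read_unlock() had to be called on both the -EEXIST path and the fall-through path, and missing either would leak the read-side critical section. With scoped_guard(rcu) { ... }, the unlock is emitted automatically wherever control leaves the block. A simplified, runnable imitation of how such a scoped guard can be built (a one-iteration for loop carrying a cleanup-annotated variable; the real macro in <linux/cleanup.h> is more elaborate, and every name here is invented):

#include <stdio.h>

static void fake_rcu_lock(void)          { puts("rcu_read_lock"); }
static void fake_rcu_unlock(int *unused) { (void)unused; puts("rcu_read_unlock"); }

/* Simplified imitation of scoped_guard(): a one-iteration for loop whose
 * init-declaration carries the cleanup attribute, so leaving the body by
 * any route (including return) runs the unlock. */
#define SCOPED_FAKE_RCU()						\
	for (int __g __attribute__((cleanup(fake_rcu_unlock))) =	\
		(fake_rcu_lock(), 0), __once = 1; __once; __once = 0)

static int lookup(int id)
{
	SCOPED_FAKE_RCU() {
		if (id == 42)
			return -17;	/* -EEXIST-style early exit */
	}
	return 0;
}

int main(void)
{
	printf("lookup(42) = %d\n", lookup(42));
	return 0;
}

Running it prints the lock/unlock pair bracketing the early return, mirroring how the `return -EEXIST` inside scoped_guard(rcu) above still drops the RCU read lock.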
@@ -65,6 +64,10 @@ int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
 	e->napi_id = napi_id;
 	e->timeout = jiffies + NAPI_TIMEOUT;

+	/*
+	 * guard(spinlock) is not used here so that the lock can be dropped
+	 * manually before calling kfree()
+	 */
 	spin_lock(&ctx->napi_lock);
 	if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
 		spin_unlock(&ctx->napi_lock);
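This insertion path deliberately keeps manual spin_lock()/spin_unlock(): on the duplicate-entry race it wants napi_lock dropped before kfree(e), while a guard() would hold the lock until the end of the enclosing scope. One hypothetical alternative shape (not what the commit does) is to record the decision under the lock and act on it afterwards, which a scope-based guard could then bound. A userspace sketch with invented names (add_entry, table_lock, table_has_id) and a toy flag standing in for the hash lookup:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool table_has_id;	/* toy stand-in for io_napi_hash_find() */

/* Hypothetical shape (not the commit's code): decide while holding the
 * lock, then unlock before acting on the decision, so a scope-based
 * guard could have bounded the locked region. */
static int add_entry(void *e)
{
	bool dup;

	pthread_mutex_lock(&table_lock);
	dup = table_has_id;
	if (!dup)
		table_has_id = true;	/* "insert" (the toy keeps e alive) */
	pthread_mutex_unlock(&table_lock);

	if (dup) {
		free(e);	/* freed with the lock already dropped */
		return -1;	/* analogue of -EEXIST */
	}
	return 0;
}

int main(void)
{
	printf("first:  %d\n", add_entry(malloc(8)));
	printf("second: %d\n", add_entry(malloc(8)));
	return 0;
}

Keeping the manual lock calls, as the commit does, is the smaller and more obviously equivalent change.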
@@ -82,7 +85,7 @@ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
 {
 	struct io_napi_entry *e;

-	spin_lock(&ctx->napi_lock);
+	guard(spinlock)(&ctx->napi_lock);
 	/*
 	 * list_for_each_entry_safe() is not required as long as:
 	 * 1. list_del_rcu() does not reset the deleted node next pointer
@@ -96,7 +99,6 @@ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
 			kfree_rcu(e, rcu);
 		}
 	}
-	spin_unlock(&ctx->napi_lock);
 }

 static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
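The in-diff comment (truncated here by the hunk split) argues that plain list_for_each_entry() stays safe even though entries are deleted mid-walk: list_del_rcu() leaves the removed node's ->next pointer intact, and kfree_rcu() defers the actual free until after an RCU grace period, so the iterator parked on a just-deleted entry can still step forward. A self-contained sketch of the first property, simplified from include/linux/rculist.h (the grace-period deferral is not modeled here):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Simplified list_del_rcu(): unlink the entry but leave entry->next
 * pointing into the list, so a traversal parked on 'entry' can still
 * advance; only ->prev is poisoned in the kernel version. */
static void sketch_list_del_rcu(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
	entry->prev = NULL;	/* kernel uses LIST_POISON2 */
}

int main(void)
{
	struct list_head head, a, b, c;

	/* head <-> a <-> b <-> c, circular, kernel-style */
	head.next = &a; a.prev = &head;
	a.next = &b;    b.prev = &a;
	b.next = &c;    c.prev = &b;
	c.next = &head; head.prev = &c;

	struct list_head *pos = &b;	/* iterator currently on b */
	sketch_list_del_rcu(&b);	/* delete b while standing on it */
	pos = pos->next;		/* still advances: lands on c */
	printf("advanced to c? %s\n", pos == &c ? "yes" : "no");
	return 0;
}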
@@ -168,11 +170,12 @@ static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
 	if (list_is_singular(&ctx->napi_list))
 		loop_end_arg = iowq;

-	rcu_read_lock();
-	do {
-		is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
-	} while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg);
-	rcu_read_unlock();
+	scoped_guard(rcu) {
+		do {
+			is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
+		} while (!io_napi_busy_loop_should_end(iowq, start_time) &&
+			 !loop_end_arg);
+	}

 	io_napi_remove_stale(ctx, is_stale);
 }
@@ -203,13 +206,12 @@ void io_napi_free(struct io_ring_ctx *ctx)
 {
 	struct io_napi_entry *e;

-	spin_lock(&ctx->napi_lock);
+	guard(spinlock)(&ctx->napi_lock);
 	list_for_each_entry(e, &ctx->napi_list, list) {
 		hash_del_rcu(&e->node);
 		kfree_rcu(e, rcu);
 	}
 	INIT_LIST_HEAD_RCU(&ctx->napi_list);
-	spin_unlock(&ctx->napi_lock);
 }

 /*
@@ -305,9 +307,9 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
 	if (list_empty_careful(&ctx->napi_list))
 		return 0;

-	rcu_read_lock();
-	is_stale = __io_napi_do_busy_loop(ctx, NULL);
-	rcu_read_unlock();
+	scoped_guard(rcu) {
+		is_stale = __io_napi_do_busy_loop(ctx, NULL);
+	}

 	io_napi_remove_stale(ctx, is_stale);
 	return 1;