path: root/io_uring/io_uring.c
author	Jens Axboe <axboe@kernel.dk>	2023-06-02 08:41:46 -0600
committer	Jens Axboe <axboe@kernel.dk>	2023-06-02 08:55:37 -0600
commit	c92fcfc2bab54451c4f1481755ea244f413455cb (patch)
tree	c4a1474020ef5cf5138b5b34c5f35476ce8a1d49 /io_uring/io_uring.c
parent	f026be0e1e881e3395c3d5418ffc8c2a2203c3f3 (diff)
io_uring: avoid indirect function calls for the hottest task_work
We use task_work for a variety of reasons, but doing completions or triggering retry after poll are by far the two hottest. Use the indirect function call wrappers to avoid the indirect function call if CONFIG_RETPOLINE is set.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
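For context, a rough sketch of what the wrapper does: when CONFIG_RETPOLINE is set, include/linux/indirect_call_wrapper.h defines INDIRECT_CALL_2() to compare the function pointer against the two named candidates and make a direct call on a match, only falling back to the indirect call for other handlers. The sketch below illustrates the idea and is not a verbatim copy of the header:

/* Sketch of the wrappers in include/linux/indirect_call_wrapper.h
 * (CONFIG_RETPOLINE case); without CONFIG_RETPOLINE they collapse to
 * a plain f(__VA_ARGS__) indirect call.
 */
#define INDIRECT_CALL_1(f, f1, ...)					\
	({								\
		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);	\
	})

#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	({								\
		likely(f == f2) ? f2(__VA_ARGS__) :			\
				  INDIRECT_CALL_1(f, f1, __VA_ARGS__);	\
	})

/* So the call sites changed below resolve to a direct call to
 * io_poll_task_func or io_req_rw_complete in the common case, and only
 * take the retpoline-protected indirect call for other task_work handlers.
 */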
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--	io_uring/io_uring.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c99a7a0c3f21..fc511cb6761d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -95,6 +95,7 @@
 #include "timeout.h"
 #include "poll.h"
+#include "rw.h"
 #include "alloc_cache.h"
 
 #define IORING_MAX_ENTRIES	32768
@@ -1205,7 +1206,9 @@ static unsigned int handle_tw_list(struct llist_node *node,
 			ts->locked = mutex_trylock(&(*ctx)->uring_lock);
 			percpu_ref_get(&(*ctx)->refs);
 		}
-		req->io_task_work.func(req, ts);
+		INDIRECT_CALL_2(req->io_task_work.func,
+				io_poll_task_func, io_req_rw_complete,
+				req, ts);
 		node = next;
 		count++;
 		if (unlikely(need_resched())) {
@@ -1415,7 +1418,9 @@ again:
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    io_task_work.node);
 		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
-		req->io_task_work.func(req, ts);
+		INDIRECT_CALL_2(req->io_task_work.func,
+				io_poll_task_func, io_req_rw_complete,
+				req, ts);
 		ret++;
 		node = next;
 	}