summaryrefslogtreecommitdiff
path: root/fs/io_uring.c
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2021-08-18 17:01:43 +0100
committerJens Axboe <axboe@kernel.dk>2021-08-23 13:10:46 -0600
commite98e49b2bbf777f91732dc916d7ad33876c663c9 (patch)
tree200391fb33540329622ee38ebeb11cdd1415d39b /fs/io_uring.c
parent316319e82f7342ef327223a23199648bfabeadcd (diff)
io_uring: extend task put optimisations
Now with IRQ completions done via IRQ, almost all requests freeing are done from the context of submitter task, so it makes sense to extend task_put optimisation from io_req_free_batch_finish() to cover all the cases including task_work by moving it into io_put_task().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/824a7cbd745ddeee4a0f3ff85c558a24fd005872.1629302453.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--fs/io_uring.c16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 78aa4daf7969..9107cd78863d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1623,10 +1623,14 @@ static inline void io_put_task(struct task_struct *task, int nr)
{
struct io_uring_task *tctx = task->io_uring;
- percpu_counter_sub(&tctx->inflight, nr);
- if (unlikely(atomic_read(&tctx->in_idle)))
- wake_up(&tctx->wait);
- put_task_struct_many(task, nr);
+ if (likely(task == current)) {
+ tctx->cached_refs += nr;
+ } else {
+ percpu_counter_sub(&tctx->inflight, nr);
+ if (unlikely(atomic_read(&tctx->in_idle)))
+ wake_up(&tctx->wait);
+ put_task_struct_many(task, nr);
+ }
}
static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
@@ -2171,9 +2175,7 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
{
if (rb->ctx_refs)
percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
- if (rb->task == current)
- current->io_uring->cached_refs += rb->task_refs;
- else if (rb->task)
+ if (rb->task)
io_put_task(rb->task, rb->task_refs);
}