author     Hao Xu <haoxu@linux.alibaba.com>  2021-09-22 18:12:36 +0800
committer  Jens Axboe <axboe@kernel.dk>      2021-09-24 10:24:34 -0600
commit     bd99c71bd14072ce2920f6d0c2fe43df072c653c (patch)
tree       2bfb3a10247a77a18deeb83f4f2387c9e76da197 /fs
parent     87c1696655787895689618c8b63c5efe66b8f2ab (diff)
io_uring: fix race between poll completion and cancel_hash insertion
If poll arming and poll completion run in parallel, there may be races.
For instance, run io_poll_add in iowq and io_poll_task_func in the
original context, then:

          iowq                          original context
  io_poll_add
    vfs_poll
     (interruption happens,
      tw queued to original
      context)                    io_poll_task_func
                                    generate cqe
                                    del from cancel_hash[]
  if !poll.done
    insert to cancel_hash[]

The entry is left dangling in cancel_hash[]; a similar case exists for
fast poll. Fix it by setting poll.done = true when deleting from
cancel_hash[].

Fixes: 5082620fb2ca ("io_uring: terminate multishot poll for CQ ring overflow")
Signed-off-by: Hao Xu <haoxu@linux.alibaba.com>
Link: https://lore.kernel.org/r/20210922101238.7177-2-haoxu@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
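To make the ordering concrete, below is a minimal userspace sketch of the
pattern the patch applies; it is not kernel code, and the names (fake_req,
fake_lock, complete_req, arm_req) are purely illustrative. The completion
side marks the request done under the lock while unhashing it, and the
arming side re-checks that flag under the same lock before inserting, so a
completed request cannot be left behind in the hash. A pthread mutex stands
in for ctx->completion_lock.

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct fake_req {
          bool done;      /* set once the completion side has fired */
          bool hashed;    /* stands in for membership in cancel_hash[] */
  };

  static pthread_mutex_t fake_lock = PTHREAD_MUTEX_INITIALIZER;

  /* completion path: remove from the hash and mark the request done */
  static void complete_req(struct fake_req *req)
  {
          pthread_mutex_lock(&fake_lock);
          req->hashed = false;    /* hash_del(&req->hash_node) */
          req->done = true;       /* the flag this patch now sets */
          pthread_mutex_unlock(&fake_lock);
  }

  /* arming path: only insert if the completion has not already run */
  static void arm_req(struct fake_req *req)
  {
          pthread_mutex_lock(&fake_lock);
          if (!req->done)
                  req->hashed = true;     /* insert into cancel_hash[] */
          pthread_mutex_unlock(&fake_lock);
  }

  int main(void)
  {
          struct fake_req req = { .done = false, .hashed = false };

          /* the completion side wins the race: it runs before arming finishes */
          complete_req(&req);

          /* arming re-checks done, so no stale entry is inserted */
          arm_req(&req);

          printf("left dangling in hash: %s\n",
                 req.hashed ? "yes (pre-fix bug)" : "no (patched behaviour)");
          return 0;
  }

Without the done check honoured (i.e. if complete_req() did not set the flag),
arm_req() would insert an entry that nothing will ever remove, which is the
dangling cancel_hash[] entry described above.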
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c  |  6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e372d5b9f6dc..43530aae6180 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5337,10 +5337,8 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
 	}
 	if (req->poll.events & EPOLLONESHOT)
 		flags = 0;
-	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-		req->poll.done = true;
+	if (!io_cqring_fill_event(ctx, req->user_data, error, flags))
 		flags = 0;
-	}
 	if (flags & IORING_CQE_F_MORE)
 		ctx->cq_extra++;
 
@@ -5371,6 +5369,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 	if (done) {
 		io_poll_remove_double(req);
 		hash_del(&req->hash_node);
+		req->poll.done = true;
 	} else {
 		req->result = 0;
 		add_wait_queue(req->poll.head, &req->poll.wait);
@@ -5508,6 +5507,7 @@ static void io_async_task_func(struct io_kiocb *req, bool *locked)
 	hash_del(&req->hash_node);
 	io_poll_remove_double(req);
+	apoll->poll.done = true;
 	spin_unlock(&ctx->completion_lock);
 
 	if (!READ_ONCE(apoll->poll.canceled))