author     Ingo Molnar <mingo@kernel.org>   2024-03-12 09:49:52 +0100
committer  Ingo Molnar <mingo@kernel.org>   2024-03-12 09:55:57 +0100
commit     2e2bc42c8381d2c0e9604b59e49264821da29368
tree       c158510b5e7942b3a0d6eb6807cbeacf96035798 /io_uring/poll.c
parent     428080c9b19bfda37c478cd626dbd3851db1aff9
parent     855684c7d938c2442f07eabc154e7532b4c1fbf9
Merge branch 'linus' into x86/boot, to resolve conflict
There's a new conflict with Linus's upstream tree, because
in the following merge conflict resolution in <asm/coco.h>:
38b334fc767e Merge tag 'x86_sev_for_v6.9_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Linus has resolved the conflicting placement of 'cc_mask' better
than the original commit:
1c811d403afd x86/sev: Fix position dependent variable references in startup code
... which was also done by an internal merge resolution:
2e5fc4786b7a Merge branch 'x86/sev' into x86/boot, to resolve conflicts and to pick up dependent tree
But Linus is right in 38b334fc767e: the 'cc_mask' declaration is sufficient
within the #ifdef CONFIG_ARCH_HAS_CC_PLATFORM block.
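For reference, the resolution boils down to keeping a single 'cc_mask'
declaration inside the guarded block, roughly along these lines (a sketch
of the resolved shape of <asm/coco.h>, not the verbatim header):

  #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
  extern enum cc_vendor cc_vendor;
  extern u64 cc_mask;	/* the one declaration that is needed */

  static inline void cc_set_mask(u64 mask)
  {
  	RIP_REL_REF(cc_mask) = mask;
  }
  #else
  #define cc_vendor (CC_VENDOR_NONE)
  static const u64 cc_mask = 0;
  #endif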
So instead of forcing Linus to do the same resolution again, merge in Linus's
tree and follow his conflict resolution.
Conflicts:
arch/x86/include/asm/coco.h
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'io_uring/poll.c')
-rw-r--r--   io_uring/poll.c   33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 7513afc7b702..5f779139cae1 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -15,6 +15,7 @@
 
 #include "io_uring.h"
 #include "refs.h"
+#include "napi.h"
 #include "opdef.h"
 #include "kbuf.h"
 #include "poll.h"
@@ -343,8 +344,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 		 * Release all references, retry if someone tried to restart
 		 * task_work while we were executing it.
 		 */
-	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
-					IO_POLL_REF_MASK);
+		v &= IO_POLL_REF_MASK;
+	} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
 
 	return IOU_POLL_NO_ACTION;
 }
@@ -539,14 +540,6 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
 	poll->wait.private = (void *) wqe_private;
 
 	if (poll->events & EPOLLEXCLUSIVE) {
-		/*
-		 * Exclusive waits may only wake a limited amount of entries
-		 * rather than all of them, this may interfere with lazy
-		 * wake if someone does wait(events > 1). Ensure we don't do
-		 * lazy wake for those, as we need to process each one as they
-		 * come in.
-		 */
-		req->flags |= REQ_F_POLL_NO_LAZY;
 		add_wait_queue_exclusive(head, &poll->wait);
 	} else {
 		add_wait_queue(head, &poll->wait);
@@ -588,10 +581,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 				 struct io_poll_table *ipt, __poll_t mask,
 				 unsigned issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
 	INIT_HLIST_NODE(&req->hash_node);
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	io_init_poll_iocb(poll, mask);
 	poll->file = req->file;
 	req->apoll_events = poll->events;
@@ -618,6 +608,17 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 
 	if (issue_flags & IO_URING_F_UNLOCKED)
 		req->flags &= ~REQ_F_HASH_LOCKED;
+
+	/*
+	 * Exclusive waits may only wake a limited amount of entries
+	 * rather than all of them, this may interfere with lazy
+	 * wake if someone does wait(events > 1). Ensure we don't do
+	 * lazy wake for those, as we need to process each one as they
+	 * come in.
+	 */
+	if (poll->events & EPOLLEXCLUSIVE)
+		req->flags |= REQ_F_POLL_NO_LAZY;
+
 	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
 
 	if (unlikely(ipt->error || !ipt->nr_entries)) {
@@ -652,6 +653,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 		__io_poll_execute(req, mask);
 		return 0;
 	}
+	io_napi_add(req);
 
 	if (ipt->owning) {
 		/*
@@ -727,7 +729,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 
 	if (!def->pollin && !def->pollout)
 		return IO_APOLL_ABORTED;
-	if (!file_can_poll(req->file))
+	if (!io_file_can_poll(req))
 		return IO_APOLL_ABORTED;
 	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
 		mask |= EPOLLONESHOT;
@@ -818,9 +820,8 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
 		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
-			if (cd->seq == req->work.cancel_seq)
+			if (io_cancel_match_sequence(req, cd->seq))
 				continue;
-			req->work.cancel_seq = cd->seq;
 		}
 		*out_bucket = hb;
 		return req;
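A note on the poll_refs hunk above: the loop drops only the low
reference-count bits of 'v' (the bits above IO_POLL_REF_MASK carry state
flags) and retries while references remain, i.e. while new task_work
arrived during processing. Below is a standalone userspace sketch of that
drop-and-retry pattern using C11 atomics; REF_MASK, poll_refs and
process_events are made-up illustrations, not io_uring's own symbols:

  #include <stdatomic.h>
  #include <stdio.h>

  #define REF_MASK 0x000fffffu	/* low bits: reference count */

  static _Atomic unsigned poll_refs = 1u | 0x00100000u; /* one ref + a flag bit */

  static void process_events(unsigned nr)
  {
  	printf("processing %u queued event(s)\n", nr);
  }

  int main(void)
  {
  	unsigned v;

  	do {
  		v = atomic_load(&poll_refs);
  		process_events(v & REF_MASK);
  		/*
  		 * Drop only the references we consumed; the flag bits
  		 * must survive. If refs remain after the subtraction,
  		 * more work was queued while we ran: loop again.
  		 */
  		v &= REF_MASK;
  	} while ((atomic_fetch_sub(&poll_refs, v) - v) & REF_MASK);

  	printf("flags preserved: 0x%x\n", atomic_load(&poll_refs));
  	return 0;
  }

The io_poll_find hunk likewise replaces an open-coded sequence check with
io_cancel_match_sequence(). Judging purely from the removed lines, the
helper encapsulates the same match-then-record logic, along these lines (a
sketch reconstructed from this diff, not the kernel's cancel.h):

  /* Return true if this request was already seen by this cancel pass. */
  static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
  {
  	if (sequence == req->work.cancel_seq)
  		return true;
  	req->work.cancel_seq = sequence;
  	return false;
  }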