author | Pavel Begunkov <asml.silence@gmail.com> | 2020-03-04 16:14:11 +0300
---|---|---
committer | Jens Axboe <axboe@kernel.dk> | 2020-03-04 11:39:06 -0700
commit | f462fd36fc43662eeb42c95a9b8da8659af6d75e (patch) |
tree | 6be773b2ba796a7c669bd68c3e3ddffe454c15be /fs/io-wq.c |
parent | 58e3931987377d3f4ec7bbc13e4ea0aab52dc6b0 (diff) |
io-wq: optimise out *next_work() double lock
When executing non-linked hashed work, io_worker_handle_work()
will lock-unlock wqe->lock to update the hash, and then immediately
lock-unlock it again to get the next work item. Optimise this case
by doing the lock/unlock only once.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r-- | fs/io-wq.c | 9
1 file changed, 6 insertions, 3 deletions
```diff
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 473af080470a..82e76011d409 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -474,11 +474,11 @@ static void io_worker_handle_work(struct io_worker *worker)
 {
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wq *wq = wqe->wq;
+	unsigned hash = -1U;
 
 	do {
 		struct io_wq_work *work;
-		unsigned hash = -1U;
-
+get_next:
 		/*
 		 * If we got some work, mark us as busy. If we didn't, but
 		 * the list isn't empty, it means we stalled on hashed work.
@@ -524,9 +524,12 @@ static void io_worker_handle_work(struct io_worker *worker)
 			spin_lock_irq(&wqe->lock);
 			wqe->hash_map &= ~BIT_ULL(hash);
 			wqe->flags &= ~IO_WQE_FLAG_STALLED;
-			spin_unlock_irq(&wqe->lock);
 			/* dependent work is not hashed */
 			hash = -1U;
+			/* skip unnecessary unlock-lock wqe->lock */
+			if (!work)
+				goto get_next;
+			spin_unlock_irq(&wqe->lock);
 		}
 	} while (work);
 
```
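For illustration, below is a minimal userspace sketch of the locking pattern this patch applies. It is not the kernel code: a pthread mutex stands in for wqe->lock, and the queue, hash_map, fetch_work() and do_work() names are made-up stand-ins for the io-wq internals. The point is the get_next label: once the hash bit has been cleared and there is no dependent (linked) work left, the worker keeps the lock held and jumps straight back to fetching the next item, instead of unlocking only to re-take the same lock immediately.

```c
/* Toy model of the io-wq worker loop after this patch; all names are
 * illustrative stand-ins, not the real io-wq API. */
#include <pthread.h>
#include <stdio.h>

struct work {
	struct work *list;	/* next queued item */
	struct work *next;	/* dependent (linked) work, never hashed */
	int hash;		/* >= 0 if this item is hashed */
	const char *name;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for wqe->lock */
static struct work *queue;
static unsigned long hash_map;					/* stand-in for wqe->hash_map */

/* pop the next item; must be called with queue_lock held */
static struct work *fetch_work(int *hash)
{
	struct work *w = queue;

	if (w) {
		queue = w->list;
		*hash = w->hash;
		if (w->hash >= 0)
			hash_map |= 1UL << w->hash;
	}
	return w;
}

static void do_work(struct work *w)
{
	printf("running %s\n", w->name);
}

static void worker_loop(void)
{
	int hash = -1;

	pthread_mutex_lock(&queue_lock);	/* loop is entered with the lock held */
	do {
		struct work *w;
get_next:
		/* queue_lock is held here, both on normal entry and via the goto */
		w = fetch_work(&hash);
		pthread_mutex_unlock(&queue_lock);
		if (!w)
			return;

		/* handle the whole dependent link */
		do {
			struct work *next = w->next;

			do_work(w);
			w = next;

			if (hash != -1) {
				pthread_mutex_lock(&queue_lock);
				hash_map &= ~(1UL << hash);
				/* dependent work is not hashed */
				hash = -1;
				/* no dependent work left: keep the lock and
				 * jump back, skipping the unlock/lock pair
				 * the pre-patch code paid here */
				if (!w)
					goto get_next;
				pthread_mutex_unlock(&queue_lock);
			}
		} while (w);

		pthread_mutex_lock(&queue_lock);
	} while (1);
}

int main(void)
{
	struct work linked = { .hash = -1, .name = "linked (dependent) item" };
	struct work second = { .next = &linked, .hash = 5, .name = "second hashed item" };
	struct work first  = { .list = &second, .hash = 3, .name = "first hashed item" };

	queue = &first;
	worker_loop();
	return 0;
}
```

Built with `gcc -pthread`, the first hashed item carries no linked work, so it exercises the new goto path; the second one has a dependent item and takes the ordinary unlock path, matching the behaviour the commit message describes for non-linked hashed work.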