author    Pavel Begunkov <asml.silence@gmail.com>  2023-01-09 14:46:09 +0000
committer Jens Axboe <axboe@kernel.dk>  2023-01-29 15:17:40 -0700
commit    bca39f39058567643487cd654970717705784ba3 (patch)
tree      1f0282a4e401c5c06e453d3c0605588e89045f63 /io_uring/io_uring.h
parent    io_uring: separate wq for ring polling (diff)
download  linux-bca39f39058567643487cd654970717705784ba3.tar.gz
          linux-bca39f39058567643487cd654970717705784ba3.tar.bz2
          linux-bca39f39058567643487cd654970717705784ba3.zip
io_uring: add lazy poll_wq activation
Even though io_poll_wq_wake()'s waitqueue_active reuses a barrier we do
for another waitqueue, it's not going to be the case in the future and
so we want to have a fast path for it when the ring has never been
polled.

Move poll_wq wake ups into __io_commit_cqring_flush() using a new flag
called ->poll_activated. The idea behind the flag is to set it when the
ring was polled for the first time. This requires additional sync to not
miss events, which is done here by using task_work for ->task_complete
rings, and by default enabling the flag for all other types of rings.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/060785e8e9137a920b232c0c7f575b131af19cac.1673274244.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
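The pattern the message describes is small enough to model outside the kernel.
Below is a minimal, self-contained C sketch of the lazy-activation idea, not
the kernel implementation: struct ring, ring_poll() and ring_wake() are
hypothetical names, and C11 atomics stand in for the kernel's barriers and for
the task_work the real patch uses to activate ->task_complete rings.

/*
 * Standalone sketch of lazy waitqueue activation; not kernel code.
 * struct ring, ring_poll() and ring_wake() are hypothetical names.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	atomic_bool poll_activated;	/* set once, on first poll */
};

/* first poll flips the flag; later polls see it already set */
static void ring_poll(struct ring *r)
{
	if (!atomic_load_explicit(&r->poll_activated, memory_order_relaxed)) {
		atomic_store_explicit(&r->poll_activated, true,
				      memory_order_release);
		/*
		 * The real patch re-wakes waiters at this point: wake-ups
		 * racing with the first poll may have been skipped while
		 * the flag was still clear, so they must not stay lost.
		 */
	}
	/* ... then sleep on the ring's poll waitqueue ... */
}

/* completion side: a never-polled ring pays only this flag test */
static void ring_wake(struct ring *r)
{
	if (!atomic_load_explicit(&r->poll_activated, memory_order_acquire))
		return;				/* fast path */
	puts("slow path: wake pollers");	/* as __io_commit_cqring_flush() */
}

int main(void)
{
	struct ring r = { .poll_activated = false };

	ring_wake(&r);		/* never polled: nothing to do */
	ring_poll(&r);		/* first poll activates the wait path */
	ring_wake(&r);		/* now routes through the wake-up */
	return 0;
}

The shape is the point: a ring that is never polled pays one well-predicted
flag test per completion flush instead of a barrier plus waitqueue check, and
the re-wake on activation covers events that raced with the first poll.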
Diffstat (limited to 'io_uring/io_uring.h')
-rw-r--r--  io_uring/io_uring.h  7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index c75bbb94703c..5113e0ddb01d 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -222,7 +222,7 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
 
 static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
 {
-	if (waitqueue_active(&ctx->poll_wq))
+	if (wq_has_sleeper(&ctx->poll_wq))
 		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
 				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
@@ -230,8 +230,6 @@ static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
 /* requires smb_mb() prior, see wq_has_sleeper() */
 static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
 {
-	io_poll_wq_wake(ctx);
-
 	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
@@ -316,7 +314,8 @@ static inline void io_req_complete_defer(struct io_kiocb *req)
 
 static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
 {
-	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
+	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
+		     ctx->has_evfd || ctx->poll_activated))
 		__io_commit_cqring_flush(ctx);
 }
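A note on the first hunk: waitqueue_active() checks the wait list without any
memory barrier, while wq_has_sleeper() issues smp_mb() first, pairing with the
barrier a sleeper executes between queueing itself and re-checking its wake
condition. That pairing is what the commit message alludes to: io_poll_wq_wake()
used to piggyback on a barrier done for another waitqueue, which stops being
true once the wake-up moves into __io_commit_cqring_flush(). A standalone C
sketch of the pairing, with hypothetical names (waiting_threads, work_ready)
and C11 seq_cst fences in place of smp_mb():

/*
 * Waker/sleeper barrier pairing behind wq_has_sleeper(); not kernel code.
 * waiting_threads and work_ready are hypothetical stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int  waiting_threads;	/* models the wait list */
static atomic_bool work_ready;		/* models posted CQEs */

/* sleeper side: enqueue, barrier, then re-check the condition */
static bool try_sleep(void)
{
	atomic_fetch_add(&waiting_threads, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* sleeper's smp_mb() */
	if (atomic_load_explicit(&work_ready, memory_order_relaxed)) {
		atomic_fetch_sub(&waiting_threads, 1);
		return false;		/* condition already true, don't block */
	}
	/* ... a real waiter would block here and re-check on wake-up ... */
	return true;
}

/* waker side: publish, barrier, then check for sleepers */
static void publish_and_wake(void)
{
	atomic_store_explicit(&work_ready, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* wq_has_sleeper()'s smp_mb() */
	if (atomic_load_explicit(&waiting_threads, memory_order_relaxed))
		puts("wake sleepers");
	/*
	 * With both fences, either the waker sees a sleeper queued or the
	 * sleeper sees work_ready set. Drop the waker's fence (the bare
	 * waitqueue_active() pattern) and both loads can miss, losing the
	 * wake-up.
	 */
}

int main(void)
{
	publish_and_wake();		/* no sleeper yet: fast path */
	printf("would block: %d\n", try_sleep());
	return 0;
}

With ->poll_activated gating the call, a never-polled ring skips even this
check: io_commit_cqring_flush() only takes the slow path once the flag is set.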