author	Pavel Begunkov <asml.silence@gmail.com>	2024-04-10 02:26:54 +0100
committer	Jens Axboe <axboe@kernel.dk>	2024-04-15 08:10:26 -0600
commit	8d09a88ef9d3cb7d21d45c39b7b7c31298d23998 (patch)
tree	17b8ffd1386460decdad9238aa46959e88532e9b /io_uring
parent	io_uring: open code io_cqring_overflow_flush() (diff)
download	linux-8d09a88ef9d3cb7d21d45c39b7b7c31298d23998.tar.gz
	linux-8d09a88ef9d3cb7d21d45c39b7b7c31298d23998.tar.bz2
	linux-8d09a88ef9d3cb7d21d45c39b7b7c31298d23998.zip
io_uring: always lock __io_cqring_overflow_flush
Conditional locking is never great; in the case of __io_cqring_overflow_flush(), which is a slow path, it's not justified. Don't handle IOPOLL separately, always grab uring_lock for overflow flushing.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/162947df299aa12693ac4b305dacedab32ec7976.1712708261.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
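To make the change concrete, here is a condensed before/after sketch of the flush path's locking, distilled from the diff below (the function names come from io_uring/io_uring.c; the bodies are illustrative rather than the exact patched source):

/* Before: take uring_lock only for IOPOLL rings, which sync CQ posting
 * against uring_lock rather than completion_lock.
 */
static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_IOPOLL)
		mutex_lock(&ctx->uring_lock);
	__io_cqring_overflow_flush(ctx);
	if (ctx->flags & IORING_SETUP_IOPOLL)
		mutex_unlock(&ctx->uring_lock);
}

/* After: overflow flushing is a slow path, so always take uring_lock;
 * __io_cqring_overflow_flush() can then assert the lock unconditionally
 * via lockdep_assert_held().
 */
static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	__io_cqring_overflow_flush(ctx);
	mutex_unlock(&ctx->uring_lock);
}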
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	13
1 files changed, 8 insertions, 5 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 740bfec10fde..92ac9c0fc597 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -673,6 +673,8 @@ static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
 	struct io_overflow_cqe *ocqe;
 	LIST_HEAD(list);
 
+	lockdep_assert_held(&ctx->uring_lock);
+
 	spin_lock(&ctx->completion_lock);
 	list_splice_init(&ctx->cq_overflow_list, &list);
 	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
@@ -689,6 +691,8 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
 	size_t cqe_size = sizeof(struct io_uring_cqe);
 
+	lockdep_assert_held(&ctx->uring_lock);
+
 	if (__io_cqring_events(ctx) == ctx->cq_entries)
 		return;
 
@@ -718,12 +722,9 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 
 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
 {
-	/* iopoll syncs against uring_lock, not completion_lock */
-	if (ctx->flags & IORING_SETUP_IOPOLL)
-		mutex_lock(&ctx->uring_lock);
+	mutex_lock(&ctx->uring_lock);
 	__io_cqring_overflow_flush(ctx);
-	if (ctx->flags & IORING_SETUP_IOPOLL)
-		mutex_unlock(&ctx->uring_lock);
+	mutex_unlock(&ctx->uring_lock);
 }
 
 /* can be called by any task */
@@ -1522,6 +1523,8 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 	unsigned int nr_events = 0;
 	unsigned long check_cq;
 
+	lockdep_assert_held(&ctx->uring_lock);
+
 	if (!io_allowed_run_tw(ctx))
 		return -EEXIST;
 