author:    Dylan Yudaken <dylany@meta.com>  2022-11-24 01:35:55 -0800
committer: Jens Axboe <axboe@kernel.dk>  2022-11-25 06:10:04 -0700
commit:    9b8c54755a2b16d4f23c0ea184b75e2edf77d906
tree:      f5084117ac5bfeca0b7ce2c50b8d276411bc5f6e /io_uring/io_uring.h
parent:    io_uring: allow defer completion for aux posted cqes
io_uring: add io_aux_cqe which allows deferred completion
Use the just-introduced deferred post cqe completion state when possible in io_aux_cqe. If not possible, fall back to io_post_aux_cqe.

This introduces a complication because of allow_overflow. For deferred completions we cannot know, without taking the completion_lock, whether the cqe will overflow (and even if we locked it, another post could sneak in and cause this cqe to be in overflow). However, since overflow protection is mostly a best-effort defence in depth to prevent infinite loops of CQEs for poll, just checking the overflow bit is going to be good enough and will result in at most 16 (the array size of deferred cqes) overflows.

Suggested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Dylan Yudaken <dylany@meta.com>
Link: https://lore.kernel.org/r/20221124093559.3780686-6-dylany@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
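The header diff below only adds the declaration; the definition lives in io_uring.c, which this page does not show (the diffstat is limited to io_uring.h). As a rough, non-authoritative sketch of the flow the commit message describes — assuming a small fixed-size deferred-cqe array hanging off ctx->submit_state and a hypothetical flush helper __io_flush_post_cqes(), neither of which is confirmed by this header diff:

	/*
	 * Sketch only: illustrates the fallback path and the best-effort
	 * overflow check described in the commit message. Field and helper
	 * names (submit_state.cqes, __io_flush_post_cqes) are assumptions.
	 */
	bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data,
			s32 res, u32 cflags, bool allow_overflow)
	{
		struct io_uring_cqe *cqe;

		if (!defer)
			/* No deferred completion state available: post directly. */
			return io_post_aux_cqe(ctx, user_data, res, cflags,
					       allow_overflow);

		lockdep_assert_held(&ctx->uring_lock);

		/* Deferred array full: flush pending cqes to the ring first. */
		if (ctx->submit_state.cqes_count ==
		    ARRAY_SIZE(ctx->submit_state.cqes))
			__io_flush_post_cqes(ctx);

		/*
		 * Best effort: without completion_lock we can only test the
		 * overflow bit, which may race with a concurrent post, but
		 * bounds the overflow at the size of the deferred array.
		 */
		if (!allow_overflow &&
		    test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
			return false;

		cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
		cqe->user_data = user_data;
		cqe->res = res;
		cqe->flags = cflags;
		return true;
	}

The racy bit test is exactly the "good enough" check the message refers to: it trades strict overflow accounting for lock-free posting, accepting at most one array's worth (16) of excess overflowed cqes.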
Diffstat (limited to 'io_uring/io_uring.h')
-rw-r--r--  io_uring/io_uring.h | 2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index a26d5aa7f3f3..dd02adf3d0df 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -36,6 +36,8 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
 		     bool allow_overflow);
 bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
 		     bool allow_overflow);
+bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+		bool allow_overflow);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
 static inline void io_req_complete_post_tw(struct io_kiocb *req, bool *locked)