path: root/io_uring/kbuf.c
author	Hao Xu <howeyxu@tencent.com>	2022-06-17 13:04:29 +0800
committer	Jens Axboe <axboe@kernel.dk>	2022-07-24 18:39:14 -0600
commit	f09c8643f0fad0e287b9f737955276000fd76a5d (patch)
tree	d968fe6a0471e999ccfe624763e6bca1eb750c8b /io_uring/kbuf.c
parent	io_uring: mutex locked poll hashing (diff)
io_uring: kbuf: add comments for some tricky code
Add comments to explain why it is always under the uring lock when
incrementing the head in __io_kbuf_recycle(), and rectify one comment
about kbuf consumption in the io-wq case.

Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220617050429.94293-1-hao.xu@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
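As a standalone illustration of the locking the message refers to (a toy model with made-up names, not the kernel code), consuming a slot from a provided-buffer ring is just a head increment, and that increment has to happen under the same lock that covered picking the buffer, otherwise two requests could claim the same slot:

#include <pthread.h>
#include <stdint.h>

/* Toy model of a provided-buffer ring; the mutex stands in for the
 * io_uring ctx lock that the commit message talks about. */
struct buf_ring_model {
	pthread_mutex_t lock;
	uint16_t head;		/* next slot to consume */
	uint16_t tail;		/* one past the last posted slot */
};

/* Pick a slot and commit it inside one locked section: if the head++
 * could run without the lock, two requests might get the same slot. */
static int ring_pick_and_commit(struct buf_ring_model *br, uint16_t *slot)
{
	int ret = -1;

	pthread_mutex_lock(&br->lock);
	if (br->head != br->tail) {
		*slot = br->head;	/* index handed to the request */
		br->head++;		/* the increment the new comment is about */
		ret = 0;
	}
	pthread_mutex_unlock(&br->lock);
	return ret;
}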
Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r--	io_uring/kbuf.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index b9c7f6e87cc9..59e4fafeb28c 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -52,6 +52,13 @@ void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	if (req->flags & REQ_F_BUFFER_RING) {
 		if (req->buf_list) {
 			if (req->flags & REQ_F_PARTIAL_IO) {
+				/*
+				 * If we end up here, then the io_uring_lock has
+				 * been kept held since we retrieved the buffer.
+				 * For the io-wq case, we already cleared
+				 * req->buf_list when the buffer was retrieved,
+				 * hence it cannot be set here for that case.
+				 */
 				req->buf_list->head++;
 				req->buf_list = NULL;
 			} else {
@@ -163,12 +170,13 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
 		/*
 		 * If we came in unlocked, we have no choice but to consume the
-		 * buffer here. This does mean it'll be pinned until the IO
-		 * completes. But coming in unlocked means we're in io-wq
-		 * context, hence there should be no further retry. For the
-		 * locked case, the caller must ensure to call the commit when
-		 * the transfer completes (or if we get -EAGAIN and must poll
-		 * or retry).
+		 * buffer here, otherwise nothing ensures that the buffer won't
+		 * get used by others. This does mean it'll be pinned until the
+		 * IO completes, coming in unlocked means we're being called from
+		 * io-wq context and there may be further retries in async hybrid
+		 * mode. For the locked case, the caller must call commit when
+		 * the transfer completes (or if we get -EAGAIN and must poll or
+		 * retry).
 		 */
 		req->buf_list = NULL;
 		bl->head++;
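To make the two hunks above concrete, here is a rough standalone sketch (assumed names and simplified types, not the kernel API) of the consume-now versus commit-later split: an unlocked io-wq style caller bumps the ring head immediately and drops its buf_list pointer, while a locked caller keeps buf_list so the commit can happen when the transfer completes or the request recycles the buffer.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the structures touched in the hunks above. */
struct ring_model {
	uint16_t head;			/* like bl->head */
	uint16_t tail;
};

struct req_model {
	struct ring_model *buf_list;	/* like req->buf_list */
	bool partial_io;		/* like REQ_F_PARTIAL_IO */
};

/* Buffer selection: an unlocked (io-wq) caller consumes the slot on the
 * spot, a locked caller defers the head bump to completion/recycle time. */
static int select_buffer_model(struct req_model *req, struct ring_model *bl,
			       bool unlocked)
{
	if (bl->head == bl->tail)
		return -1;		/* no buffer posted */
	if (unlocked) {
		bl->head++;		/* consume now; buffer stays pinned */
		req->buf_list = NULL;	/* a later recycle must not touch the ring */
	} else {
		req->buf_list = bl;	/* commit deferred, lock stays held */
	}
	return 0;
}

/* Deferred commit, mirroring the first hunk: a partial transfer keeps the
 * buffer (head++), otherwise the slot is simply left for the next pick. */
static void recycle_model(struct req_model *req)
{
	if (!req->buf_list)
		return;			/* io-wq case already consumed the slot */
	if (req->partial_io)
		req->buf_list->head++;
	req->buf_list = NULL;
}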