author    Jens Axboe <axboe@kernel.dk>    2024-01-28 20:11:55 -0700
committer Jens Axboe <axboe@kernel.dk>    2024-02-08 13:27:06 -0700
commit    521223d7c229f83915619f888c99e952f24dc39f (patch)
tree      b19885749949ac462562ebc474b8964edc85331f /io_uring/cancel.h
parent    io_uring: expand main struct io_kiocb flags to 64-bits (diff)
io_uring/cancel: don't default to setting req->work.cancel_seq
Just leave it unset by default, avoiding dipping into the last cacheline (which is otherwise untouched) for the fast path of using poll to drive networked traffic. Add a flag that tells us if the sequence is valid or not, and then we can defer actually assigning the flag and sequence until someone runs cancelations.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/cancel.h')
-rw-r--r--  io_uring/cancel.h  |  10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index c0a8e7c520b6..76b32e65c03c 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -25,4 +25,14 @@ void init_hash_table(struct io_hash_table *table, unsigned size);
 int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
 bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
 
+static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
+{
+	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+		return true;
+
+	req->flags |= REQ_F_CANCEL_SEQ;
+	req->work.cancel_seq = sequence;
+	return false;
+}
+
 #endif
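
For context, a minimal sketch of how the new helper is meant to be used on the cancelation side (modeled loosely on io_cancel_req_match() in io_uring/cancel.c; the helper name cancel_pass_matches and the exact control flow here are illustrative, not taken from this patch):

/*
 * Sketch: a cancelation pass deciding whether to cancel a request,
 * given a struct io_cancel_data describing the match criteria.
 * IORING_ASYNC_CANCEL_ALL cancels every matching request; otherwise
 * io_cancel_match_sequence() makes a pass skip requests it has
 * already visited. Its first call on a request lazily sets
 * REQ_F_CANCEL_SEQ and stamps req->work.cancel_seq, so requests that
 * are never targeted by a cancelation never write that cold cacheline.
 */
static bool cancel_pass_matches(struct io_kiocb *req, struct io_cancel_data *cd)
{
	if (cd->flags & IORING_ASYNC_CANCEL_ALL)
		return true;
	/* true on a repeat visit in the same pass; false stamps the sequence */
	if (io_cancel_match_sequence(req, cd->seq))
		return false;
	return true;
}

The payoff is on the common path: a request that completes without ever being targeted by a cancelation never touches req->work.cancel_seq at all.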