about summary refs log tree commit diff
path: root/include/linux/io_uring_types.h
diff options
context:
space:
mode:
author Jens Axboe <axboe@kernel.dk> 2022-07-07 14:30:09 -0600
committer Jens Axboe <axboe@kernel.dk> 2022-07-24 18:39:17 -0600
commit 43e0bbbd0b0e30d232fd8e9e908125b5c49a9fbc (patch)
tree 36990dc646e88f2bc176bb4873fa783b09ac9c69 /include/linux/io_uring_types.h
parent io_uring: impose max limit on apoll cache (diff)
download linux-43e0bbbd0b0e30d232fd8e9e908125b5c49a9fbc.tar.gz
linux-43e0bbbd0b0e30d232fd8e9e908125b5c49a9fbc.tar.bz2
linux-43e0bbbd0b0e30d232fd8e9e908125b5c49a9fbc.zip
io_uring: add netmsg cache
For recvmsg/sendmsg, if they don't complete inline, we currently need to allocate a struct io_async_msghdr for each request. This is a somewhat large struct. Hook up sendmsg/recvmsg to use the io_alloc_cache. This reduces the alloc + free overhead considerably, yielding 4-5% of extra performance running netbench. Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include/linux/io_uring_types.h')
-rw-r--r-- include/linux/io_uring_types.h | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index bf8f95332eda..d54b8b7e0746 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -222,8 +222,7 @@ struct io_ring_ctx {
struct io_hash_table cancel_table_locked;
struct list_head cq_overflow_list;
struct io_alloc_cache apoll_cache;
- struct xarray personalities;
- u32 pers_next;
+ struct io_alloc_cache netmsg_cache;
} ____cacheline_aligned_in_smp;
/* IRQ completion list, under ->completion_lock */
@@ -241,6 +240,9 @@ struct io_ring_ctx {
unsigned int file_alloc_start;
unsigned int file_alloc_end;
+ struct xarray personalities;
+ u32 pers_next;
+
struct {
/*
* We cache a range of free CQEs we can use, once exhausted it