path: root/net/bpf
author	Kui-Feng Lee <thinker.li@gmail.com>	2024-02-24 14:34:17 -0800
committer	Martin KaFai Lau <martin.lau@kernel.org>	2024-03-04 14:09:20 -0800
commit	187e2af05abe6bf80581490239c449456627d17a (patch)
tree	573c922b888bd957230d958b50a0756ce457262b /net/bpf
parent	bpf, net: validate struct_ops when updating value. (diff)
bpf: struct_ops supports more than one page for trampolines.
The BPF struct_ops previously only allowed one page of trampolines. Each function pointer of a struct_ops is implemented by a struct_ops bpf program, and each struct_ops bpf program requires a trampoline. The following selftest patch shows that each page can hold a little more than 20 trampolines.

While one page is more than enough for the tcp-cc use case, the sched_ext use case shows that one page is not always enough and hits the one-page limit. This patch overcomes the one-page limit by allocating additional pages when needed, up to a total of MAX_IMAGE_PAGES (8) pages, which is more than enough for reasonable usages.

The variable st_map->image has been changed to st_map->image_pages, and its type has been changed to an array of pointers to pages.

Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
Link: https://lore.kernel.org/r/20240224223418.526631-3-thinker.li@gmail.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
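The scheme the commit message describes is essentially bump allocation over a small page array: trampolines are carved out of the current page, and a new page is allocated only when the current one runs out of room, with a hard cap of MAX_IMAGE_PAGES. The user-space sketch below models only that bookkeeping; struct image_state, image_reserve() and aligned_alloc() are illustrative stand-ins (the kernel allocates executable trampoline pages via arch_alloc_bpf_trampoline() and tracks them in st_map->image_pages), and the 180-byte trampoline size is just an assumption consistent with "a little more than 20" per page.

/*
 * Sketch only: user-space model of keeping up to MAX_IMAGE_PAGES
 * trampoline pages and handing out space from the newest page,
 * allocating a fresh page when it is full.  Not the kernel's API.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096
#define MAX_IMAGE_PAGES	8

struct image_state {
	void *image_pages[MAX_IMAGE_PAGES];	/* pages owned by the map */
	int image_pages_cnt;			/* pages allocated so far */
	uint32_t image_off;			/* next free byte in the newest page */
};

/* Reserve @size bytes for one trampoline; return its address or NULL. */
static void *image_reserve(struct image_state *st, uint32_t size)
{
	void *page;

	/* Keep filling the newest page while it still has room. */
	if (st->image_pages_cnt && st->image_off + size <= PAGE_SIZE) {
		page = st->image_pages[st->image_pages_cnt - 1];
		goto out;
	}

	/* Otherwise allocate another page, bounded by MAX_IMAGE_PAGES. */
	if (st->image_pages_cnt == MAX_IMAGE_PAGES)
		return NULL;		/* hard cap: 8 pages per struct_ops map */

	page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);	/* stand-in for arch_alloc_bpf_trampoline() */
	if (!page)
		return NULL;
	st->image_pages[st->image_pages_cnt++] = page;
	st->image_off = 0;
out:
	st->image_off += size;
	return (char *)page + st->image_off - size;
}

int main(void)
{
	struct image_state st = {0};
	int i;

	/* ~180-byte trampolines: a bit more than 20 fit in one 4 KiB page. */
	for (i = 0; i < 60; i++)
		if (!image_reserve(&st, 180))
			break;
	printf("placed %d trampolines across %d page(s)\n", i, st.image_pages_cnt);

	while (st.image_pages_cnt)
		free(st.image_pages[--st.image_pages_cnt]);
	return 0;
}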
Diffstat (limited to 'net/bpf')
-rw-r--r--	net/bpf/bpf_dummy_struct_ops.c | 12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 02de71719aed..1b5f812e6972 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -91,6 +91,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 	struct bpf_tramp_link *link = NULL;
 	void *image = NULL;
 	unsigned int op_idx;
+	u32 image_off = 0;
 	int prog_ret;
 	s32 type_id;
 	int err;
@@ -114,12 +115,6 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 		goto out;
 	}
 
-	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
-	if (!image) {
-		err = -ENOMEM;
-		goto out;
-	}
-
 	link = kzalloc(sizeof(*link), GFP_USER);
 	if (!link) {
 		err = -ENOMEM;
@@ -133,7 +128,8 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 	err = bpf_struct_ops_prepare_trampoline(tlinks, link,
 						&st_ops->func_models[op_idx],
 						&dummy_ops_test_ret_function,
-						image, image + PAGE_SIZE);
+						&image, &image_off,
+						true);
 	if (err < 0)
 		goto out;
 
@@ -147,7 +143,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 		err = -EFAULT;
 out:
 	kfree(args);
-	arch_free_bpf_trampoline(image, PAGE_SIZE);
+	bpf_struct_ops_image_free(image);
 	if (link)
 		bpf_link_put(&link->link);
 	kfree(tlinks);
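For reference, the call site in the hunk above implies the updated helper signature: rather than receiving a caller-allocated [image, image + PAGE_SIZE) range, bpf_struct_ops_prepare_trampoline() now takes the current page pointer and write offset by reference, plus a flag saying whether it may allocate pages itself. The prototype below is a sketch inferred from this diff; the parameter names (including allow_alloc) are assumptions, not quoted from the kernel headers.

/* Sketch of the helper's post-patch signature, inferred from the call site above. */
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void **image, u32 *image_off,
				      bool allow_alloc);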