From fe62de310e2b563c0d303a09d06b020077fe86b4 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Fri, 11 Dec 2020 13:58:24 -0800
Subject: libbpf: Support modules in bpf_program__set_attach_target() API

Support finding kernel targets in kernel modules when using
bpf_program__set_attach_target() API. This brings it up to par with
what libbpf supports when doing declarative SEC()-based target
determination.

Some minor internal refactoring was needed to make sure vmlinux BTF
can be loaded before bpf_object's load phase.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20201211215825.3646154-2-andrii@kernel.org
---
 tools/lib/bpf/libbpf.c | 64 +++++++++++++++++++++++++++++++-------------------
 1 file changed, 40 insertions(+), 24 deletions(-)

(limited to 'tools/lib')

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 9be88a90a4aa..6ae748f6ea11 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2518,7 +2518,7 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
 	return 0;
 }
 
-static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
+static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
 {
 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
 	    prog->type == BPF_PROG_TYPE_LSM)
@@ -2533,37 +2533,43 @@ static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
 	return false;
 }
 
-static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
+static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
 {
-	bool need_vmlinux_btf = false;
 	struct bpf_program *prog;
-	int i, err;
+	int i;
 
 	/* CO-RE relocations need kernel BTF */
 	if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
-		need_vmlinux_btf = true;
+		return true;
 
 	/* Support for typed ksyms needs kernel BTF */
 	for (i = 0; i < obj->nr_extern; i++) {
 		const struct extern_desc *ext;
 
 		ext = &obj->externs[i];
-		if (ext->type == EXT_KSYM && ext->ksym.type_id) {
-			need_vmlinux_btf = true;
-			break;
-		}
+		if (ext->type == EXT_KSYM && ext->ksym.type_id)
+			return true;
 	}
 
 	bpf_object__for_each_program(prog, obj) {
 		if (!prog->load)
 			continue;
-		if (libbpf_prog_needs_vmlinux_btf(prog)) {
-			need_vmlinux_btf = true;
-			break;
-		}
+		if (prog_needs_vmlinux_btf(prog))
+			return true;
 	}
 
-	if (!need_vmlinux_btf)
+	return false;
+}
+
+static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
+{
+	int err;
+
+	/* btf_vmlinux could be loaded earlier */
+	if (obj->btf_vmlinux)
+		return 0;
+
+	if (!force && !obj_needs_vmlinux_btf(obj))
 		return 0;
 
 	obj->btf_vmlinux = libbpf_find_kernel_btf();
@@ -7475,7 +7481,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 	}
 
 	err = bpf_object__probe_loading(obj);
-	err = err ? : bpf_object__load_vmlinux_btf(obj);
+	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
 	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
 	err = err ? : bpf_object__sanitize_and_load_btf(obj);
 	err = err ? : bpf_object__sanitize_maps(obj);
@@ -10870,23 +10876,33 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
 				   int attach_prog_fd,
 				   const char *attach_func_name)
 {
-	int btf_id;
+	int btf_obj_fd = 0, btf_id = 0, err;
 
 	if (!prog || attach_prog_fd < 0 || !attach_func_name)
 		return -EINVAL;
 
-	if (attach_prog_fd)
+	if (prog->obj->loaded)
+		return -EINVAL;
+
+	if (attach_prog_fd) {
 		btf_id = libbpf_find_prog_btf_id(attach_func_name,
 						 attach_prog_fd);
-	else
-		btf_id = libbpf_find_vmlinux_btf_id(attach_func_name,
-						    prog->expected_attach_type);
-
-	if (btf_id < 0)
-		return btf_id;
+		if (btf_id < 0)
+			return btf_id;
+	} else {
+		/* load btf_vmlinux, if not yet */
+		err = bpf_object__load_vmlinux_btf(prog->obj, true);
+		if (err)
+			return err;
+		err = find_kernel_btf_id(prog->obj, attach_func_name,
+					 prog->expected_attach_type,
+					 &btf_obj_fd, &btf_id);
+		if (err)
+			return err;
+	}
 
 	prog->attach_btf_id = btf_id;
-	prog->attach_btf_obj_fd = 0;
+	prog->attach_btf_obj_fd = btf_obj_fd;
 	prog->attach_prog_fd = attach_prog_fd;
 	return 0;
 }
-- cgit v1.2.3
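
A minimal usage sketch of the updated API (the object file
"fentry_probe.bpf.o", program "handle_entry", and target
"some_module_func" below are hypothetical placeholders, not from the
patch): bpf_program__set_attach_target() has to be called after
bpf_object__open() but before bpf_object__load(), and with
attach_prog_fd == 0 the target function is now resolved against
vmlinux BTF as well as kernel module BTFs:

	#include <errno.h>
	#include <bpf/libbpf.h>

	/* Sketch: point an fentry program at a function that lives in a
	 * kernel module. All names below are hypothetical placeholders.
	 */
	static int attach_to_module_func(void)
	{
		struct bpf_object *obj;
		struct bpf_program *prog;
		struct bpf_link *link;
		int err;

		obj = bpf_object__open("fentry_probe.bpf.o");
		err = libbpf_get_error(obj);
		if (err)
			return err;

		prog = bpf_object__find_program_by_name(obj, "handle_entry");
		if (!prog) {
			err = -ENOENT;
			goto out;
		}

		/* Must happen before load; attach_prog_fd == 0 means the
		 * target is looked up in vmlinux and module BTFs rather
		 * than in another BPF program.
		 */
		err = bpf_program__set_attach_target(prog, 0, "some_module_func");
		if (err)
			goto out;

		err = bpf_object__load(obj);
		if (err)
			goto out;

		link = bpf_program__attach(prog);
		err = libbpf_get_error(link);
	out:
		if (err)
			bpf_object__close(obj);
		return err;
	}
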
From a4d2a7ad86834092b327082004ead755d2603376 Mon Sep 17 00:00:00 2001
From: Brendan Jackman <jackmanb@google.com>
Date: Mon, 14 Dec 2020 11:38:12 +0000
Subject: libbpf: Expose libbpf ring_buffer epoll_fd

This provides a convenient perf ringbuf -> libbpf ringbuf migration
path for users of external polling systems. It is analogous to
perf_buffer__epoll_fd.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20201214113812.305274-1-jackmanb@google.com
---
 tools/lib/bpf/libbpf.h   | 1 +
 tools/lib/bpf/libbpf.map | 1 +
 tools/lib/bpf/ringbuf.c  | 6 ++++++
 3 files changed, 8 insertions(+)

(limited to 'tools/lib')

diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 6909ee81113a..3c35eb401931 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -536,6 +536,7 @@ LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
 				ring_buffer_sample_fn sample_cb, void *ctx);
 LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
 LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
+LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
 
 /* Perf buffer APIs */
 struct perf_buffer;
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 7c4126542e2b..1c0fd2dd233a 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -346,6 +346,7 @@ LIBBPF_0.3.0 {
 		btf__parse_split;
 		btf__new_empty_split;
 		btf__new_split;
+		ring_buffer__epoll_fd;
 		xsk_setup_xdp_prog;
 		xsk_socket__update_xskmap;
 } LIBBPF_0.2.0;
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 5c6522c89af1..a8f997d47adf 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -282,3 +282,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
 	}
 	return cnt < 0 ? -errno : res;
 }
+
+/* Get an fd that can be used to sleep until data is available in the ring(s) */
+int ring_buffer__epoll_fd(const struct ring_buffer *rb)
+{
+	return rb->epoll_fd;
+}
-- cgit v1.2.3
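
A minimal sketch of the intended use with an external polling system
(the wrapper function and the caller-owned epoll fd below are
illustrative assumptions, not part of the patch): register the fd
returned by ring_buffer__epoll_fd() with the application's own epoll
instance, then drain the ring via ring_buffer__consume() on readiness:

	#include <errno.h>
	#include <sys/epoll.h>
	#include <bpf/libbpf.h>

	/* Drive a libbpf ring buffer from a caller-owned epoll loop.
	 * "rb" is assumed to have been set up with ring_buffer__new()
	 * elsewhere; "ep_fd" is the external loop's epoll instance.
	 */
	static int poll_ringbuf_externally(struct ring_buffer *rb, int ep_fd)
	{
		struct epoll_event ev = {
			.events = EPOLLIN,
			.data.ptr = rb,	/* identify this fd in the loop */
		};
		struct epoll_event events[8];
		int i, n, err;

		/* register the ring buffer's epoll fd with the outer loop */
		err = epoll_ctl(ep_fd, EPOLL_CTL_ADD,
				ring_buffer__epoll_fd(rb), &ev);
		if (err)
			return -errno;

		n = epoll_wait(ep_fd, events, 8, -1);
		if (n < 0)
			return -errno;

		for (i = 0; i < n; i++) {
			if (events[i].data.ptr != rb)
				continue;	/* some other fd in the app's loop */
			/* drain all available records without blocking */
			err = ring_buffer__consume(rb);
			if (err < 0)
				return err;
		}
		return 0;
	}

Like perf_buffer__epoll_fd(), this keeps libbpf in charge of the
per-ring epoll bookkeeping while letting the application own the outer
event loop.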