author		Lorenz Bauer <lmb@cloudflare.com>	2020-03-09 11:12:34 +0000
committer	Daniel Borkmann <daniel@iogearbox.net>	2020-03-09 22:34:58 +0100
commit	d19da360ee0f3e6c1375391db1a724b66fd43312 (patch)
tree	b21f3e34ff2a865b68592c282f2148040cde2494 /net
parent	skmsg: Update saved hooks only once (diff)
bpf: tcp: Move assertions into tcp_bpf_get_proto
We need to ensure that sk->sk_prot uses certain callbacks, so that
code that directly calls e.g. tcp_sendmsg in certain corner cases
works. To avoid spurious asserts, we must do this only if
sk_psock_update_proto has not yet been called. The same invariants
apply for tcp_bpf_check_v6_needs_rebuild, so move the call as well.

Doing so allows us to merge tcp_bpf_init and tcp_bpf_reinit.

Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20200309111243.6982-4-lmb@cloudflare.com
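[Editor's note] The guard on the assertions is the crux of the patch: once the
BPF proto has been installed, sk->sk_prot no longer points at tcp_sendmsg and
friends, so re-running the checks would fail spuriously. The standalone C
sketch below mirrors that pattern outside the kernel; every name in it
(get_proto, init, the *_stub functions) is illustrative, not kernel API.
Invariants are asserted only while the saved proto is still unset, which is
exactly what lets tcp_bpf_init and tcp_bpf_reinit collapse into one function.

	/* Sketch only, not kernel code: "assert callbacks on first attach,
	 * skip on re-attach", as in tcp_bpf_get_proto().
	 */
	#include <errno.h>
	#include <stdio.h>

	typedef int (*sendmsg_fn)(void);

	static int tcp_sendmsg_stub(void)     { return 0; } /* stands in for tcp_sendmsg */
	static int tcp_bpf_sendmsg_stub(void) { return 1; } /* stands in for tcp_bpf_sendmsg */

	struct proto { sendmsg_fn sendmsg; };
	struct psock { struct proto *sk_proto; }; /* saved ops; NULL until first init */

	static struct proto bpf_proto = { tcp_bpf_sendmsg_stub };

	static struct proto *get_proto(struct psock *psock, struct proto *cur)
	{
		/* Assert expected callbacks only before the first swap,
		 * mirroring the !psock->sk_proto check in the patch. */
		if (!psock->sk_proto && cur->sendmsg != tcp_sendmsg_stub)
			return NULL; /* the kernel returns ERR_PTR(-EINVAL) here */
		return &bpf_proto;
	}

	static int init(struct psock *psock, struct proto **sk_prot)
	{
		struct proto *prot = get_proto(psock, *sk_prot);

		if (!prot)
			return -EINVAL;
		if (!psock->sk_proto)
			psock->sk_proto = *sk_prot; /* save original ops once */
		*sk_prot = prot;                    /* install BPF callbacks */
		return 0;
	}

	int main(void)
	{
		struct proto tcp = { tcp_sendmsg_stub };
		struct proto *sk_prot = &tcp;
		struct psock psock = { 0 };

		/* Both calls succeed: there is no separate reinit anymore. */
		printf("%d %d\n", init(&psock, &sk_prot), init(&psock, &sk_prot));
		return 0;
	}

Without the !psock->sk_proto guard, the second init() would see bpf_proto's
sendmsg and fail, which is the spurious assert the commit message refers to.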
Diffstat (limited to 'net')
-rw-r--r--	net/core/sock_map.c	25
-rw-r--r--	net/ipv4/tcp_bpf.c	42
2 files changed, 31 insertions, 36 deletions
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index cb8f740f7949..fafcbd22ecba 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -145,8 +145,8 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
struct sock *sk)
{
struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
- bool skb_progs, sk_psock_is_new = false;
struct sk_psock *psock;
+ bool skb_progs;
int ret;
skb_verdict = READ_ONCE(progs->skb_verdict);
@@ -191,18 +191,14 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
ret = -ENOMEM;
goto out_progs;
}
- sk_psock_is_new = true;
}
if (msg_parser)
psock_set_prog(&psock->progs.msg_parser, msg_parser);
- if (sk_psock_is_new) {
- ret = tcp_bpf_init(sk);
- if (ret < 0)
- goto out_drop;
- } else {
- tcp_bpf_reinit(sk);
- }
+
+ ret = tcp_bpf_init(sk);
+ if (ret < 0)
+ goto out_drop;
write_lock_bh(&sk->sk_callback_lock);
if (skb_progs && !psock->parser.enabled) {
@@ -239,15 +235,12 @@ static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
if (IS_ERR(psock))
return PTR_ERR(psock);
- if (psock) {
- tcp_bpf_reinit(sk);
- return 0;
+ if (!psock) {
+ psock = sk_psock_init(sk, map->numa_node);
+ if (!psock)
+ return -ENOMEM;
}
- psock = sk_psock_init(sk, map->numa_node);
- if (!psock)
- return -ENOMEM;
-
ret = tcp_bpf_init(sk);
if (ret < 0)
sk_psock_put(sk, psock);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 3327afa05c3d..ed8a8f3c9afe 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -629,14 +629,6 @@ static int __init tcp_bpf_v4_build_proto(void)
}
core_initcall(tcp_bpf_v4_build_proto);
-static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock)
-{
- int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
- int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
-
- sk_psock_update_proto(sk, psock, &tcp_bpf_prots[family][config]);
-}
-
static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
/* In order to avoid retpoline, we make assumptions when we call
@@ -648,34 +640,44 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops)
ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}
-void tcp_bpf_reinit(struct sock *sk)
+static struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock)
{
- struct sk_psock *psock;
+ int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
+ int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
- sock_owned_by_me(sk);
+ if (!psock->sk_proto) {
+ struct proto *ops = READ_ONCE(sk->sk_prot);
- rcu_read_lock();
- psock = sk_psock(sk);
- tcp_bpf_update_sk_prot(sk, psock);
- rcu_read_unlock();
+ if (tcp_bpf_assert_proto_ops(ops))
+ return ERR_PTR(-EINVAL);
+
+ tcp_bpf_check_v6_needs_rebuild(sk, ops);
+ }
+
+ return &tcp_bpf_prots[family][config];
}
int tcp_bpf_init(struct sock *sk)
{
- struct proto *ops = READ_ONCE(sk->sk_prot);
struct sk_psock *psock;
+ struct proto *prot;
sock_owned_by_me(sk);
rcu_read_lock();
psock = sk_psock(sk);
- if (unlikely(!psock || psock->sk_proto ||
- tcp_bpf_assert_proto_ops(ops))) {
+ if (unlikely(!psock)) {
rcu_read_unlock();
return -EINVAL;
}
- tcp_bpf_check_v6_needs_rebuild(sk, ops);
- tcp_bpf_update_sk_prot(sk, psock);
+
+ prot = tcp_bpf_get_proto(sk, psock);
+ if (IS_ERR(prot)) {
+ rcu_read_unlock();
+ return PTR_ERR(prot);
+ }
+
+ sk_psock_update_proto(sk, psock, prot);
rcu_read_unlock();
return 0;
}
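[Editor's note] The new tcp_bpf_get_proto() reports failure through the
returned pointer itself, using the kernel's ERR_PTR/IS_ERR/PTR_ERR encoding
from <linux/err.h>, and tcp_bpf_init() unpacks it with PTR_ERR. The userspace
sketch below re-declares simplified versions of those helpers to show the
idiom; the values and struct are illustrative.

	/* Sketch only: errors travel encoded in the pointer, so the callee
	 * needs no separate status out-parameter. Pointers in the top
	 * MAX_ERRNO bytes of the address space are treated as error codes.
	 */
	#include <errno.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	struct proto { int id; };
	static struct proto prots[2] = { { 0 }, { 1 } };

	static struct proto *get_proto(int ok, int config)
	{
		if (!ok)
			return ERR_PTR(-EINVAL); /* error encoded in pointer */
		return &prots[config];
	}

	int main(void)
	{
		struct proto *p = get_proto(0, 1);

		if (IS_ERR(p))
			printf("error: %ld\n", PTR_ERR(p)); /* prints -22 */

		p = get_proto(1, 1);
		if (!IS_ERR(p))
			printf("config: %d\n", p->id);      /* prints 1 */
		return 0;
	}

This is why the caller in tcp_bpf_init() can branch on IS_ERR(prot) and
return PTR_ERR(prot) directly, instead of the old pattern of returning
-EINVAL from inside the combined psock/ops check.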