
Commit e295a26

Asphaltt authored and Kernel Patches Daemon committed
selftests/bpf: Add tests to verify no memleak when updating hash and cgrp storage maps
Add tests to verify that updating hash and local storage maps does not leak memory when BPF_KPTR_REF objects are involved. The tests perform the following steps:

1. Call update_elem() to insert an initial value.
2. Use bpf_refcount_acquire() to increment the refcount.
3. Store the node pointer in the map value.
4. Add the node to a linked list.
5. Probe-read the refcount and verify it is *2*.
6. Call update_elem() again to trigger refcount decrement.
7. Probe-read the refcount and verify it is *1*.

Signed-off-by: Leon Hwang <[email protected]>
1 parent f469dfa commit e295a26
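
In short, the userspace side drives an update/run/update/run sequence and checks the refcount the BPF programs report after each phase. The sketch below is a condensed, hypothetical variant of the test_refcnt_leak() helper added in this commit: the name drive_refcnt_leak() and the single-u64 value are illustrative only, and the real helper additionally handles per-CPU value buffers and BPF_F_LOCK updates. It assumes the usual selftest context (test_progs.h, network_helpers.h, a loaded refcounted_kptr skeleton).

	/* Hypothetical condensed driver mirroring test_refcnt_leak() below. */
	static int drive_refcnt_leak(struct bpf_map *map, struct bpf_program *prog_leak,
				     struct bpf_program *prog_check)
	{
		LIBBPF_OPTS(bpf_test_run_opts, opts,
			    .data_in = &pkt_v4,		/* dummy IPv4 packet from network_helpers */
			    .data_size_in = sizeof(pkt_v4),
			    .repeat = 1,
		);
		__u64 value = 0;
		int key = 0, err;

		/* Step 1: update_elem() inserts the initial (empty) value. */
		err = bpf_map__update_elem(map, &key, sizeof(key), &value, sizeof(value), 0);
		if (err)
			return err;

		/* Steps 2-5: the "leak" program acquires an extra refcount, stores the
		 * node pointer in the map value, links the node into a list, and returns
		 * the probed refcount, which must be 2.
		 */
		err = bpf_prog_test_run_opts(bpf_program__fd(prog_leak), &opts);
		if (err || opts.retval != 2)
			return -1;

		/* Step 6: a second update_elem() overwrites the stored kptr, which must
		 * drop exactly one reference.
		 */
		err = bpf_map__update_elem(map, &key, sizeof(key), &value, sizeof(value), 0);
		if (err)
			return err;

		/* Step 7: the "check" program probes the refcount again; 1 means the
		 * overwritten reference was released and nothing leaked.
		 */
		err = bpf_prog_test_run_opts(bpf_program__fd(prog_check), &opts);
		if (err || opts.retval != 1)
			return -1;

		return 0;
	}

The per-CPU hash and lock-protected hash subtests in the diff below drive exactly this sequence via test_refcnt_leak(); the cgroup storage subtest triggers the same steps from cgroup/connect6 and fexit programs instead of bpf_prog_test_run_opts().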

File tree

2 files changed: +326 -1 lines changed


tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c

Lines changed: 166 additions & 1 deletion
@@ -3,7 +3,7 @@
 
 #include <test_progs.h>
 #include <network_helpers.h>
-
+#include "cgroup_helpers.h"
 #include "refcounted_kptr.skel.h"
 #include "refcounted_kptr_fail.skel.h"
 

@@ -44,3 +44,168 @@ void test_refcounted_kptr_wrong_owner(void)
 	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
 	refcounted_kptr__destroy(skel);
 }
+
+static void test_refcnt_leak(void *values, size_t values_sz, u64 flags, struct bpf_map *map,
+			     struct bpf_program *prog_leak, struct bpf_program *prog_check)
+{
+	int ret, fd, key = 0;
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .repeat = 1,
+	);
+
+	ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
+	if (!ASSERT_OK(ret, "bpf_map__update_elem init"))
+		return;
+
+	fd = bpf_program__fd(prog_leak);
+	ret = bpf_prog_test_run_opts(fd, &opts);
+	if (!ASSERT_OK(ret, "test_run_opts"))
+		return;
+	if (!ASSERT_EQ(opts.retval, 2, "retval refcount"))
+		return;
+
+	ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
+	if (!ASSERT_OK(ret, "bpf_map__update_elem dec refcount"))
+		return;
+
+	fd = bpf_program__fd(prog_check);
+	ret = bpf_prog_test_run_opts(fd, &opts);
+	ASSERT_OK(ret, "test_run_opts");
+	ASSERT_EQ(opts.retval, 1, "retval");
+}
+
+static void test_percpu_hash_refcount_leak(void)
+{
+	struct refcounted_kptr *skel;
+	size_t values_sz;
+	u64 *values;
+	int cpu_nr;
+
+	cpu_nr = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	values = calloc(cpu_nr, sizeof(u64));
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		return;
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) {
+		free(values);
+		return;
+	}
+
+	values_sz = cpu_nr * sizeof(u64);
+	memset(values, 0, values_sz);
+
+	test_refcnt_leak(values, values_sz, 0, skel->maps.pcpu_hash,
+			 skel->progs.pcpu_hash_refcount_leak,
+			 skel->progs.check_pcpu_hash_refcount);
+
+	refcounted_kptr__destroy(skel);
+	free(values);
+}
+
+struct lock_map_value {
+	u64 kptr;
+	struct bpf_spin_lock lock;
+	int value;
+};
+
+static void test_hash_lock_refcount_leak(void)
+{
+	struct lock_map_value value = {};
+	struct refcounted_kptr *skel;
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+		return;
+
+	test_refcnt_leak(&value, sizeof(value), BPF_F_LOCK, skel->maps.lock_hash,
+			 skel->progs.hash_lock_refcount_leak,
+			 skel->progs.check_hash_lock_refcount);
+
+	refcounted_kptr__destroy(skel);
+}
+
+static void test_cgroup_storage_lock_refcount_leak(void)
+{
+	int server_fd = -1, client_fd = -1;
+	struct lock_map_value value = {};
+	struct refcounted_kptr *skel;
+	u64 flags = BPF_F_LOCK;
+	struct bpf_link *link;
+	struct bpf_map *map;
+	int cgroup, err;
+
+	cgroup = test__join_cgroup("/cg_refcount_leak");
+	if (!ASSERT_GE(cgroup, 0, "test__join_cgroup"))
+		return;
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+		goto out;
+
+	link = bpf_program__attach_cgroup(skel->progs.cgroup_storage_refcount_leak, cgroup);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_cgroup"))
+		goto out;
+	skel->links.cgroup_storage_refcount_leak = link;
+
+	server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+	if (!ASSERT_GE(server_fd, 0, "start_server"))
+		goto out;
+
+	client_fd = connect_to_fd(server_fd, 0);
+	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+		goto out;
+
+	map = skel->maps.cgrp_strg;
+	err = bpf_map__lookup_elem(map, &cgroup, sizeof(cgroup), &value, sizeof(value), flags);
+	if (!ASSERT_OK(err, "bpf_map__lookup_elem"))
+		goto out;
+
+	ASSERT_EQ(value.value, 2, "refcount");
+
+	err = bpf_map__update_elem(map, &cgroup, sizeof(cgroup), &value, sizeof(value), flags);
+	if (!ASSERT_OK(err, "bpf_map__update_elem"))
+		goto out;
+
+	err = bpf_link__detach(skel->links.cgroup_storage_refcount_leak);
+	if (!ASSERT_OK(err, "bpf_link__detach"))
+		goto out;
+
+	link = bpf_program__attach(skel->progs.check_cgroup_storage_refcount);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+		goto out;
+	skel->links.check_cgroup_storage_refcount = link;
+
+	close(client_fd);
+	client_fd = connect_to_fd(server_fd, 0);
+	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+		goto out;
+
+	err = bpf_map__lookup_elem(map, &cgroup, sizeof(cgroup), &value, sizeof(value), flags);
+	if (!ASSERT_OK(err, "bpf_map__lookup_elem"))
+		goto out;
+
+	ASSERT_EQ(value.value, 1, "refcount");
+out:
+	close(cgroup);
+	refcounted_kptr__destroy(skel);
+	if (client_fd >= 0)
+		close(client_fd);
+	if (server_fd >= 0)
+		close(server_fd);
+}
+
+void test_kptr_refcount_leak(void)
+{
+	if (test__start_subtest("percpu_hash_refcount_leak"))
+		test_percpu_hash_refcount_leak();
+	if (test__start_subtest("hash_lock_refcount_leak"))
+		test_hash_lock_refcount_leak();
+	if (test__start_subtest("cgroup_storage_lock_refcount_leak"))
+		test_cgroup_storage_lock_refcount_leak();
+}

tools/testing/selftests/bpf/progs/refcounted_kptr.c

Lines changed: 160 additions & 0 deletions
@@ -7,6 +7,7 @@
 #include <bpf/bpf_core_read.h>
 #include "bpf_misc.h"
 #include "bpf_experimental.h"
+#include "bpf_tracing_net.h"
 
 extern void bpf_rcu_read_lock(void) __ksym;
 extern void bpf_rcu_read_unlock(void) __ksym;
@@ -568,4 +569,163 @@ int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
 	return 0;
 }
 
+private(leak) u64 ref;
+
+static u32 probe_read_refcount(void)
+{
+	u32 refcnt;
+
+	bpf_probe_read_kernel(&refcnt, sizeof(refcnt), (void *) ref);
+	return refcnt;
+}
+
+static int __insert_in_list(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+			    struct node_data __kptr **node)
+{
+	struct node_data *n, *m;
+
+	n = bpf_obj_new(typeof(*n));
+	if (!n)
+		return -1;
+
+	m = bpf_refcount_acquire(n);
+	n = bpf_kptr_xchg(node, n);
+	if (n) {
+		bpf_obj_drop(n);
+		bpf_obj_drop(m);
+		return -2;
+	}
+
+	bpf_spin_lock(lock);
+	bpf_list_push_front(head, &m->l);
+	ref = (u64)(void *) &m->ref;
+	bpf_spin_unlock(lock);
+	return probe_read_refcount();
+}
+
+static void *__lookup_map(void *map)
+{
+	int key = 0;
+
+	return bpf_map_lookup_elem(map, &key);
+}
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__type(key, int);
+	__type(value, struct map_value);
+	__uint(max_entries, 1);
+} pcpu_hash SEC(".maps");
+
+SEC("tc")
+int pcpu_hash_refcount_leak(void *ctx)
+{
+	struct map_value *v;
+
+	v = __lookup_map(&pcpu_hash);
+	if (!v)
+		return 0;
+
+	return __insert_in_list(&head, &lock, &v->node);
+}
+
+SEC("tc")
+int check_pcpu_hash_refcount(void *ctx)
+{
+	return probe_read_refcount();
+}
+
+struct lock_map_value {
+	struct node_data __kptr *node;
+	struct bpf_spin_lock lock;
+	int value;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, int);
+	__type(value, struct lock_map_value);
+	__uint(max_entries, 1);
+} lock_hash SEC(".maps");
+
+SEC("tc")
+int hash_lock_refcount_leak(void *ctx)
+{
+	struct lock_map_value *v;
+
+	v = __lookup_map(&lock_hash);
+	if (!v)
+		return 0;
+
+	bpf_spin_lock(&v->lock);
+	v->value = 42;
+	bpf_spin_unlock(&v->lock);
+	return __insert_in_list(&head, &lock, &v->node);
+}
+
+SEC("tc")
+int check_hash_lock_refcount(void *ctx)
+{
+	return probe_read_refcount();
+}
+
+struct {
+	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct lock_map_value);
+} cgrp_strg SEC(".maps");
+
+SEC("cgroup/connect6")
+int cgroup_storage_refcount_leak(struct bpf_sock_addr *ctx)
+{
+	struct lock_map_value *v;
+	struct tcp_sock *tsk;
+	struct bpf_sock *sk;
+	u32 refcnt;
+
+	if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
+		return 1;
+
+	sk = ctx->sk;
+	if (!sk)
+		return 1;
+
+	tsk = bpf_skc_to_tcp_sock(sk);
+	if (!tsk)
+		return 1;
+
+	v = bpf_cgrp_storage_get(&cgrp_strg, tsk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0,
+				 BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!v)
+		return 1;
+
+	refcnt = __insert_in_list(&head, &lock, &v->node);
+	bpf_spin_lock(&v->lock);
+	v->value = refcnt;
+	bpf_spin_unlock(&v->lock);
+	return 1;
+}
+
+SEC("fexit/inet_stream_connect")
+int BPF_PROG(check_cgroup_storage_refcount, struct socket *sock, struct sockaddr *uaddr, int addr_len,
+	     int flags)
+{
+	struct lock_map_value *v;
+	u32 refcnt;
+
+	if (uaddr->sa_family != AF_INET6)
+		return 0;
+
+	v = bpf_cgrp_storage_get(&cgrp_strg, sock->sk->sk_cgrp_data.cgroup, 0, 0);
+	if (!v)
+		return 0;
+
+	refcnt = probe_read_refcount();
+	bpf_spin_lock(&v->lock);
+	v->value = refcnt;
+	bpf_spin_unlock(&v->lock);
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
