@@ -3,7 +3,7 @@
 
 #include <test_progs.h>
 #include <network_helpers.h>
-
+#include "cgroup_helpers.h"
 #include "refcounted_kptr.skel.h"
 #include "refcounted_kptr_fail.skel.h"
 
@@ -44,3 +44,168 @@ void test_refcounted_kptr_wrong_owner(void)
 	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
 	refcounted_kptr__destroy(skel);
 }
+
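+/*
+ * Common runner: seed the map value, run @prog_leak via bpf_prog_test_run()
+ * (expected to report a refcount of 2 in its retval), overwrite the value from
+ * user space so the previously stored refcounted kptr is dropped, then run
+ * @prog_check, which is expected to see the refcount back at 1.
+ */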
+static void test_refcnt_leak(void *values, size_t values_sz, u64 flags, struct bpf_map *map,
+			     struct bpf_program *prog_leak, struct bpf_program *prog_check)
+{
+	int ret, fd, key = 0;
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .repeat = 1,
+	);
+
+	ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
+	if (!ASSERT_OK(ret, "bpf_map__update_elem init"))
+		return;
+
+	fd = bpf_program__fd(prog_leak);
+	ret = bpf_prog_test_run_opts(fd, &opts);
+	if (!ASSERT_OK(ret, "test_run_opts"))
+		return;
+	if (!ASSERT_EQ(opts.retval, 2, "retval refcount"))
+		return;
+
+	ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
+	if (!ASSERT_OK(ret, "bpf_map__update_elem dec refcount"))
+		return;
+
+	fd = bpf_program__fd(prog_check);
+	ret = bpf_prog_test_run_opts(fd, &opts);
+	ASSERT_OK(ret, "test_run_opts");
+	ASSERT_EQ(opts.retval, 1, "retval");
+}
+
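+/* Per-CPU hash variant: the value buffer holds one u64 slot per possible CPU. */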
+static void test_percpu_hash_refcount_leak(void)
+{
+	struct refcounted_kptr *skel;
+	size_t values_sz;
+	u64 *values;
+	int cpu_nr;
+
+	cpu_nr = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	values = calloc(cpu_nr, sizeof(u64));
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		return;
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) {
+		free(values);
+		return;
+	}
+
+	values_sz = cpu_nr * sizeof(u64);
+	memset(values, 0, values_sz);
+
+	test_refcnt_leak(values, values_sz, 0, skel->maps.pcpu_hash,
+			 skel->progs.pcpu_hash_refcount_leak,
+			 skel->progs.check_pcpu_hash_refcount);
+
+	refcounted_kptr__destroy(skel);
+	free(values);
+}
+
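+/* User-space mirror of the BPF-side map value used by the BPF_F_LOCK tests below. */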
+struct lock_map_value {
+	u64 kptr;
+	struct bpf_spin_lock lock;
+	int value;
+};
+
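+/* Hash map variant: the value embeds a bpf_spin_lock and is updated with BPF_F_LOCK. */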
+static void test_hash_lock_refcount_leak(void)
+{
+	struct lock_map_value value = {};
+	struct refcounted_kptr *skel;
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+		return;
+
+	test_refcnt_leak(&value, sizeof(value), BPF_F_LOCK, skel->maps.lock_hash,
+			 skel->progs.hash_lock_refcount_leak,
+			 skel->progs.check_hash_lock_refcount);
+
+	refcounted_kptr__destroy(skel);
+}
+
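+/*
+ * cgroup storage variant: the leak/check programs are attached to a cgroup and
+ * triggered by TCP connects from within it rather than by bpf_prog_test_run();
+ * the refcount is read back through the map value's counter field.
+ */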
+static void test_cgroup_storage_lock_refcount_leak(void)
+{
+	int server_fd = -1, client_fd = -1;
+	struct lock_map_value value = {};
+	struct refcounted_kptr *skel;
+	u64 flags = BPF_F_LOCK;
+	struct bpf_link *link;
+	struct bpf_map *map;
+	int cgroup, err;
+
+	cgroup = test__join_cgroup("/cg_refcount_leak");
+	if (!ASSERT_GE(cgroup, 0, "test__join_cgroup"))
+		return;
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+		goto out;
+
+	link = bpf_program__attach_cgroup(skel->progs.cgroup_storage_refcount_leak, cgroup);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_cgroup"))
+		goto out;
+	skel->links.cgroup_storage_refcount_leak = link;
+
+	server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+	if (!ASSERT_GE(server_fd, 0, "start_server"))
+		goto out;
+
+	client_fd = connect_to_fd(server_fd, 0);
+	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+		goto out;
+
+	map = skel->maps.cgrp_strg;
+	err = bpf_map__lookup_elem(map, &cgroup, sizeof(cgroup), &value, sizeof(value), flags);
+	if (!ASSERT_OK(err, "bpf_map__lookup_elem"))
+		goto out;
+
+	ASSERT_EQ(value.value, 2, "refcount");
+
+	err = bpf_map__update_elem(map, &cgroup, sizeof(cgroup), &value, sizeof(value), flags);
+	if (!ASSERT_OK(err, "bpf_map__update_elem"))
+		goto out;
+
+	err = bpf_link__detach(skel->links.cgroup_storage_refcount_leak);
+	if (!ASSERT_OK(err, "bpf_link__detach"))
+		goto out;
+
+	link = bpf_program__attach(skel->progs.check_cgroup_storage_refcount);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+		goto out;
+	skel->links.check_cgroup_storage_refcount = link;
+
+	close(client_fd);
+	client_fd = connect_to_fd(server_fd, 0);
+	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+		goto out;
+
+	err = bpf_map__lookup_elem(map, &cgroup, sizeof(cgroup), &value, sizeof(value), flags);
+	if (!ASSERT_OK(err, "bpf_map__lookup_elem"))
+		goto out;
+
+	ASSERT_EQ(value.value, 1, "refcount");
+out:
+	close(cgroup);
+	refcounted_kptr__destroy(skel);
+	if (client_fd >= 0)
+		close(client_fd);
+	if (server_fd >= 0)
+		close(server_fd);
+}
+
+void test_kptr_refcount_leak(void)
+{
+	if (test__start_subtest("percpu_hash_refcount_leak"))
+		test_percpu_hash_refcount_leak();
+	if (test__start_subtest("hash_lock_refcount_leak"))
+		test_hash_lock_refcount_leak();
+	if (test__start_subtest("cgroup_storage_lock_refcount_leak"))
+		test_cgroup_storage_lock_refcount_leak();
+}