Skip to content

Commit 18d58e4

Browse files
committed
bpf: udp: Use bpf_udp_iter_batch_item for bpf_udp_iter_state batch items
Prepare for the next patch, which tracks cookies between iterations, by converting struct sock **batch to union bpf_udp_iter_batch_item *batch inside struct bpf_udp_iter_state.

Signed-off-by: Jordan Rife <[email protected]>
Reviewed-by: Kuniyuki Iwashima <[email protected]>
1 parent fdbbaf8 commit 18d58e4

File tree

1 file changed

+13
-9
lines changed

1 file changed

+13
-9
lines changed

net/ipv4/udp.c

Lines changed: 13 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -3391,13 +3391,17 @@ struct bpf_iter__udp {
33913391
int bucket __aligned(8);
33923392
};
33933393

3394+
union bpf_udp_iter_batch_item {
3395+
struct sock *sock;
3396+
};
3397+
33943398
struct bpf_udp_iter_state {
33953399
struct udp_iter_state state;
33963400
unsigned int cur_sk;
33973401
unsigned int end_sk;
33983402
unsigned int max_sk;
33993403
int offset;
3400-
struct sock **batch;
3404+
union bpf_udp_iter_batch_item *batch;
34013405
bool st_bucket_done;
34023406
};
34033407

@@ -3460,7 +3464,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
34603464
}
34613465
if (iter->end_sk < iter->max_sk) {
34623466
sock_hold(sk);
3463-
iter->batch[iter->end_sk++] = sk;
3467+
iter->batch[iter->end_sk++].sock = sk;
34643468
}
34653469
batch_sks++;
34663470
}
@@ -3478,7 +3482,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
34783482
return ERR_PTR(err);
34793483
}
34803484

3481-
sk = iter->batch[iter->end_sk - 1];
3485+
sk = iter->batch[iter->end_sk - 1].sock;
34823486
sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next,
34833487
struct sock,
34843488
__sk_common.skc_portaddr_node);
@@ -3504,11 +3508,11 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
35043508
* socket to be iterated from the batch.
35053509
*/
35063510
iter->st_bucket_done = true;
3507-
return iter->batch[0];
3511+
return iter->batch[0].sock;
35083512
}
35093513

35103514
if (WARN_ON_ONCE(resizes >= MAX_REALLOC_ATTEMPTS))
3511-
return iter->batch[0];
3515+
return iter->batch[0].sock;
35123516

35133517
err = bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2, GFP_USER);
35143518
if (err)
@@ -3527,15 +3531,15 @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
35273531
* done with seq_show(), so unref the iter->cur_sk.
35283532
*/
35293533
if (iter->cur_sk < iter->end_sk) {
3530-
sock_put(iter->batch[iter->cur_sk++]);
3534+
sock_put(iter->batch[iter->cur_sk++].sock);
35313535
++iter->offset;
35323536
}
35333537

35343538
/* After updating iter->cur_sk, check if there are more sockets
35353539
* available in the current bucket batch.
35363540
*/
35373541
if (iter->cur_sk < iter->end_sk)
3538-
sk = iter->batch[iter->cur_sk];
3542+
sk = iter->batch[iter->cur_sk].sock;
35393543
else
35403544
/* Prepare a new batch. */
35413545
sk = bpf_iter_udp_batch(seq);
@@ -3600,7 +3604,7 @@ static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
36003604
static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
36013605
{
36023606
while (iter->cur_sk < iter->end_sk)
3603-
sock_put(iter->batch[iter->cur_sk++]);
3607+
sock_put(iter->batch[iter->cur_sk++].sock);
36043608
}
36053609

36063610
static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
@@ -3863,7 +3867,7 @@ DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
38633867
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
38643868
unsigned int new_batch_sz, int flags)
38653869
{
3866-
struct sock **new_batch;
3870+
union bpf_udp_iter_batch_item *new_batch;
38673871

38683872
new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
38693873
flags | __GFP_NOWARN);

Comments (0)