@@ -3391,13 +3391,17 @@ struct bpf_iter__udp {
 	int bucket __aligned(8);
 };
 
+union bpf_udp_iter_batch_item {
+	struct sock *sock;
+};
+
 struct bpf_udp_iter_state {
 	struct udp_iter_state state;
 	unsigned int cur_sk;
 	unsigned int end_sk;
 	unsigned int max_sk;
 	int offset;
-	struct sock **batch;
+	union bpf_udp_iter_batch_item *batch;
 	bool st_bucket_done;
 };
 
@@ -3460,7 +3464,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 			}
 			if (iter->end_sk < iter->max_sk) {
 				sock_hold(sk);
-				iter->batch[iter->end_sk++] = sk;
+				iter->batch[iter->end_sk++].sock = sk;
 			}
 			batch_sks++;
 		}
@@ -3478,7 +3482,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 			return ERR_PTR(err);
 		}
 
-		sk = iter->batch[iter->end_sk - 1];
+		sk = iter->batch[iter->end_sk - 1].sock;
 		sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next,
 				      struct sock,
 				      __sk_common.skc_portaddr_node);
@@ -3504,11 +3508,11 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
 		 * socket to be iterated from the batch.
 		 */
 		iter->st_bucket_done = true;
-		return iter->batch[0];
+		return iter->batch[0].sock;
 	}
 
 	if (WARN_ON_ONCE(resizes >= MAX_REALLOC_ATTEMPTS))
-		return iter->batch[0];
+		return iter->batch[0].sock;
 
 	err = bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2, GFP_USER);
 	if (err)
@@ -3527,15 +3531,15 @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	 * done with seq_show(), so unref the iter->cur_sk.
 	 */
 	if (iter->cur_sk < iter->end_sk) {
-		sock_put(iter->batch[iter->cur_sk++]);
+		sock_put(iter->batch[iter->cur_sk++].sock);
 		++iter->offset;
 	}
 
 	/* After updating iter->cur_sk, check if there are more sockets
 	 * available in the current bucket batch.
 	 */
 	if (iter->cur_sk < iter->end_sk)
-		sk = iter->batch[iter->cur_sk];
+		sk = iter->batch[iter->cur_sk].sock;
 	else
 		/* Prepare a new batch. */
 		sk = bpf_iter_udp_batch(seq);
@@ -3600,7 +3604,7 @@ static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
 static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
 {
 	while (iter->cur_sk < iter->end_sk)
-		sock_put(iter->batch[iter->cur_sk++]);
+		sock_put(iter->batch[iter->cur_sk++].sock);
 }
 
 static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
@@ -3863,7 +3867,7 @@ DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
 static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
 				      unsigned int new_batch_sz, int flags)
 {
-	struct sock **new_batch;
+	union bpf_udp_iter_batch_item *new_batch;
 
 	new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
 				   flags | __GFP_NOWARN);