Commit 69925f5

rxrpc: Fix locking issue
jira LE-1907
Rebuild_History Non-Buildable kernel-5.14.0-284.30.1.el9_2
commit-author David Howells <[email protected]>
commit ad25f5c

There's a locking issue with the per-netns list of calls in rxrpc.  The
pieces of code that add and remove a call from the list use write_lock()
and the calls procfile uses read_lock() to access it.  However, the timer
callback function may trigger a removal by trying to queue a call for
processing and finding that it's already queued - at which point it has a
spare refcount that it has to do something with.  Unfortunately, if it
puts the call and this reduces the refcount to 0, the call will be removed
from the list.  Unfortunately, since the _bh variants of the locking
functions aren't used, this can deadlock.

    ================================
    WARNING: inconsistent lock state
    5.18.0-rc3-build4+ ctrliq#10 Not tainted
    --------------------------------
    inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
    ksoftirqd/2/25 [HC0[0]:SC1[1]:HE1:SE0] takes:
    ffff888107ac4038 (&rxnet->call_lock){+.?.}-{2:2}, at: rxrpc_put_call+0x103/0x14b
    {SOFTIRQ-ON-W} state was registered at:
    ...

    Possible unsafe locking scenario:

          CPU0
          ----
     lock(&rxnet->call_lock);
     <Interrupt>
       lock(&rxnet->call_lock);

    *** DEADLOCK ***

    1 lock held by ksoftirqd/2/25:
     #0: ffff8881008ffdb0 ((&call->timer)){+.-.}-{0:0}, at: call_timer_fn+0x5/0x23d

Changes
=======
ver #2)
 - Changed to using list_next_rcu() rather than rcu_dereference() directly.

Fixes: 17926a7 ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <[email protected]>
cc: Marc Dionne <[email protected]>
cc: [email protected]
Signed-off-by: David S. Miller <[email protected]>
(cherry picked from commit ad25f5c)
Signed-off-by: Jonathan Maple <[email protected]>
1 parent 10ddcc8 commit 69925f5
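For orientation, here is a minimal sketch, not taken from the patch, of the locking pattern the change moves the call list to: writers take a plain spinlock with the _bh variant (so a softirq-context put of the same lock cannot deadlock against them) and edit the list with the RCU helpers, while the procfile reader walks the list under rcu_read_lock() alone. The demo_* names are invented for illustration.

/* Illustrative only; the demo_* names are not part of the patch. */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static LIST_HEAD(demo_list);		/* stands in for rxnet->calls */
static DEFINE_SPINLOCK(demo_lock);	/* stands in for rxnet->call_lock */

struct demo_entry {
	struct list_head link;
};

/* Writer side: may race with a softirq (e.g. a timer callback) that needs
 * the same lock, so BHs are disabled while it is held, and the list is
 * edited with the RCU helpers so readers always see a well-formed list.
 */
static void demo_add(struct demo_entry *e)
{
	spin_lock_bh(&demo_lock);
	list_add_tail_rcu(&e->link, &demo_list);
	spin_unlock_bh(&demo_lock);
}

/* Reader side (the procfile): no spinlock at all, just an RCU read lock. */
static void demo_walk(void)
{
	struct list_head *p;

	rcu_read_lock();
	list_for_each_rcu(p, &demo_list)
		; /* inspect list_entry(p, struct demo_entry, link) */
	rcu_read_unlock();
}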

File tree

8 files changed: +62, -22 lines changed

fs/seq_file.c

+32
@@ -947,6 +947,38 @@ struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
 }
 EXPORT_SYMBOL(seq_list_next);
 
+struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos)
+{
+	struct list_head *lh;
+
+	list_for_each_rcu(lh, head)
+		if (pos-- == 0)
+			return lh;
+
+	return NULL;
+}
+EXPORT_SYMBOL(seq_list_start_rcu);
+
+struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos)
+{
+	if (!pos)
+		return head;
+
+	return seq_list_start_rcu(head, pos - 1);
+}
+EXPORT_SYMBOL(seq_list_start_head_rcu);
+
+struct list_head *seq_list_next_rcu(void *v, struct list_head *head,
+				    loff_t *ppos)
+{
+	struct list_head *lh;
+
+	lh = list_next_rcu((struct list_head *)v);
+	++*ppos;
+	return lh == head ? NULL : lh;
+}
+EXPORT_SYMBOL(seq_list_next_rcu);
+
 /**
  * seq_hlist_start - start an iteration of a hlist
  * @head: the head of the hlist
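These helpers mirror seq_list_start_head()/seq_list_next() but traverse the list with RCU-aware dereferences. A hedged sketch of how a seq_file iterator might consume them, continuing the illustrative demo_* example from above (none of these names are part of the patch):

#include <linux/seq_file.h>

/* ->start/->next/->stop for a procfile that walks demo_list under RCU. */
static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return seq_list_start_head_rcu(&demo_list, *pos);
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next_rcu(v, &demo_list, pos);
}

static void demo_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

static int demo_seq_show(struct seq_file *seq, void *v)
{
	if (v == &demo_list) {		/* the list head: print a header line */
		seq_puts(seq, "entries:\n");
		return 0;
	}
	seq_printf(seq, "entry %p\n", list_entry(v, struct demo_entry, link));
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_seq_start,
	.next	= demo_seq_next,
	.stop	= demo_seq_stop,
	.show	= demo_seq_show,
};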

include/linux/list.h

+10
@@ -570,6 +570,16 @@ static inline void list_splice_tail_init(struct list_head *list,
 #define list_for_each(pos, head) \
 	for (pos = (head)->next; pos != (head); pos = pos->next)
 
+/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos:	the &struct list_head to use as a loop cursor.
+ * @head:	the head for your list.
+ */
+#define list_for_each_rcu(pos, head)		  \
+	for (pos = rcu_dereference((head)->next); \
+	     !list_is_head(pos, (head));	  \
+	     pos = rcu_dereference(pos->next))
+
 /**
  * list_for_each_continue - continue iteration over a list
  * @pos:	the &struct list_head to use as a loop cursor.
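Unlike list_for_each_entry_rcu(), this macro walks raw struct list_head cursors, which is exactly what the seq_list_*_rcu() helpers above need. One caveat, sketched below under the same illustrative demo_* assumptions (extending the earlier demo_entry with a struct rcu_head): RCU readers are only safe if unlinked entries are not freed until a grace period has elapsed, e.g. via kfree_rcu().

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head link;
	struct rcu_head rcu;
};

/* Illustrative removal path paired with RCU readers (not from the patch). */
static void demo_remove(struct demo_entry *e, spinlock_t *lock)
{
	spin_lock_bh(lock);		/* exclude other writers, incl. softirq */
	list_del_rcu(&e->link);		/* unlink; existing readers may still see it */
	spin_unlock_bh(lock);

	kfree_rcu(e, rcu);		/* free only after an RCU grace period */
}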

include/linux/seq_file.h

+4
@@ -262,6 +262,10 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
 extern struct list_head *seq_list_next(void *v, struct list_head *head,
 		loff_t *ppos);
 
+extern struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next_rcu(void *v, struct list_head *head, loff_t *ppos);
+
 /*
  * Helpers for iteration over hlist_head-s in seq_files
  */

net/rxrpc/ar-internal.h

+1 -1

@@ -60,7 +60,7 @@ struct rxrpc_net {
 	struct proc_dir_entry	*proc_net;	/* Subdir in /proc/net */
 	u32			epoch;		/* Local epoch for detecting local-end reset */
 	struct list_head	calls;		/* List of calls active in this namespace */
-	rwlock_t		call_lock;	/* Lock for ->calls */
+	spinlock_t		call_lock;	/* Lock for ->calls */
 	atomic_t		nr_calls;	/* Count of allocated calls */
 
 	atomic_t		nr_conns;

net/rxrpc/call_accept.c

+3 -3

@@ -140,9 +140,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	write_lock(&rxnet->call_lock);
-	list_add_tail(&call->link, &rxnet->calls);
-	write_unlock(&rxnet->call_lock);
+	spin_lock_bh(&rxnet->call_lock);
+	list_add_tail_rcu(&call->link, &rxnet->calls);
+	spin_unlock_bh(&rxnet->call_lock);
 
 	b->call_backlog[call_head] = call;
 	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));

net/rxrpc/call_object.c

+9 -9

@@ -337,9 +337,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	write_lock(&rxnet->call_lock);
-	list_add_tail(&call->link, &rxnet->calls);
-	write_unlock(&rxnet->call_lock);
+	spin_lock_bh(&rxnet->call_lock);
+	list_add_tail_rcu(&call->link, &rxnet->calls);
+	spin_unlock_bh(&rxnet->call_lock);
 
 	/* From this point on, the call is protected by its own lock. */
 	release_sock(&rx->sk);
@@ -633,9 +633,9 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
 		if (!list_empty(&call->link)) {
-			write_lock(&rxnet->call_lock);
+			spin_lock_bh(&rxnet->call_lock);
 			list_del_init(&call->link);
-			write_unlock(&rxnet->call_lock);
+			spin_unlock_bh(&rxnet->call_lock);
 		}
 
 		rxrpc_cleanup_call(call);
@@ -707,7 +707,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 	_enter("");
 
 	if (!list_empty(&rxnet->calls)) {
-		write_lock(&rxnet->call_lock);
+		spin_lock_bh(&rxnet->call_lock);
 
 		while (!list_empty(&rxnet->calls)) {
 			call = list_entry(rxnet->calls.next,
@@ -722,12 +722,12 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 			       rxrpc_call_states[call->state],
 			       call->flags, call->events);
 
-			write_unlock(&rxnet->call_lock);
+			spin_unlock_bh(&rxnet->call_lock);
 			cond_resched();
-			write_lock(&rxnet->call_lock);
+			spin_lock_bh(&rxnet->call_lock);
 		}
 
-		write_unlock(&rxnet->call_lock);
+		spin_unlock_bh(&rxnet->call_lock);
 	}
 
 	atomic_dec(&rxnet->nr_calls);

net/rxrpc/net_ns.c

+1 -1

@@ -50,7 +50,7 @@ static __net_init int rxrpc_init_net(struct net *net)
 	rxnet->epoch |= RXRPC_RANDOM_EPOCH;
 
 	INIT_LIST_HEAD(&rxnet->calls);
-	rwlock_init(&rxnet->call_lock);
+	spin_lock_init(&rxnet->call_lock);
 	atomic_set(&rxnet->nr_calls, 1);
 
 	atomic_set(&rxnet->nr_conns, 1);

net/rxrpc/proc.c

+2 -8

@@ -26,29 +26,23 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
  */
 static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
 	__acquires(rcu)
-	__acquires(rxnet->call_lock)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
 	rcu_read_lock();
-	read_lock(&rxnet->call_lock);
-	return seq_list_start_head(&rxnet->calls, *_pos);
+	return seq_list_start_head_rcu(&rxnet->calls, *_pos);
 }
 
 static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
-	return seq_list_next(v, &rxnet->calls, pos);
+	return seq_list_next_rcu(v, &rxnet->calls, pos);
 }
 
 static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
-	__releases(rxnet->call_lock)
 	__releases(rcu)
 {
-	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
-
-	read_unlock(&rxnet->call_lock);
 	rcu_read_unlock();
 }
 
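For completeness, these handlers are consumed through a struct seq_operations elsewhere in rxrpc; the wiring below is a hedged sketch of the usual registration via proc_create_net() and is shown for orientation only, not as part of this diff.

/* Sketch of the usual wiring; not part of this diff. */
static const struct seq_operations demo_rxrpc_call_seq_ops = {
	.start	= rxrpc_call_seq_start,
	.next	= rxrpc_call_seq_next,
	.stop	= rxrpc_call_seq_stop,
	.show	= rxrpc_call_seq_show,	/* untouched by this patch */
};

/* Typically registered from a per-netns init path, roughly:
 *	proc_create_net("calls", 0444, rxnet->proc_net,
 *			&demo_rxrpc_call_seq_ops,
 *			sizeof(struct seq_net_private));
 */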
