Commit 3c54da1

Merge tag 'kernel-5.14.0-503.3.1.el9_5' into merge-up-kernel-5.14.0-503.3.1.el9_5

kernel-5.14.0-503.3.1.el9_5

Signed-off-by: Lucas Zampieri <[email protected]>

2 parents: e7841ca + 08e7937

File tree: 16 files changed, +134 additions, -36 deletions

drivers/nvme/host/pci.c

Lines changed: 2 additions & 1 deletion
@@ -862,7 +862,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 	nvme_start_request(req);
 	return BLK_STS_OK;
 out_unmap_data:
-	nvme_unmap_data(dev, req);
+	if (blk_rq_nr_phys_segments(req))
+		nvme_unmap_data(dev, req);
 out_free_cmd:
 	nvme_cleanup_cmd(req);
 	return ret;
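
Context for the fix: nvme_prep_rq() only maps data when the request has physical segments, so the unwind path must be guarded by the same test or it would unmap state that was never set up. A minimal sketch of the symmetric setup/unwind shape, with map_data()/map_metadata()/unmap_data() as hypothetical stand-ins for the NVMe helpers:

	/* Sketch only: error paths must mirror the conditions of the
	 * setup steps they undo. */
	if (blk_rq_nr_phys_segments(req)) {
		ret = map_data(dev, req);	/* ran only for data-carrying requests */
		if (ret)
			goto out_free_cmd;
	}
	if (blk_integrity_rq(req)) {
		ret = map_metadata(dev, req);	/* a later step that can fail */
		if (ret)
			goto out_unmap_data;
	}
	return BLK_STS_OK;
out_unmap_data:
	if (blk_rq_nr_phys_segments(req))	/* undo only what actually ran */
		unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;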

fs/fuse/dev.c

Lines changed: 4 additions & 2 deletions
@@ -1612,9 +1612,11 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,

 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
 		err = fuse_copy_page(cs, &page, offset, this_num, 0);
-		if (!err && offset == 0 &&
-		    (this_num == PAGE_SIZE || file_size == end))
+		if (!PageUptodate(page) && !err && offset == 0 &&
+		    (this_num == PAGE_SIZE || file_size == end)) {
+			zero_user_segment(page, this_num, PAGE_SIZE);
 			SetPageUptodate(page);
+		}
 		unlock_page(page);
 		put_page(page);
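
The fuse change closes an uninitialized-memory exposure: when the notify path copies only this_num bytes into a freshly allocated page, the tail used to stay uninitialized while the page was still marked uptodate. zero_user_segment(page, from, to) clears bytes [from, to) of a page; a minimal sketch of the invariant, with finish_partial_page() as a hypothetical helper:

#include <linux/highmem.h>	/* zero_user_segment() */
#include <linux/pagemap.h>	/* SetPageUptodate() */

/* After writing 'copied' bytes into a fresh page, clear the remainder
 * before publishing the page as uptodate. */
static void finish_partial_page(struct page *page, unsigned int copied)
{
	if (copied < PAGE_SIZE)
		zero_user_segment(page, copied, PAGE_SIZE);
	SetPageUptodate(page);
}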

include/linux/perf_event.h

Lines changed: 1 addition & 0 deletions
@@ -788,6 +788,7 @@ struct perf_event {
 	struct irq_work			pending_irq;
 	struct callback_head		pending_task;
 	unsigned int			pending_work;
+	struct rcuwait			pending_work_wait;

 	atomic_t			event_limit;

include/linux/task_work.h

Lines changed: 2 additions & 1 deletion
@@ -30,7 +30,8 @@ int task_work_add(struct task_struct *task, struct callback_head *twork,

 struct callback_head *task_work_cancel_match(struct task_struct *task,
 		bool (*match)(struct callback_head *, void *data), void *data);
-struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
+struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t);
+bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
 void task_work_run(void);

 static inline void exit_task_work(struct task_struct *task)
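
The header change splits two cancellation modes: task_work_cancel_func() keeps the old cancel-by-handler semantics, while task_work_cancel() now takes the exact callback_head and reports whether it was still queued. A usage sketch under assumed names (my_func, my_work, and demo are hypothetical):

#include <linux/task_work.h>

static void my_func(struct callback_head *cb) { /* deferred work */ }
static struct callback_head my_work;

static void demo(struct task_struct *task)
{
	init_task_work(&my_work, my_func);
	if (task_work_add(task, &my_work, TWA_RESUME))
		return;	/* task is already exiting */

	/* Either: remove the last queued work whose handler is my_func
	 * (returns the dequeued callback_head, or NULL). */
	struct callback_head *old = task_work_cancel_func(task, my_func);

	/* Or: remove this exact callback_head; true only if it was still
	 * queued (false here if the call above already dequeued it). */
	bool was_queued = task_work_cancel(task, &my_work);
}

The by-pointer variant is what lets perf below cancel one specific event's pending_task without disturbing other queued works that share the same handler.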

include/linux/udp.h

Lines changed: 1 addition & 1 deletion
@@ -91,7 +91,7 @@ struct udp_sock {
 	int			forward_threshold;
 };

-#define UDP_MAX_SEGMENTS	(1 << 6UL)
+#define UDP_MAX_SEGMENTS	(1 << 7UL)

 static inline struct udp_sock *udp_sk(const struct sock *sk)
 {
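
UDP_MAX_SEGMENTS caps how many segments a single UDP GSO send may produce; the bump from 1 << 6 to 1 << 7 raises it from 64 to 128. A user-space sketch of where the limit applies (sizes illustrative; UDP_SEGMENT comes from the uapi header on libcs that lack it):

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT	103	/* from linux/udp.h if libc lacks it */
#endif

static void enable_udp_gso(int fd)
{
	int gso_size = 1200;	/* payload bytes per segment */

	/* A 120,000-byte send then yields 100 segments: over the old
	 * 64-segment cap, within the new 128-segment cap. */
	setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
}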

include/linux/virtio_net.h

Lines changed: 21 additions & 3 deletions
@@ -3,8 +3,8 @@
 #define _LINUX_VIRTIO_NET_H

 #include <linux/if_vlan.h>
+#include <linux/udp.h>
 #include <uapi/linux/tcp.h>
-#include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>

 static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
@@ -151,9 +151,27 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 		unsigned int nh_off = p_off;
 		struct skb_shared_info *shinfo = skb_shinfo(skb);

-		/* UFO may not include transport header in gso_size. */
-		if (gso_type & SKB_GSO_UDP)
+		switch (gso_type & ~SKB_GSO_TCP_ECN) {
+		case SKB_GSO_UDP:
+			/* UFO may not include transport header in gso_size. */
 			nh_off -= thlen;
+			break;
+		case SKB_GSO_UDP_L4:
+			if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+				return -EINVAL;
+			if (skb->csum_offset != offsetof(struct udphdr, check))
+				return -EINVAL;
+			if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
+				return -EINVAL;
+			if (gso_type != SKB_GSO_UDP_L4)
+				return -EINVAL;
+			break;
+		case SKB_GSO_TCPV4:
+		case SKB_GSO_TCPV6:
+			if (skb->csum_offset != offsetof(struct tcphdr, check))
+				return -EINVAL;
+			break;
+		}

 		/* Too small packets are not really GSO ones. */
 		if (skb->len - nh_off > gso_size) {
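
virtio_net_hdr_to_skb() now validates each GSO type explicitly instead of only special-casing UFO: SKB_GSO_UDP_L4 (USO) packets must request checksum offload, point csum_offset at the UDP checksum field, and keep the payload within gso_size * UDP_MAX_SEGMENTS; TCP GSO must point csum_offset at the TCP checksum. A sketch of a header that would pass the new USO checks (field values illustrative; IPv4 without VLAN assumed):

#include <stddef.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/virtio_net.h>

static const struct virtio_net_hdr uso_hdr = {
	.flags		= VIRTIO_NET_HDR_F_NEEDS_CSUM,	/* now mandatory for USO */
	.gso_type	= VIRTIO_NET_HDR_GSO_UDP_L4,
	.hdr_len	= ETH_HLEN + sizeof(struct iphdr) + sizeof(struct udphdr),
	.gso_size	= 1400,				/* UDP payload bytes per segment */
	.csum_start	= ETH_HLEN + sizeof(struct iphdr),
	.csum_offset	= offsetof(struct udphdr, check), /* must hit the UDP checksum */
};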

kernel/events/core.c

Lines changed: 38 additions & 11 deletions
@@ -2284,18 +2284,14 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 	}

 	if (event->pending_sigtrap) {
-		bool dec = true;
-
 		event->pending_sigtrap = 0;
 		if (state != PERF_EVENT_STATE_OFF &&
-		    !event->pending_work) {
+		    !event->pending_work &&
+		    !task_work_add(current, &event->pending_task, TWA_RESUME)) {
 			event->pending_work = 1;
-			dec = false;
-			WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
-			task_work_add(current, &event->pending_task, TWA_RESUME);
-		}
-		if (dec)
+		} else {
 			local_dec(&event->ctx->nr_pending);
+		}
 	}

 	perf_event_set_state(event, state);
@@ -5187,9 +5183,35 @@ static bool exclusive_event_installable(struct perf_event *event,
 static void perf_addr_filters_splice(struct perf_event *event,
 				     struct list_head *head);

+static void perf_pending_task_sync(struct perf_event *event)
+{
+	struct callback_head *head = &event->pending_task;
+
+	if (!event->pending_work)
+		return;
+	/*
+	 * If the task is queued to the current task's queue, we
+	 * obviously can't wait for it to complete. Simply cancel it.
+	 */
+	if (task_work_cancel(current, head)) {
+		event->pending_work = 0;
+		local_dec(&event->ctx->nr_pending);
+		return;
+	}
+
+	/*
+	 * All accesses related to the event are within the same
+	 * non-preemptible section in perf_pending_task(). The RCU
+	 * grace period before the event is freed will make sure all
+	 * those accesses are complete by then.
+	 */
+	rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
+}
+
 static void _free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending_irq);
+	perf_pending_task_sync(event);

 	unaccount_event(event);

@@ -6807,24 +6829,28 @@ static void perf_pending_task(struct callback_head *head)
 	struct perf_event *event = container_of(head, struct perf_event, pending_task);
 	int rctx;

+	/*
+	 * All accesses to the event must belong to the same implicit RCU read-side
+	 * critical section as the ->pending_work reset. See comment in
+	 * perf_pending_task_sync().
+	 */
+	preempt_disable_notrace();
 	/*
 	 * If we 'fail' here, that's OK, it means recursion is already disabled
 	 * and we won't recurse 'further'.
 	 */
-	preempt_disable_notrace();
 	rctx = perf_swevent_get_recursion_context();

 	if (event->pending_work) {
 		event->pending_work = 0;
 		perf_sigtrap(event);
 		local_dec(&event->ctx->nr_pending);
+		rcuwait_wake_up(&event->pending_work_wait);
 	}

 	if (rctx >= 0)
 		perf_swevent_put_recursion_context(rctx);
 	preempt_enable_notrace();
-
-	put_event(event);
 }

 #ifdef CONFIG_GUEST_PERF_EVENTS
@@ -11939,6 +11965,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	init_waitqueue_head(&event->waitq);
 	init_irq_work(&event->pending_irq, perf_pending_irq);
 	init_task_work(&event->pending_task, perf_pending_task);
+	rcuwait_init(&event->pending_work_wait);

 	mutex_init(&event->mmap_mutex);
 	raw_spin_lock_init(&event->addr_filters.lock);
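
The perf refactor swaps a refcount for a wait: instead of pinning the event (atomic_long_inc_not_zero() in event_sched_out(), put_event() in perf_pending_task()), _free_event() now cancels the pending task work or blocks until it has run. A minimal sketch of the rcuwait handshake, with struct work_state and both functions as hypothetical distillations:

#include <linux/rcuwait.h>
#include <linux/sched.h>

struct work_state {
	struct rcuwait	wait;		/* rcuwait_init() at allocation time */
	int		pending;	/* mirrors event->pending_work */
};

/* Task-work side, like perf_pending_task(). */
static void worker(struct work_state *s)
{
	preempt_disable();	/* clear + wakeup in one non-preemptible section */
	if (s->pending) {
		/* ... perform the deferred work ... */
		s->pending = 0;
		rcuwait_wake_up(&s->wait);	/* release a blocked freer */
	}
	preempt_enable();
}

/* Teardown side, like perf_pending_task_sync(): try to cancel first,
 * otherwise block until the worker has run. */
static void teardown(struct work_state *s)
{
	if (!s->pending)
		return;
	rcuwait_wait_event(&s->wait, !s->pending, TASK_UNINTERRUPTIBLE);
}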

kernel/irq/manage.c

Lines changed: 1 addition & 1 deletion
@@ -1297,7 +1297,7 @@ static int irq_thread(void *data)
 	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
 	 * oneshot mask bit can be set.
 	 */
-	task_work_cancel(current, irq_thread_dtor);
+	task_work_cancel_func(current, irq_thread_dtor);
 	return 0;
 }

kernel/task_work.c

Lines changed: 29 additions & 5 deletions
@@ -117,9 +117,9 @@ static bool task_work_func_match(struct callback_head *cb, void *data)
 }

 /**
- * task_work_cancel - cancel a pending work added by task_work_add()
- * @task: the task which should execute the work
- * @func: identifies the work to remove
+ * task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
+ * @task: the task which should execute the func's work
+ * @func: identifies the func to match with a work to remove
  *
  * Find the last queued pending work with ->func == @func and remove
  * it from queue.
@@ -128,11 +128,35 @@ static bool task_work_func_match(struct callback_head *cb, void *data)
  * The found work or NULL if not found.
  */
 struct callback_head *
-task_work_cancel(struct task_struct *task, task_work_func_t func)
+task_work_cancel_func(struct task_struct *task, task_work_func_t func)
 {
 	return task_work_cancel_match(task, task_work_func_match, func);
 }

+static bool task_work_match(struct callback_head *cb, void *data)
+{
+	return cb == data;
+}
+
+/**
+ * task_work_cancel - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @cb: the callback to remove if queued
+ *
+ * Remove a callback from a task's queue if queued.
+ *
+ * RETURNS:
+ * True if the callback was queued and got cancelled, false otherwise.
+ */
+bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
+{
+	struct callback_head *ret;
+
+	ret = task_work_cancel_match(task, task_work_match, cb);
+
+	return ret == cb;
+}
+
 /**
  * task_work_run - execute the works added by task_work_add()
  *
@@ -165,7 +189,7 @@ void task_work_run(void)
 		if (!work)
 			break;
 		/*
-		 * Synchronize with task_work_cancel(). It can not remove
+		 * Synchronize with task_work_cancel_match(). It can not remove
 		 * the first entry == work, cmpxchg(task_works) must fail.
 		 * But it can remove another entry from the ->next list.
 		 */

net/ipv4/tcp_offload.c

Lines changed: 3 additions & 0 deletions
@@ -73,6 +73,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	if (thlen < sizeof(*th))
 		goto out;

+	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
+		goto out;
+
 	if (!pskb_may_pull(skb, thlen))
 		goto out;
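
The new bail-out encodes an assumption tcp_gso_segment() makes further down: for a CHECKSUM_PARTIAL GSO skb, the checksum must start exactly at the TCP header. A one-function restatement of the invariant (sketch; skb_checksum_start() resolves to skb->head + skb->csum_start):

#include <linux/skbuff.h>

static bool tcp_gso_csum_start_ok(const struct sk_buff *skb)
{
	return skb_checksum_start(skb) == skb_transport_header(skb);
}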
