Skip to content

Commit beb7c0c

Browse files
committed
xen/netfront: force data bouncing when backend is untrusted
jira VULN-1439 cve CVE-2022-33741 commit-author Roger Pau Monne <[email protected]> commit 4491001 upstream-diff Some merge conflicts were fixed up mainly due to the fact that this version of the driver does not have xdp support. The content of the added code itself is identical to the upstream change with the exception of PAGE_SIZE replacing XEN_PAGE_SIZE. In this kernel there is no difference between the two, whereas in future kernels the ARM kernel page size might be 64k while the xen page size stays 4k. Bounce all data on the skbs to be transmitted into zeroed pages if the backend is untrusted. This avoids leaking data present in the pages shared with the backend but not part of the skb fragments. This requires introducing a new helper in order to allocate skbs with a size multiple of XEN_PAGE_SIZE so we don't leak contiguous data on the granted pages. Reporting whether the backend is to be trusted can be done using a module parameter, or from the xenstore frontend path as set by the toolstack when adding the device. This is CVE-2022-33741, part of XSA-403. Signed-off-by: Roger Pau Monné <[email protected]> Reviewed-by: Juergen Gross <[email protected]> Signed-off-by: Juergen Gross <[email protected]> (cherry picked from commit 4491001) Signed-off-by: Brett Mastbergen <[email protected]>
1 parent 224be9b commit beb7c0c

File tree

1 file changed

+49
-2
lines changed

1 file changed

+49
-2
lines changed

drivers/net/xen-netfront.c

+49-2
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
6464
MODULE_PARM_DESC(max_queues,
6565
"Maximum number of queues per virtual interface");
6666

67+
static bool __read_mostly xennet_trusted = true;
68+
module_param_named(trusted, xennet_trusted, bool, 0644);
69+
MODULE_PARM_DESC(trusted, "Is the backend trusted");
70+
6771
static const struct ethtool_ops xennet_ethtool_ops;
6872

6973
struct netfront_cb {
@@ -160,6 +164,9 @@ struct netfront_info {
160164
struct netfront_stats __percpu *rx_stats;
161165
struct netfront_stats __percpu *tx_stats;
162166

167+
/* Should skbs be bounced into a zeroed buffer? */
168+
bool bounce;
169+
163170
atomic_t rx_gso_checksum_fixup;
164171
};
165172

@@ -516,6 +523,34 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
516523
return queue_idx;
517524
}
518525

526+
struct sk_buff *bounce_skb(const struct sk_buff *skb)
527+
{
528+
unsigned int headerlen = skb_headroom(skb);
529+
/* Align size to allocate full pages and avoid contiguous data leaks */
530+
unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
531+
PAGE_SIZE);
532+
struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
533+
534+
if (!n)
535+
return NULL;
536+
537+
if (!IS_ALIGNED((uintptr_t)n->head, PAGE_SIZE)) {
538+
WARN_ONCE(1, "misaligned skb allocated\n");
539+
kfree_skb(n);
540+
return NULL;
541+
}
542+
543+
/* Set the data pointer */
544+
skb_reserve(n, headerlen);
545+
/* Set the tail pointer and length */
546+
skb_put(n, skb->len);
547+
548+
BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
549+
550+
skb_copy_header(n, skb);
551+
return n;
552+
}
553+
519554
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
520555
{
521556
struct netfront_info *np = netdev_priv(dev);
@@ -563,9 +598,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
563598

564599
/* The first req should be at least ETH_HLEN size or the packet will be
565600
* dropped by netback.
601+
*
602+
* If the backend is not trusted bounce all data to zeroed pages to
603+
* avoid exposing contiguous data on the granted page not belonging to
604+
* the skb.
566605
*/
567-
if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
568-
nskb = skb_copy(skb, GFP_ATOMIC);
606+
if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
607+
nskb = bounce_skb(skb);
569608
if (!nskb)
570609
goto drop;
571610
dev_kfree_skb_any(skb);
@@ -1774,6 +1813,10 @@ static int talk_to_netback(struct xenbus_device *dev,
17741813

17751814
info->netdev->irq = 0;
17761815

1816+
/* Check if backend is trusted. */
1817+
info->bounce = !xennet_trusted ||
1818+
!xenbus_read_unsigned(dev->nodename, "trusted", 1);
1819+
17771820
/* Check if backend supports multiple queues */
17781821
err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
17791822
"multi-queue-max-queues", "%u", &max_queues);
@@ -1936,6 +1979,10 @@ static int xennet_connect(struct net_device *dev)
19361979
if (err)
19371980
return err;
19381981

1982+
if (np->bounce)
1983+
dev_info(&np->xbdev->dev,
1984+
"bouncing transmitted data to zeroed pages\n");
1985+
19391986
/* talk_to_netback() sets the correct number of queues */
19401987
num_queues = dev->real_num_tx_queues;
19411988

0 commit comments

Comments
 (0)