Skip to content

Commit 4944724

Browse files
committed
xen/blkfront: force data bouncing when backend is untrusted
jira VULN-1039 cve CVE-2022-33742 commit-author Roger Pau Monne <[email protected]> commit 2400617 upstream-diff There were a lot of merge conflicts with this change as the upstream version of this file has had a lot of development. Still there is pretty much a 1:1 mapping of the changes against the upstream and the changes against this kernel Split the current bounce buffering logic used with persistent grants into its own option, and allow enabling it independently of persistent grants. This allows to reuse the same code paths to perform the bounce buffering required to avoid leaking contiguous data in shared pages not part of the request fragments. Reporting whether the backend is to be trusted can be done using a module parameter, or from the xenstore frontend path as set by the toolstack when adding the device. This is CVE-2022-33742, part of XSA-403. Signed-off-by: Roger Pau Monné <[email protected]> Reviewed-by: Juergen Gross <[email protected]> Signed-off-by: Juergen Gross <[email protected]> (cherry picked from commit 2400617) Signed-off-by: Brett Mastbergen <[email protected]>
1 parent beb7c0c commit 4944724

File tree

1 file changed

+27
-14
lines changed

1 file changed

+27
-14
lines changed

drivers/block/xen-blkfront.c

+27-14
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,10 @@ static unsigned int xen_blkif_max_segments = 32;
9898
module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
9999
MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
100100

101+
static bool __read_mostly xen_blkif_trusted = true;
102+
module_param_named(trusted, xen_blkif_trusted, bool, 0644);
103+
MODULE_PARM_DESC(trusted, "Is the backend trusted");
104+
101105
#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
102106

103107
/*
@@ -131,6 +135,7 @@ struct blkfront_info
131135
unsigned int discard_granularity;
132136
unsigned int discard_alignment;
133137
unsigned int feature_persistent:1;
138+
unsigned int bounce:1;
134139
unsigned int max_indirect_segments;
135140
int is_ready;
136141
};
@@ -200,7 +205,7 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
200205
if (!gnt_list_entry)
201206
goto out_of_memory;
202207

203-
if (info->feature_persistent) {
208+
if (info->bounce) {
204209
granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
205210
if (!granted_page) {
206211
kfree(gnt_list_entry);
@@ -220,7 +225,7 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
220225
list_for_each_entry_safe(gnt_list_entry, n,
221226
&info->grants, node) {
222227
list_del(&gnt_list_entry->node);
223-
if (info->feature_persistent)
228+
if (info->bounce)
224229
__free_page(pfn_to_page(gnt_list_entry->pfn));
225230
kfree(gnt_list_entry);
226231
i--;
@@ -249,7 +254,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
249254
/* Assign a gref to this page */
250255
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
251256
BUG_ON(gnt_list_entry->gref == -ENOSPC);
252-
if (!info->feature_persistent) {
257+
if (!info->bounce) {
253258
BUG_ON(!pfn);
254259
gnt_list_entry->pfn = pfn;
255260
}
@@ -506,7 +511,7 @@ static int blkif_queue_request(struct request *req)
506511
kunmap_atomic(segments);
507512

508513
n = i / SEGS_PER_INDIRECT_FRAME;
509-
if (!info->feature_persistent) {
514+
if (!info->bounce) {
510515
struct page *indirect_page;
511516

512517
/* Fetch a pre-allocated page to use for indirect grefs */
@@ -527,7 +532,7 @@ static int blkif_queue_request(struct request *req)
527532

528533
info->shadow[id].grants_used[i] = gnt_list_entry;
529534

530-
if (rq_data_dir(req) && info->feature_persistent) {
535+
if (rq_data_dir(req) && info->bounce) {
531536
char *bvec_data;
532537
void *shared_data;
533538

@@ -711,11 +716,12 @@ static const char *flush_info(unsigned int feature_flush)
711716
static void xlvbd_flush(struct blkfront_info *info)
712717
{
713718
blk_queue_flush(info->rq, info->feature_flush);
714-
pr_info("blkfront: %s: %s %s %s %s %s\n",
719+
pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
715720
info->gd->disk_name, flush_info(info->feature_flush),
716721
"persistent grants:", info->feature_persistent ?
717722
"enabled;" : "disabled;", "indirect descriptors:",
718-
info->max_indirect_segments ? "enabled;" : "disabled;");
723+
info->max_indirect_segments ? "enabled;" : "disabled;",
724+
"bounce buffer:", info->bounce ? "enabled" : "disabled;");
719725
}
720726

721727
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -962,7 +968,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
962968
0, 0UL);
963969
info->persistent_gnts_c--;
964970
}
965-
if (info->feature_persistent)
971+
if (info->bounce)
966972
__free_page(pfn_to_page(persistent_gnt->pfn));
967973
kfree(persistent_gnt);
968974
}
@@ -976,7 +982,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
976982
if (!list_empty(&info->indirect_pages)) {
977983
struct page *indirect_page, *n;
978984

979-
BUG_ON(info->feature_persistent);
985+
BUG_ON(info->bounce);
980986
list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
981987
list_del(&indirect_page->lru);
982988
__free_page(indirect_page);
@@ -997,7 +1003,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
9971003
for (j = 0; j < segs; j++) {
9981004
persistent_gnt = info->shadow[i].grants_used[j];
9991005
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1000-
if (info->feature_persistent)
1006+
if (info->bounce)
10011007
__free_page(pfn_to_page(persistent_gnt->pfn));
10021008
kfree(persistent_gnt);
10031009
}
@@ -1057,7 +1063,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
10571063
nseg = s->req.operation == BLKIF_OP_INDIRECT ?
10581064
s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
10591065

1060-
if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
1066+
if (bret->operation == BLKIF_OP_READ && info->bounce) {
10611067
/*
10621068
* Copy the data received from the backend into the bvec.
10631069
* Since bv_offset can be different than 0, and bv_len different
@@ -1293,6 +1299,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
12931299
struct xenbus_transaction xbt;
12941300
int err;
12951301

1302+
/* Check if backend is trusted. */
1303+
info->bounce = !xen_blkif_trusted ||
1304+
!xenbus_read_unsigned(dev->nodename, "trusted", 1);
1305+
12961306
/* Create shared ring, alloc event channel. */
12971307
err = setup_blkring(dev, info);
12981308
if (err)
@@ -1697,10 +1707,10 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
16971707
if (err)
16981708
goto out_of_memory;
16991709

1700-
if (!info->feature_persistent && info->max_indirect_segments) {
1710+
if (!info->bounce && info->max_indirect_segments) {
17011711
/*
1702-
* We are using indirect descriptors but not persistent
1703-
* grants, we need to allocate a set of pages that can be
1712+
* We are using indirect descriptors but don't have a bounce
1713+
* buffer, we need to allocate a set of pages that can be
17041714
* used for mapping indirect grefs
17051715
*/
17061716
int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
@@ -1864,6 +1874,9 @@ static void blkfront_connect(struct blkfront_info *info)
18641874
else
18651875
info->feature_persistent = persistent;
18661876

1877+
if (info->feature_persistent)
1878+
info->bounce = true;
1879+
18671880
err = blkfront_setup_indirect(info);
18681881
if (err) {
18691882
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",

0 commit comments

Comments
 (0)