Skip to content

Commit 7312a9d

Browse files
author
Ming Lei
committed
block: fix queue limits checks in blk_rq_map_user_bvec for real
JIRA: https://issues.redhat.com/browse/RHEL-68422 commit be0e822 Author: Christoph Hellwig <[email protected]> Date: Mon Oct 28 10:07:48 2024 +0100 block: fix queue limits checks in blk_rq_map_user_bvec for real blk_rq_map_user_bvec currently only has ad-hoc checks for queue limits, and the last fix to it enabled valid NVMe I/O to pass, but also allowed invalid one for drivers that set a max_segment_size or seg_boundary limit. Fix it once for all by using the bio_split_rw_at helper from the I/O path that indicates if and where a bio would have to be split to adhere to the queue limits, and if it returns a positive value, turn that into -EREMOTEIO to retry using the copy path. Fixes: 2ff9494 ("block: fix sanity checks in blk_rq_map_user_bvec") Signed-off-by: Christoph Hellwig <[email protected]> Reviewed-by: John Garry <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]> Signed-off-by: Ming Lei <[email protected]>
1 parent 32cd743 commit 7312a9d

File tree

1 file changed

+17
-39
lines changed

1 file changed

+17
-39
lines changed

block/blk-map.c

Lines changed: 17 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -562,55 +562,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
562562
/* Prepare bio for passthrough IO given ITER_BVEC iter */
563563
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
564564
{
565-
struct request_queue *q = rq->q;
566-
size_t nr_iter = iov_iter_count(iter);
567-
size_t nr_segs = iter->nr_segs;
568-
struct bio_vec *bvecs, *bvprvp = NULL;
569-
const struct queue_limits *lim = &q->limits;
570-
unsigned int nsegs = 0, bytes = 0;
565+
const struct queue_limits *lim = &rq->q->limits;
566+
unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
567+
unsigned int nsegs;
571568
struct bio *bio;
572-
size_t i;
569+
int ret;
573570

574-
if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
575-
return -EINVAL;
576-
if (nr_segs > queue_max_segments(q))
571+
if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
577572
return -EINVAL;
578573

579-
/* no iovecs to alloc, as we already have a BVEC iterator */
574+
/* reuse the bvecs from the iterator instead of allocating new ones */
580575
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
581-
if (bio == NULL)
576+
if (!bio)
582577
return -ENOMEM;
583-
584578
bio_iov_bvec_set(bio, (struct iov_iter *)iter);
585-
blk_rq_bio_prep(rq, bio, nr_segs);
586-
587-
/* loop to perform a bunch of sanity checks */
588-
bvecs = (struct bio_vec *)iter->bvec;
589-
for (i = 0; i < nr_segs; i++) {
590-
struct bio_vec *bv = &bvecs[i];
591-
592-
/*
593-
* If the queue doesn't support SG gaps and adding this
594-
* offset would create a gap, fallback to copy.
595-
*/
596-
if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
597-
blk_mq_map_bio_put(bio);
598-
return -EREMOTEIO;
599-
}
600-
/* check full condition */
601-
if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
602-
goto put_bio;
603-
if (bytes + bv->bv_len > nr_iter)
604-
break;
605579

606-
nsegs++;
607-
bytes += bv->bv_len;
608-
bvprvp = bv;
580+
/* check that the data layout matches the hardware restrictions */
581+
ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
582+
if (ret) {
583+
/* if we would have to split the bio, copy instead */
584+
if (ret > 0)
585+
ret = -EREMOTEIO;
586+
blk_mq_map_bio_put(bio);
587+
return ret;
609588
}
589+
590+
blk_rq_bio_prep(rq, bio, nsegs);
610591
return 0;
611-
put_bio:
612-
blk_mq_map_bio_put(bio);
613-
return -EINVAL;
614592
}
615593

616594
/**

0 commit comments

Comments
 (0)