@@ -114,8 +114,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		struct io_uring_cmd *ioucmd, unsigned int flags,
-		unsigned int iou_issue_flags)
+		struct iov_iter *iter, unsigned int flags)
 {
 	struct request_queue *q = req->q;
 	struct nvme_ns *ns = q->queuedata;
@@ -129,37 +128,23 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	if (!nvme_ctrl_sgl_supported(ctrl))
 		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
 	if (has_metadata) {
-		if (!supports_metadata) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (!supports_metadata)
+			return -EINVAL;
+
 		if (!nvme_ctrl_meta_sgl_supported(ctrl))
 			dev_warn_once(ctrl->device,
 				      "using unchecked metadata buffer\n");
 	}
 
-	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
-		struct iov_iter iter;
-
-		/* fixedbufs is only for non-vectored io */
-		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
-			ret = -EINVAL;
-			goto out;
-		}
-		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
-				rq_data_dir(req), &iter, ioucmd,
-				iou_issue_flags);
-		if (ret < 0)
-			goto out;
-		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
-	} else {
+	if (iter)
+		ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
+	else
 		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
 				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
 				0, rq_data_dir(req));
-	}
 
 	if (ret)
-		goto out;
+		return ret;
 
 	bio = req->bio;
 	if (bdev)
@@ -176,8 +161,6 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 out_unmap:
 	if (bio)
 		blk_rq_unmap_user(bio);
-out:
-	blk_mq_free_request(req);
 	return ret;
 }
 
@@ -200,9 +183,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	req->timeout = timeout;
 	if (ubuffer && bufflen) {
 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-				meta_len, NULL, flags, 0);
+				meta_len, NULL, flags);
 		if (ret)
-			return ret;
+			goto out_free_req;
 	}
 
 	bio = req->bio;
@@ -218,7 +201,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 
 	if (effects)
 		nvme_passthru_end(ctrl, ns, effects, cmd, ret);
+	return ret;
 
+out_free_req:
+	blk_mq_free_request(req);
 	return ret;
 }
 
@@ -469,6 +455,8 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
 	struct nvme_uring_data d;
 	struct nvme_command c;
+	struct iov_iter iter;
+	struct iov_iter *map_iter = NULL;
 	struct request *req;
 	blk_opf_t rq_flags = REQ_ALLOC_CACHE;
 	blk_mq_req_flags_t blk_flags = 0;
@@ -504,6 +492,20 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	d.metadata_len = READ_ONCE(cmd->metadata_len);
 	d.timeout_ms = READ_ONCE(cmd->timeout_ms);
 
+	if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+		/* fixedbufs is only for non-vectored io */
+		if (vec)
+			return -EINVAL;
+
+		ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
+			nvme_is_write(&c) ? WRITE : READ, &iter, ioucmd,
+			issue_flags);
+		if (ret < 0)
+			return ret;
+
+		map_iter = &iter;
+	}
+
 	if (issue_flags & IO_URING_F_NONBLOCK) {
 		rq_flags |= REQ_NOWAIT;
 		blk_flags = BLK_MQ_REQ_NOWAIT;
@@ -517,11 +519,11 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
 
 	if (d.data_len) {
-		ret = nvme_map_user_request(req, d.addr,
-			d.data_len, nvme_to_user_ptr(d.metadata),
-			d.metadata_len, ioucmd, vec, issue_flags);
+		ret = nvme_map_user_request(req, d.addr, d.data_len,
+			nvme_to_user_ptr(d.metadata), d.metadata_len,
+			map_iter, vec);
 		if (ret)
-			return ret;
+			goto out_free_req;
 	}
 
 	/* to free bio on completion, as req->bio will be null at that time */
@@ -531,6 +533,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	req->end_io = nvme_uring_cmd_end_io;
 	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;
+
+out_free_req:
+	blk_mq_free_request(req);
+	return ret;
 }
 
 static bool is_ctrl_ioctl(unsigned int cmd)
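
For reference, a condensed sketch of the flow in nvme_uring_cmd_io() after this change, reconstructed from the hunks above. Error paths and surrounding code are abbreviated, so this is illustrative rather than a verbatim excerpt of the file:

	struct iov_iter iter;
	struct iov_iter *map_iter = NULL;

	/* Fixed (registered) buffers are now imported by the caller, before the
	 * request is allocated, rather than inside nvme_map_user_request(). */
	if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		if (vec)
			return -EINVAL;	/* fixedbufs is only for non-vectored io */
		ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
				nvme_is_write(&c) ? WRITE : READ, &iter,
				ioucmd, issue_flags);
		if (ret < 0)
			return ret;
		map_iter = &iter;
	}

	/* nvme_map_user_request() now takes the optional iov_iter and no longer
	 * frees the request on failure; the caller does that via out_free_req. */
	if (d.data_len) {
		ret = nvme_map_user_request(req, d.addr, d.data_len,
				nvme_to_user_ptr(d.metadata), d.metadata_len,
				map_iter, vec);
		if (ret)
			goto out_free_req;
	}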