@@ -98,6 +98,10 @@ static unsigned int xen_blkif_max_segments = 32;
 module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
 MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
 
+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
 
 /*
@@ -131,6 +135,7 @@ struct blkfront_info
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
 	unsigned int feature_persistent:1;
+	unsigned int bounce:1;
 	unsigned int max_indirect_segments;
 	int is_ready;
 };
@@ -200,7 +205,7 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 		if (!gnt_list_entry)
 			goto out_of_memory;
 
-		if (info->feature_persistent) {
+		if (info->bounce) {
 			granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
 			if (!granted_page) {
 				kfree(gnt_list_entry);
@@ -220,7 +225,7 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 	list_for_each_entry_safe(gnt_list_entry, n,
 	                         &info->grants, node) {
 		list_del(&gnt_list_entry->node);
-		if (info->feature_persistent)
+		if (info->bounce)
 			__free_page(pfn_to_page(gnt_list_entry->pfn));
 		kfree(gnt_list_entry);
 		i--;
@@ -249,7 +254,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
-	if (!info->feature_persistent) {
+	if (!info->bounce) {
 		BUG_ON(!pfn);
 		gnt_list_entry->pfn = pfn;
 	}
@@ -506,7 +511,7 @@ static int blkif_queue_request(struct request *req)
 					kunmap_atomic(segments);
 
 				n = i / SEGS_PER_INDIRECT_FRAME;
-				if (!info->feature_persistent) {
+				if (!info->bounce) {
 					struct page *indirect_page;
 
 					/* Fetch a pre-allocated page to use for indirect grefs */
@@ -527,7 +532,7 @@ static int blkif_queue_request(struct request *req)
 
 			info->shadow[id].grants_used[i] = gnt_list_entry;
 
-			if (rq_data_dir(req) && info->feature_persistent) {
+			if (rq_data_dir(req) && info->bounce) {
 				char *bvec_data;
 				void *shared_data;
 
@@ -711,11 +716,12 @@ static const char *flush_info(unsigned int feature_flush)
 static void xlvbd_flush(struct blkfront_info *info)
 {
 	blk_queue_flush(info->rq, info->feature_flush);
-	pr_info("blkfront: %s: %s %s %s %s %s\n",
+	pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
 		info->gd->disk_name, flush_info(info->feature_flush),
 		"persistent grants:", info->feature_persistent ?
 		"enabled;" : "disabled;", "indirect descriptors:",
-		info->max_indirect_segments ? "enabled;" : "disabled;");
+		info->max_indirect_segments ? "enabled;" : "disabled;",
+		"bounce buffer:", info->bounce ? "enabled" : "disabled;");
 }
 
 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -962,7 +968,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 							  0, 0UL);
 				info->persistent_gnts_c--;
 			}
-			if (info->feature_persistent)
+			if (info->bounce)
 				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
@@ -976,7 +982,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	if (!list_empty(&info->indirect_pages)) {
 		struct page *indirect_page, *n;
 
-		BUG_ON(info->feature_persistent);
+		BUG_ON(info->bounce);
 		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
 			list_del(&indirect_page->lru);
 			__free_page(indirect_page);
@@ -997,7 +1003,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 		for (j = 0; j < segs; j++) {
 			persistent_gnt = info->shadow[i].grants_used[j];
 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-			if (info->feature_persistent)
+			if (info->bounce)
 				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
@@ -1057,7 +1063,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
 		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
 
-	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+	if (bret->operation == BLKIF_OP_READ && info->bounce) {
 		/*
 		 * Copy the data received from the backend into the bvec.
 		 * Since bv_offset can be different than 0, and bv_len different
@@ -1293,6 +1299,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	struct xenbus_transaction xbt;
 	int err;
 
+	/* Check if backend is trusted. */
+	info->bounce = !xen_blkif_trusted ||
+		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
 	/* Create shared ring, alloc event channel. */
 	err = setup_blkring(dev, info);
 	if (err)
@@ -1697,10 +1707,10 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 	if (err)
 		goto out_of_memory;
 
-	if (!info->feature_persistent && info->max_indirect_segments) {
+	if (!info->bounce && info->max_indirect_segments) {
 		/*
-		 * We are using indirect descriptors but not persistent
-		 * grants, we need to allocate a set of pages that can be
+		 * We are using indirect descriptors but don't have a bounce
+		 * buffer, we need to allocate a set of pages that can be
 		 * used for mapping indirect grefs
 		 */
 		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
@@ -1864,6 +1874,9 @@ static void blkfront_connect(struct blkfront_info *info)
 	else
 		info->feature_persistent = persistent;
 
+	if (info->feature_persistent)
+		info->bounce = true;
+
 	err = blkfront_setup_indirect(info);
 	if (err) {
 		xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
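
Taken together, the hunks above reduce to one decision: the frontend bounces request data through locally owned pages whenever the new "trusted" module parameter is cleared, the backend's xenstore node advertises trusted = 0, or persistent grants are in use (persistent grants already copy through local pages, so the same buffers are reused). The standalone sketch below, not kernel code, illustrates that decision with a stubbed xenbus_read_unsigned() in place of the real xenstore read and a made-up nodename:

/*
 * Sketch of the bounce decision added by this patch; the stub and the
 * nodename string are assumptions for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool xen_blkif_trusted = true;	/* the new "trusted" module parameter */

/* Stub standing in for the xenstore read; returns the default when the node is absent. */
static unsigned int xenbus_read_unsigned(const char *dir, const char *node,
					 unsigned int default_val)
{
	(void)dir; (void)node;
	return default_val;
}

static bool blkfront_should_bounce(bool feature_persistent)
{
	/* As in talk_to_blkback(): bounce if admin or toolstack distrusts the backend. */
	bool bounce = !xen_blkif_trusted ||
		      !xenbus_read_unsigned("device/vbd/51712", "trusted", 1);

	/* As in blkfront_connect(): persistent grants always go through local pages. */
	if (feature_persistent)
		bounce = true;

	return bounce;
}

int main(void)
{
	printf("bounce = %d\n", blkfront_should_bounce(false));
	return 0;
}

In the driver itself, the first half of this check runs in talk_to_blkback() (the hunk at new line 1299) and the persistent-grants override in blkfront_connect() (the hunk at new line 1874); every former info->feature_persistent test on the grant allocation, free, and copy paths is then switched to info->bounce.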