@@ -56,7 +56,7 @@ static void
 create_mkey_callback(int status, struct mlx5_async_work *context);
 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
-				     unsigned int page_size, bool populate,
+				     unsigned long page_size, bool populate,
				     int access_mode);
 static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
 
@@ -1115,7 +1115,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
	struct mlx5r_cache_rb_key rb_key = {};
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
-	unsigned int page_size;
+	unsigned long page_size;
 
	if (umem->is_dmabuf)
		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
@@ -1219,7 +1219,7 @@ reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_f
  */
 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
-				     unsigned int page_size, bool populate,
+				     unsigned long page_size, bool populate,
				     int access_mode)
 {
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -1425,7 +1425,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
		mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
					MLX5_MKC_ACCESS_MODE_MTT);
	} else {
-		unsigned int page_size =
+		unsigned long page_size =
			mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
 
		mutex_lock(&dev->slow_path_mutex);
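For context, a minimal userspace sketch of the truncation this type change avoids. It assumes an LP64 target, and the 16 GiB page size below is purely illustrative; the point is that once a best-fit page size can exceed 2^31 (the RDMA core helper ib_umem_find_best_pgsz() returns unsigned long), storing it in an unsigned int silently drops the high bits:

/*
 * Illustrative sketch, not driver code: assigning a large page size
 * to an unsigned int discards every bit above bit 31.
 */
#include <stdio.h>

int main(void)
{
	unsigned long best_pgsz = 1UL << 34;	/* hypothetical 16 GiB page size */
	unsigned int truncated = best_pgsz;	/* bit 34 is lost: value becomes 0 */

	printf("unsigned long: %#lx\n", best_pgsz);	/* 0x400000000 */
	printf("unsigned int:  %#x\n", truncated);	/* 0 */
	return 0;
}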