@@ -26,8 +26,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 
 	for (i = 0; i < bl->num_bufs; i++)
-		dma_unmap_single(dev, bl->bufers[i].addr,
-				 bl->bufers[i].len, bl_dma_dir);
+		dma_unmap_single(dev, bl->buffers[i].addr,
+				 bl->buffers[i].len, bl_dma_dir);
 
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 
@@ -36,8 +36,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 
 	if (blp != blpout) {
 		for (i = 0; i < blout->num_mapped_bufs; i++) {
-			dma_unmap_single(dev, blout->bufers[i].addr,
-					 blout->bufers[i].len,
+			dma_unmap_single(dev, blout->buffers[i].addr,
+					 blout->buffers[i].len,
 					 DMA_FROM_DEVICE);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -63,7 +63,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	dma_addr_t blp = DMA_MAPPING_ERROR;
 	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
-	size_t sz_out, sz = struct_size(bufl, bufers, n);
+	size_t sz_out, sz = struct_size(bufl, buffers, n);
 	int node = dev_to_node(&GET_DEV(accel_dev));
 	int bufl_dma_dir;
 
@@ -86,19 +86,19 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 
 	for (i = 0; i < n; i++)
-		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+		bufl->buffers[i].addr = DMA_MAPPING_ERROR;
 
 	for_each_sg(sgl, sg, n, i) {
 		int y = sg_nctr;
 
 		if (!sg->length)
 			continue;
 
-		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-						      sg->length,
-						      bufl_dma_dir);
-		bufl->bufers[y].len = sg->length;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+						       sg->length,
+						       bufl_dma_dir);
+		bufl->buffers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
 			goto err_in;
 		sg_nctr++;
 	}
@@ -111,12 +111,12 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	buf->sz = sz;
 	/* Handle out of place operation */
 	if (sgl != sglout) {
-		struct qat_alg_buf *bufers;
+		struct qat_alg_buf *buffers;
 		int extra_buff = extra_dst_buff ? 1 : 0;
 		int n_sglout = sg_nents(sglout);
 
 		n = n_sglout + extra_buff;
-		sz_out = struct_size(buflout, bufers, n);
+		sz_out = struct_size(buflout, buffers, n);
 		sg_nctr = 0;
 
 		if (n > QAT_MAX_BUFF_DESC) {
@@ -129,27 +129,27 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 			buf->sgl_dst_valid = true;
 		}
 
-		bufers = buflout->bufers;
+		buffers = buflout->buffers;
 		for (i = 0; i < n; i++)
-			bufers[i].addr = DMA_MAPPING_ERROR;
+			buffers[i].addr = DMA_MAPPING_ERROR;
 
 		for_each_sg(sglout, sg, n_sglout, i) {
 			int y = sg_nctr;
 
 			if (!sg->length)
 				continue;
 
-			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-							sg->length,
-							DMA_FROM_DEVICE);
-			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+			buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+							 sg->length,
+							 DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
 				goto err_out;
-			bufers[y].len = sg->length;
+			buffers[y].len = sg->length;
 			sg_nctr++;
 		}
 		if (extra_buff) {
-			bufers[sg_nctr].addr = extra_dst_buff;
-			bufers[sg_nctr].len = sz_extra_dst_buff;
+			buffers[sg_nctr].addr = extra_dst_buff;
+			buffers[sg_nctr].len = sz_extra_dst_buff;
 		}
 
 		buflout->num_bufs = sg_nctr;
@@ -174,11 +174,11 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 
 	n = sg_nents(sglout);
 	for (i = 0; i < n; i++) {
-		if (buflout->bufers[i].addr == extra_dst_buff)
+		if (buflout->buffers[i].addr == extra_dst_buff)
 			break;
-		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
-			dma_unmap_single(dev, buflout->bufers[i].addr,
-					 buflout->bufers[i].len,
+		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+			dma_unmap_single(dev, buflout->buffers[i].addr,
+					 buflout->buffers[i].len,
 					 DMA_FROM_DEVICE);
 	}
 
@@ -191,9 +191,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 
 	n = sg_nents(sgl);
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
-			dma_unmap_single(dev, bufl->bufers[i].addr,
-					 bufl->bufers[i].len,
+		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+			dma_unmap_single(dev, bufl->buffers[i].addr,
+					 bufl->buffers[i].len,
 					 bufl_dma_dir);
 
 	if (!buf->sgl_src_valid)
@@ -231,9 +231,9 @@ static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
 	int i;
 
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bl->bufers[i].addr))
-			dma_unmap_single(dev, bl->bufers[i].addr,
-					 bl->bufers[i].len, DMA_FROM_DEVICE);
+		if (!dma_mapping_error(dev, bl->buffers[i].addr))
+			dma_unmap_single(dev, bl->buffers[i].addr,
+					 bl->buffers[i].len, DMA_FROM_DEVICE);
 }
 
 static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
@@ -248,13 +248,13 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 	size_t sz;
 
 	n = sg_nents(sgl);
-	sz = struct_size(bufl, bufers, n);
+	sz = struct_size(bufl, buffers, n);
 	bufl = kzalloc_node(sz, GFP_KERNEL, node);
 	if (unlikely(!bufl))
 		return -ENOMEM;
 
 	for (i = 0; i < n; i++)
-		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+		bufl->buffers[i].addr = DMA_MAPPING_ERROR;
 
 	sg_nctr = 0;
 	for_each_sg(sgl, sg, n, i) {
@@ -263,11 +263,11 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 		if (!sg->length)
 			continue;
 
-		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-						      sg->length,
-						      DMA_FROM_DEVICE);
-		bufl->bufers[y].len = sg->length;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+						       sg->length,
+						       DMA_FROM_DEVICE);
+		bufl->buffers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
 			goto err_map;
 		sg_nctr++;
 	}
@@ -280,9 +280,9 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 
 err_map:
	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
-			dma_unmap_single(dev, bufl->bufers[i].addr,
-					 bufl->bufers[i].len,
+		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+			dma_unmap_single(dev, bufl->buffers[i].addr,
+					 bufl->buffers[i].len,
 					 DMA_FROM_DEVICE);
 	kfree(bufl);
 	*bl = NULL;
@@ -351,7 +351,7 @@ int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
 	if (ret)
 		return ret;
 
-	new_bl_size = struct_size(new_bl, bufers, new_bl->num_bufs);
+	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
 
 	/* Map new firmware SGL descriptor */
 	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
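For reference, the only sizing construct touched by this rename is struct_size(bufl, buffers, n), which returns the bytes needed for a qat_alg_buf_list header followed by n trailing descriptor entries in its flexible array. The user-space sketch below illustrates that allocation pattern; buf_desc, buf_list and the flex_size() macro are simplified stand-ins I am assuming here, not the real definitions from qat_bl.h or the kernel's struct_size() helper from <linux/overflow.h> (which additionally saturates on arithmetic overflow).

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-ins for the QAT descriptors; the real definitions
 * (packed, with reserved fields) live in qat_bl.h. */
struct buf_desc {
	uint64_t addr;		/* DMA address of one segment */
	uint32_t len;		/* segment length in bytes */
};

struct buf_list {
	uint32_t num_bufs;
	struct buf_desc buffers[];	/* flexible array member */
};

/* Illustrative equivalent of the kernel's struct_size(p, member, n):
 * header size plus n trailing array elements. */
#define flex_size(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct buf_list *bl;
	size_t n = 4;
	/* sizeof() does not evaluate its operand, so using the not yet
	 * assigned pointer here is fine - the same idiom the driver
	 * uses with struct_size(bufl, buffers, n). */
	size_t sz = flex_size(bl, buffers, n);

	bl = calloc(1, sz);
	if (!bl)
		return 1;

	bl->num_bufs = n;
	for (size_t i = 0; i < n; i++)
		bl->buffers[i].len = 4096;	/* pretend each segment is a page */

	printf("header %zu bytes, total %zu bytes for %zu descriptors\n",
	       sizeof(*bl), sz, n);
	free(bl);
	return 0;
}

The driver pairs this with pre-filling every addr with DMA_MAPPING_ERROR before mapping, which is what lets the err_in/err_out/err_map paths above unmap only the entries that were actually mapped.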