
Commit 076f2cf

crypto: qat - fix spelling mistakes from 'bufer' to 'buffer'
jira LE-1907
Rebuild_History Non-Buildable kernel-rt-5.14.0-284.30.1.rt14.315.el9_2
commit-author Meadhbh Fitzpatrick <[email protected]>
commit 692ed5d

Fix spelling mistakes from 'bufer' to 'buffer' in qat_common.
Also fix the indentation issue caused by the spelling change.

Signed-off-by: Meadhbh Fitzpatrick <[email protected]>
Reviewed-by: Ilpo Järvinen <[email protected]>
Reviewed-by: Giovanni Cabiddu <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
(cherry picked from commit 692ed5d)
Signed-off-by: Jonathan Maple <[email protected]>
1 parent e5dd7e6 commit 076f2cf

File tree: 3 files changed, 45 additions and 45 deletions

drivers/crypto/qat/qat_common/adf_transport_access_macros.h
Lines changed: 1 addition & 1 deletion

@@ -37,7 +37,7 @@
 #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
 #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
 
-/* Minimum ring bufer size for memory allocation */
+/* Minimum ring buffer size for memory allocation */
 #define ADF_RING_SIZE_BYTES_MIN(SIZE) \
 	((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
 	ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
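
For context on the hunk above: the ring-size macros convert an encoded size to bytes as (1 << (SIZE - 1)) << 7, and ADF_RING_SIZE_BYTES_MIN clamps any smaller byte count up to the 4K encoding. Below is a minimal standalone sketch of that arithmetic, assuming an encoding value of 6 for the 4K ring purely for illustration (the real ADF_RING_SIZE_4K constant lives in this header, not here):

/* Standalone sketch of the ring-size arithmetic above; not driver code.
 * RING_SIZE_4K_ENC is an assumed encoding value used only for this demo.
 */
#include <stdio.h>

#define SIZE_TO_BYTES(enc)  ((1UL << ((enc) - 1)) << 7) /* mirrors ADF_SIZE_TO_RING_SIZE_IN_BYTES */
#define RING_SIZE_4K_ENC    6UL                         /* assumption: (1 << 5) << 7 == 4096 */
#define BYTES_MIN(sz)       ((sz) < SIZE_TO_BYTES(RING_SIZE_4K_ENC) ? \
			     SIZE_TO_BYTES(RING_SIZE_4K_ENC) : (sz))

int main(void)
{
	printf("2K request clamps to %lu bytes\n", BYTES_MIN(2048UL)); /* 4096 */
	printf("8K request stays at %lu bytes\n", BYTES_MIN(8192UL));  /* 8192 */
	return 0;
}

With that assumed encoding, (1 << 5) << 7 equals 4096, so a 2 KiB request is rounded up to the minimum while an 8 KiB request passes through unchanged.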

drivers/crypto/qat/qat_common/qat_bl.c
Lines changed: 43 additions & 43 deletions

@@ -26,8 +26,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 
 	for (i = 0; i < bl->num_bufs; i++)
-		dma_unmap_single(dev, bl->bufers[i].addr,
-				 bl->bufers[i].len, bl_dma_dir);
+		dma_unmap_single(dev, bl->buffers[i].addr,
+				 bl->buffers[i].len, bl_dma_dir);
 
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 
@@ -36,8 +36,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 
 	if (blp != blpout) {
 		for (i = 0; i < blout->num_mapped_bufs; i++) {
-			dma_unmap_single(dev, blout->bufers[i].addr,
-					 blout->bufers[i].len,
+			dma_unmap_single(dev, blout->buffers[i].addr,
+					 blout->buffers[i].len,
 					 DMA_FROM_DEVICE);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -63,7 +63,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	dma_addr_t blp = DMA_MAPPING_ERROR;
 	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
-	size_t sz_out, sz = struct_size(bufl, bufers, n);
+	size_t sz_out, sz = struct_size(bufl, buffers, n);
 	int node = dev_to_node(&GET_DEV(accel_dev));
 	int bufl_dma_dir;
 
@@ -86,19 +86,19 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 
 	for (i = 0; i < n; i++)
-		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+		bufl->buffers[i].addr = DMA_MAPPING_ERROR;
 
 	for_each_sg(sgl, sg, n, i) {
 		int y = sg_nctr;
 
 		if (!sg->length)
 			continue;
 
-		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-						      sg->length,
-						      bufl_dma_dir);
-		bufl->bufers[y].len = sg->length;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+						       sg->length,
+						       bufl_dma_dir);
+		bufl->buffers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
 			goto err_in;
 		sg_nctr++;
 	}
@@ -111,12 +111,12 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	buf->sz = sz;
 	/* Handle out of place operation */
 	if (sgl != sglout) {
-		struct qat_alg_buf *bufers;
+		struct qat_alg_buf *buffers;
 		int extra_buff = extra_dst_buff ? 1 : 0;
 		int n_sglout = sg_nents(sglout);
 
 		n = n_sglout + extra_buff;
-		sz_out = struct_size(buflout, bufers, n);
+		sz_out = struct_size(buflout, buffers, n);
 		sg_nctr = 0;
 
 		if (n > QAT_MAX_BUFF_DESC) {
@@ -129,27 +129,27 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 			buf->sgl_dst_valid = true;
 		}
 
-		bufers = buflout->bufers;
+		buffers = buflout->buffers;
 		for (i = 0; i < n; i++)
-			bufers[i].addr = DMA_MAPPING_ERROR;
+			buffers[i].addr = DMA_MAPPING_ERROR;
 
 		for_each_sg(sglout, sg, n_sglout, i) {
 			int y = sg_nctr;
 
 			if (!sg->length)
 				continue;
 
-			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-							sg->length,
-							DMA_FROM_DEVICE);
-			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+			buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+							 sg->length,
+							 DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
 				goto err_out;
-			bufers[y].len = sg->length;
+			buffers[y].len = sg->length;
 			sg_nctr++;
 		}
 		if (extra_buff) {
-			bufers[sg_nctr].addr = extra_dst_buff;
-			bufers[sg_nctr].len = sz_extra_dst_buff;
+			buffers[sg_nctr].addr = extra_dst_buff;
+			buffers[sg_nctr].len = sz_extra_dst_buff;
 		}
 
 		buflout->num_bufs = sg_nctr;
@@ -174,11 +174,11 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 
 	n = sg_nents(sglout);
 	for (i = 0; i < n; i++) {
-		if (buflout->bufers[i].addr == extra_dst_buff)
+		if (buflout->buffers[i].addr == extra_dst_buff)
 			break;
-		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
-			dma_unmap_single(dev, buflout->bufers[i].addr,
-					 buflout->bufers[i].len,
+		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+			dma_unmap_single(dev, buflout->buffers[i].addr,
+					 buflout->buffers[i].len,
 					 DMA_FROM_DEVICE);
 	}
 
@@ -191,9 +191,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 
 	n = sg_nents(sgl);
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
-			dma_unmap_single(dev, bufl->bufers[i].addr,
-					 bufl->bufers[i].len,
+		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+			dma_unmap_single(dev, bufl->buffers[i].addr,
+					 bufl->buffers[i].len,
 					 bufl_dma_dir);
 
 	if (!buf->sgl_src_valid)
@@ -231,9 +231,9 @@ static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
 	int i;
 
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bl->bufers[i].addr))
-			dma_unmap_single(dev, bl->bufers[i].addr,
-					 bl->bufers[i].len, DMA_FROM_DEVICE);
+		if (!dma_mapping_error(dev, bl->buffers[i].addr))
+			dma_unmap_single(dev, bl->buffers[i].addr,
+					 bl->buffers[i].len, DMA_FROM_DEVICE);
 }
 
 static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
@@ -248,13 +248,13 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 	size_t sz;
 
 	n = sg_nents(sgl);
-	sz = struct_size(bufl, bufers, n);
+	sz = struct_size(bufl, buffers, n);
 	bufl = kzalloc_node(sz, GFP_KERNEL, node);
 	if (unlikely(!bufl))
 		return -ENOMEM;
 
 	for (i = 0; i < n; i++)
-		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+		bufl->buffers[i].addr = DMA_MAPPING_ERROR;
 
 	sg_nctr = 0;
 	for_each_sg(sgl, sg, n, i) {
@@ -263,11 +263,11 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 		if (!sg->length)
 			continue;
 
-		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-						      sg->length,
-						      DMA_FROM_DEVICE);
-		bufl->bufers[y].len = sg->length;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+						       sg->length,
+						       DMA_FROM_DEVICE);
+		bufl->buffers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
 			goto err_map;
 		sg_nctr++;
 	}
@@ -280,9 +280,9 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 
 err_map:
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
-			dma_unmap_single(dev, bufl->bufers[i].addr,
-					 bufl->bufers[i].len,
+		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+			dma_unmap_single(dev, bufl->buffers[i].addr,
+					 bufl->buffers[i].len,
 					 DMA_FROM_DEVICE);
 	kfree(bufl);
 	*bl = NULL;
@@ -351,7 +351,7 @@ int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
 	if (ret)
 		return ret;
 
-	new_bl_size = struct_size(new_bl, bufers, new_bl->num_bufs);
+	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
 
 	/* Map new firmware SGL descriptor */
 	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
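
As background for the hunks above: the renamed code follows the usual streaming-DMA idiom, pre-filling every descriptor with DMA_MAPPING_ERROR so the error path can tell mapped entries from untouched ones before calling dma_unmap_single(). A minimal sketch of that pattern, where struct demo_buf and demo_map() are hypothetical stand-ins rather than driver API (only the <linux/dma-mapping.h> calls are the real kernel interface used by qat_bl.c):

/* Illustrative-only sketch of the map/check/unmap pattern used above. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct demo_buf {
	dma_addr_t addr;
	size_t len;
};

static int demo_map(struct device *dev, struct demo_buf *bufs, void **va,
		    size_t *len, int n)
{
	int i;

	/* Pre-mark every slot so the unwind loop can skip unmapped ones. */
	for (i = 0; i < n; i++)
		bufs[i].addr = DMA_MAPPING_ERROR;

	for (i = 0; i < n; i++) {
		bufs[i].addr = dma_map_single(dev, va[i], len[i], DMA_TO_DEVICE);
		bufs[i].len = len[i];
		if (dma_mapping_error(dev, bufs[i].addr))
			goto err;
	}
	return 0;

err:
	/* Unwind only the entries that were actually mapped. */
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufs[i].addr))
			dma_unmap_single(dev, bufs[i].addr, bufs[i].len,
					 DMA_TO_DEVICE);
	return -ENOMEM;
}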

drivers/crypto/qat/qat_common/qat_bl.h
Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@ struct qat_alg_buf_list {
 	u64 resrvd;
 	u32 num_bufs;
 	u32 num_mapped_bufs;
-	struct qat_alg_buf bufers[];
+	struct qat_alg_buf buffers[];
 } __packed;
 
 struct qat_alg_fixed_buf_list {
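
This member rename is also why every struct_size() call site in qat_bl.c had to change: struct_size(p, member, n) from <linux/overflow.h> computes sizeof(*p) plus n times the size of p->member (with overflow checking), so its second argument must name the flexible array exactly as declared. A hedged sketch of the allocation pattern, where demo_alloc() is an illustrative helper rather than driver code:

/* Sketch only: the allocation pattern that depends on the member name. */
#include <linux/overflow.h>
#include <linux/slab.h>
#include "qat_bl.h"

static struct qat_alg_buf_list *demo_alloc(int n, int node)
{
	struct qat_alg_buf_list *bl;

	/* Before the rename this read struct_size(bl, bufers, n). */
	bl = kzalloc_node(struct_size(bl, buffers, n), GFP_KERNEL, node);
	return bl;
}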
