Commit 90641b5

JohannesGaessler authored and ggerganov committed
CUDA: fix padding logic for FP16/FP32 (llama/8884)
1 parent 4160b93 commit 90641b5

File tree

1 file changed: +1 -1 lines changed


ggml/src/ggml-cuda.cu (+1 -1)
@@ -1501,7 +1501,7 @@ static void ggml_cuda_op_mul_mat(
         }
 
         // If src0 is on a temporary compute buffers (partial offloading) there may be some padding that needs to be cleared:
-        if (ne00 % MATRIX_ROW_PADDING != 0 && ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && src0->view_src == nullptr) {
+        if (ne00 % MATRIX_ROW_PADDING != 0 && ggml_is_quantized(src0->type) && ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && src0->view_src == nullptr) {
             const int64_t nbytes_data    = ggml_row_size(src0->type, (dev[id].row_high - dev[id].row_low)*ne00);
             const int64_t nbytes_padding = ggml_row_size(src0->type, MATRIX_ROW_PADDING - ne00 % MATRIX_ROW_PADDING);
             CUDA_CHECK(cudaMemsetAsync(dev[id].src0_dd + nbytes_data , 0, nbytes_padding, stream));
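
The change makes the padding clear run only when src0 is quantized; presumably only quantized row layouts carry the extra padding bytes, so FP16/FP32 tensors should not be memset past their data. Below is a minimal, self-contained sketch of the padding-size arithmetic guarded by the fixed condition. ROW_PADDING, BLOCK_ELEMS, BLOCK_BYTES and row_size_bytes() are assumed example values and a stand-in for ggml_row_size(); they are not taken from ggml.

/*
 * Illustrative sketch only (assumptions, not ggml constants):
 *   ROW_PADDING  - assumed padding granularity, in elements
 *   BLOCK_ELEMS  - assumed elements per quantized block
 *   BLOCK_BYTES  - assumed bytes per quantized block
 *   row_size_bytes() stands in for ggml_row_size().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ROW_PADDING 512
#define BLOCK_ELEMS 32
#define BLOCK_BYTES 18

/* Bytes occupied by n elements of the assumed quantized row format. */
static int64_t row_size_bytes(int64_t n_elems) {
    return n_elems / BLOCK_ELEMS * BLOCK_BYTES;
}

int main(void) {
    const int64_t ne00      = 4000; /* row length in elements (example)      */
    const int64_t n_rows    = 8;    /* rows held by this device (example)    */
    const bool    quantized = true; /* FP16/FP32 rows: assumed no padding    */

    /* Mirrors the fixed condition: clear padding only for quantized rows. */
    if (ne00 % ROW_PADDING != 0 && quantized) {
        const int64_t nbytes_data    = row_size_bytes(n_rows*ne00);
        const int64_t nbytes_padding = row_size_bytes(ROW_PADDING - ne00 % ROW_PADDING);
        /* In the CUDA backend the region starting at nbytes_data is cleared
         * with cudaMemsetAsync; here we only report the sizes.              */
        printf("data: %lld bytes, padding to clear: %lld bytes\n",
               (long long) nbytes_data, (long long) nbytes_padding);
    } else {
        printf("no padding to clear\n");
    }
    return 0;
}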
