// #include <bits/stdc++.h>
// #include <cuda_runtime.h>
#include "static_switch.h"
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAStream.h>
#include <torch/extension.h>
// constexpr int kTokenNum = 8192;
// constexpr int kBs = 1;
// constexpr int kSeqlenQMax = 8192;
constexpr int kHeadGroup = 2;
constexpr int kSparseBlockSize = 64;
// constexpr int kSparseTopK = 96;
constexpr int kTopkPerBlock = 16;
// constexpr int kBlockPerTokenHead = kSparseTopK / kTopkPerBlock;
// topk_idx: [head_group, token_num, kSparseTopK]: int32 [2, 8192, 96]
// block_table: [batch_size, seqlen_q_max]: int32 [1, 8192]
// token_to_bs: [token_num]: int32 [8192]
// token_pos_in_bs: [token_num]: int32 [8192]
// seqlen_q: [batch_size]: int32 [1]
// out_block_table: [token_num, head_group, kSparseTopK * kSparseBlockSize]:
//   int32 [8192, 2, 96 * 64]
// seqlen_q_max: int
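// Index arithmetic used by all three kernels below: the flat output offset
// for (token t, head group h, topk slot k, in-block offset o) is
//   ((t * kHeadGroup + h) * kSparseTopK + k) * kSparseBlockSize + o,
// and the stored value is kHeadGroup * block_table[bs][pos] + h. Worked
// example with kHeadGroup = 2: physical block 5 maps to 10 for head group 0
// and 11 for head group 1, i.e. per-head-group blocks end up interleaved
// (a layout implied by the formula, not documented elsewhere in this file).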
template <int kSparseTopK>
__global__ void
get_block_table_cuda_v1(const int *topk_idx, const int *block_table,
const int *token_to_bs, const int *token_pos_in_bs,
const int *seqlen_q, int *out_block_table,
const int seqlen_q_max, const int token_num) {
int token_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (token_idx >= token_num)
return;
int bs = token_to_bs[token_idx];
int pos_in_bs = token_pos_in_bs[token_idx];
for (int h = 0; h < kHeadGroup; h++) {
for (int i = 0; i < kSparseTopK * kSparseBlockSize; i++) {
int sparse_block_idx =
topk_idx[h * token_num * kSparseTopK + token_idx * kSparseTopK +
i / kSparseBlockSize];
if (sparse_block_idx < 0)
continue;
int token_idx_in_batch =
sparse_block_idx * kSparseBlockSize + (i % kSparseBlockSize);
if (token_idx_in_batch < seqlen_q[bs] && token_idx_in_batch < pos_in_bs) {
out_block_table[token_idx * kHeadGroup * kSparseTopK *
kSparseBlockSize +
h * kSparseTopK * kSparseBlockSize + i] =
kHeadGroup * block_table[bs * seqlen_q_max + token_idx_in_batch] +
h;
} else {
out_block_table[token_idx * kHeadGroup * kSparseTopK *
kSparseBlockSize +
h * kSparseTopK * kSparseBlockSize + i] = 0;
}
}
}
}
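// Cost sketch for v1: the wrapper launches one thread per token, so with the
// commented defaults above (kHeadGroup = 2, kSparseTopK = 96,
// kSparseBlockSize = 64) each thread writes 2 * 96 * 64 = 12288 output
// entries and re-reads topk_idx on every iteration.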
// v2: each thread computes 64 elements of out_block_table.
// This lets each topk_idx entry be read once while all of its corresponding
// out_block_table elements are computed, reducing memory accesses.
template <int kSparseTopK>
__global__ void
get_block_table_cuda_v2(const int *topk_idx, const int *block_table,
const int *token_to_bs, const int *token_pos_in_bs,
const int *seqlen_q, int *out_block_table,
const int seqlen_q_max, const int token_num) {
int token_idx =
(blockIdx.x * blockDim.x + threadIdx.x) / (kSparseTopK * kHeadGroup);
if (token_idx >= token_num)
return;
int head_group_idx =
((blockIdx.x * blockDim.x + threadIdx.x) / kSparseTopK) % kHeadGroup;
int topk_idx_in_head = (blockIdx.x * blockDim.x + threadIdx.x) % kSparseTopK;
int bs = token_to_bs[token_idx];
int pos_in_bs = token_pos_in_bs[token_idx];
int seqlen_q_bs = seqlen_q[bs];
int sparse_block_idx = topk_idx[head_group_idx * token_num * kSparseTopK +
token_idx * kSparseTopK + topk_idx_in_head];
if (sparse_block_idx < 0)
return;
for (int i = 0; i < kSparseBlockSize; i++) {
int token_idx_in_batch = sparse_block_idx * kSparseBlockSize + i;
if (token_idx_in_batch < seqlen_q_bs && token_idx_in_batch < pos_in_bs) {
out_block_table[token_idx * kHeadGroup * kSparseTopK * kSparseBlockSize +
head_group_idx * kSparseTopK * kSparseBlockSize +
topk_idx_in_head * kSparseBlockSize + i] =
kHeadGroup * block_table[bs * seqlen_q_max + token_idx_in_batch] +
head_group_idx;
} else {
out_block_table[token_idx * kHeadGroup * kSparseTopK * kSparseBlockSize +
head_group_idx * kSparseTopK * kSparseBlockSize +
topk_idx_in_head * kSparseBlockSize + i] = 0;
}
}
}
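// Note on the v2 access pattern: adjacent threads handle adjacent topk slots,
// so at each loop iteration their writes land kSparseBlockSize ints apart and
// are not coalesced. v3 below assigns one thread per output element instead,
// which makes the writes of a warp contiguous.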
// v3: optimized for decode. Each thread computes 1 element of
// out_block_table. Block size is 1024 threads, so shared memory caches
// 1024 / 64 = 16 (= kTopkPerBlock) topk indices per block.
template <int kSparseTopK>
__global__ void
get_block_table_cuda_v3(const int *topk_idx, const int *block_table,
const int *token_to_bs, const int *token_pos_in_bs,
const int *seqlen_q, int *out_block_table,
const int seqlen_q_max, const int token_num) {
constexpr int kBlockPerTokenHead = kSparseTopK / kTopkPerBlock;
// calc 16 topk -> 1024 output
__shared__ int topk_idx_share[kTopkPerBlock];
const int tidx = threadIdx.x;
const int bidx = blockIdx.x;
if (threadIdx.x < kTopkPerBlock) {
topk_idx_share[tidx] = topk_idx[bidx * kTopkPerBlock + tidx];
}
__syncthreads();
const int head_group_idx = (bidx / kBlockPerTokenHead) / token_num;
const int token_idx = (bidx / kBlockPerTokenHead) % token_num;
const int topk_idx_in_head =
bidx % kBlockPerTokenHead * kTopkPerBlock + tidx / kSparseBlockSize;
  const int sparse_block_idx = topk_idx_share[tidx / kSparseBlockSize];
  // Negative topk entries are padding (v1/v2 guard this case too); skip them
  // and leave the output at its pre-zeroed value rather than computing a
  // negative block_table index below.
  if (sparse_block_idx < 0)
    return;
const int token_idx_src =
sparse_block_idx * kSparseBlockSize + tidx % kSparseBlockSize;
const int token_idx_dst =
token_idx * kHeadGroup * kSparseTopK * kSparseBlockSize +
head_group_idx * kSparseTopK * kSparseBlockSize +
topk_idx_in_head * kSparseBlockSize + tidx % kSparseBlockSize;
const int bs = token_to_bs[token_idx];
const int pos_in_bs = token_pos_in_bs[token_idx];
const int seqlen_q_bs = seqlen_q[bs];
if (token_idx_src < seqlen_q_bs && token_idx_src < pos_in_bs) {
out_block_table[token_idx_dst] =
kHeadGroup * block_table[bs * seqlen_q_max + token_idx_src] +
head_group_idx;
} else {
out_block_table[token_idx_dst] = 0;
}
}
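// v3 performs no bounds checks: the launch in get_block_table_v3_wrapper
// exactly tiles the output only when kSparseTopK is a multiple of
// kTopkPerBlock (e.g. 96 = 6 * 16), in which case every block owns exactly
// kTopkPerBlock topk entries and 1024 output elements.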
torch::Tensor get_block_table_v1_wrapper(
const torch::Tensor &topk_idx, // [head_group, token_num, kSparseTopK]
const torch::Tensor &block_table, // [batch_size, seqlen_q_max]
const torch::Tensor &token_to_bs, // [token_num]
const torch::Tensor &token_pos_in_bs, // [token_num]
const torch::Tensor &seqlen_q, // [batch_size]
const int topk) {
TORCH_CHECK(topk_idx.is_cuda(), "topk_idx must be a CUDA tensor");
  TORCH_CHECK(topk_idx.dtype() == torch::kInt, "topk_idx must be int32");
int token_num = topk_idx.size(1);
int seqlen_q_max = block_table.size(1);
const int batch_size = block_table.size(0);
const int BLOCK_SIZE = topk * kSparseBlockSize;
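  // BLOCK_SIZE here is the last output dimension (entries per token per head
  // group), not the CUDA thread-block size.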
TORCH_CHECK(topk_idx.sizes() ==
torch::IntArrayRef({kHeadGroup, token_num, topk}),
"topk_idx shape incorrect");
TORCH_CHECK(block_table.sizes() ==
torch::IntArrayRef({batch_size, seqlen_q_max}),
"block_table shape incorrect");
TORCH_CHECK(token_to_bs.size(0) == token_num, "token_to_bs size incorrect");
torch::Tensor out_block_table =
torch::zeros({token_num, kHeadGroup, BLOCK_SIZE},
                   topk_idx.options() // inherit dtype and device
)
.contiguous();
const int THREADS_PER_BLOCK = 256;
const int NUM_BLOCKS =
(token_num + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
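  // VALUE_SPLITS_SWITCH (from static_switch.h) dispatches the runtime `topk`
  // value to the compile-time kSparseTopK template parameter; see
  // static_switch.h for the supported values.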
VALUE_SPLITS_SWITCH(topk, kSparseTopK, [&]() {
auto kernel = get_block_table_cuda_v1<kSparseTopK>;
kernel<<<NUM_BLOCKS, THREADS_PER_BLOCK, 0, stream>>>(
topk_idx.data_ptr<int>(), block_table.data_ptr<int>(),
token_to_bs.data_ptr<int>(), token_pos_in_bs.data_ptr<int>(),
seqlen_q.data_ptr<int>(), out_block_table.data_ptr<int>(), seqlen_q_max,
token_num);
});
// cudaDeviceSynchronize();
return out_block_table;
}
torch::Tensor get_block_table_v2_wrapper(
const torch::Tensor &topk_idx, // [head_group, token_num, kSparseTopK]
const torch::Tensor &block_table, // [batch_size, seqlen_q_max]
const torch::Tensor &token_to_bs, // [token_num]
const torch::Tensor &token_pos_in_bs, // [token_num]
const torch::Tensor &seqlen_q, // [batch_size]
const int topk) {
TORCH_CHECK(topk_idx.is_cuda(), "topk_idx must be a CUDA tensor");
  TORCH_CHECK(topk_idx.dtype() == torch::kInt, "topk_idx must be int32");
int token_num = topk_idx.size(1);
int seqlen_q_max = block_table.size(1);
const int batch_size = block_table.size(0);
const int BLOCK_SIZE = topk * kSparseBlockSize;
TORCH_CHECK(topk_idx.sizes() ==
torch::IntArrayRef({kHeadGroup, token_num, topk}),
"topk_idx shape incorrect");
TORCH_CHECK(block_table.sizes() ==
torch::IntArrayRef({batch_size, seqlen_q_max}),
"block_table shape incorrect");
TORCH_CHECK(token_to_bs.size(0) == token_num, "token_to_bs size incorrect");
torch::Tensor out_block_table =
torch::zeros({token_num, kHeadGroup, BLOCK_SIZE},
                   topk_idx.options() // inherit dtype and device
)
.contiguous();
const int THREADS_PER_BLOCK = 1024;
const int NUM_BLOCKS =
(token_num * kHeadGroup * topk + THREADS_PER_BLOCK - 1) /
THREADS_PER_BLOCK;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
VALUE_SPLITS_SWITCH(topk, kSparseTopK, [&]() {
auto kernel = get_block_table_cuda_v2<kSparseTopK>;
kernel<<<NUM_BLOCKS, THREADS_PER_BLOCK, 0, stream>>>(
topk_idx.data_ptr<int>(), block_table.data_ptr<int>(),
token_to_bs.data_ptr<int>(), token_pos_in_bs.data_ptr<int>(),
seqlen_q.data_ptr<int>(), out_block_table.data_ptr<int>(), seqlen_q_max,
token_num);
});
// cudaDeviceSynchronize();
return out_block_table;
}
torch::Tensor get_block_table_v3_wrapper(
const torch::Tensor &topk_idx, // [head_group, token_num, kSparseTopK]
const torch::Tensor &block_table, // [batch_size, seqlen_q_max]
const torch::Tensor &token_to_bs, // [token_num]
const torch::Tensor &token_pos_in_bs, // [token_num]
const torch::Tensor &seqlen_q, // [batch_size]
const int topk) {
TORCH_CHECK(topk_idx.is_cuda(), "topk_idx must be a CUDA tensor");
  TORCH_CHECK(topk_idx.dtype() == torch::kInt, "topk_idx must be int32");
int token_num = topk_idx.size(1);
int seqlen_q_max = block_table.size(1);
const int batch_size = block_table.size(0);
const int BLOCK_SIZE = topk * kSparseBlockSize;
TORCH_CHECK(topk_idx.sizes() ==
torch::IntArrayRef({kHeadGroup, token_num, topk}),
"topk_idx shape incorrect");
TORCH_CHECK(block_table.sizes() ==
torch::IntArrayRef({batch_size, seqlen_q_max}),
"block_table shape incorrect");
TORCH_CHECK(token_to_bs.size(0) == token_num, "token_to_bs size incorrect");
torch::Tensor out_block_table =
torch::zeros({token_num, kHeadGroup, BLOCK_SIZE},
                   topk_idx.options() // inherit dtype and device
)
.contiguous();
const int THREADS_PER_BLOCK = 1024;
const int NUM_BLOCKS = (token_num * kHeadGroup * topk * kSparseBlockSize +
THREADS_PER_BLOCK - 1) /
THREADS_PER_BLOCK;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
VALUE_SPLITS_SWITCH(topk, kSparseTopK, [&]() {
auto kernel = get_block_table_cuda_v3<kSparseTopK>;
kernel<<<NUM_BLOCKS, THREADS_PER_BLOCK, 0, stream>>>(
topk_idx.data_ptr<int>(), block_table.data_ptr<int>(),
token_to_bs.data_ptr<int>(), token_pos_in_bs.data_ptr<int>(),
seqlen_q.data_ptr<int>(), out_block_table.data_ptr<int>(), seqlen_q_max,
token_num);
});
// cudaDeviceSynchronize();
return out_block_table;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("get_block_table_v1", &get_block_table_v1_wrapper,
"Sparse Attention Block Table Getter (CUDA)");
m.def("get_block_table_v2", &get_block_table_v2_wrapper,
"Sparse Attention Block Table Getter (CUDA)");
m.def("get_block_table_v3", &get_block_table_v3_wrapper,
"Sparse Attention Block Table Getter (CUDA)");
}
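// Hypothetical usage sketch (assumes the extension is compiled and imported
// as `get_table_kernel`, e.g. via torch.utils.cpp_extension.load):
//
//   out = get_table_kernel.get_block_table_v3(
//       topk_idx, block_table, token_to_bs, token_pos_in_bs, seqlen_q, topk)
//
// All tensor arguments should be contiguous int32 CUDA tensors with the
// shapes documented at the top of this file, and `topk` must be a value
// handled by VALUE_SPLITS_SWITCH in static_switch.h.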