use super::{args::Meta, Args, AttentionMLA};
use crate::{
    dyn_, fuesd_softmax, get_static, mat_mul, rearrange, ByteOf, Hardware, LaunchError, QueueAlloc,
    SchemeError, TensorLayout, Workspace, WorkspaceCollector,
};
use ndarray_layout::ArrayLayout;
use std::marker::PhantomData;

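/// Multi-head latent attention (MLA) operator assembled from this crate's
/// `mat_mul`, `fuesd_softmax` and `rearrange` primitives. `rearrange` is part
/// of the composition but is not yet used by `launch`.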
pub struct Operator<Hardware, MatMul, Softmax, Rearrange> {
    mat_mul: MatMul,
    softmax: Softmax,
    rearrange: Rearrange,
    _phantom: PhantomData<Hardware>,
}

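// Marker impl: this composition is advertised as an `AttentionMLA` operator.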
impl<H, M, S, R> AttentionMLA<H> for Operator<H, M, S, R>
where
    H: Hardware,
    M: mat_mul::MatMul<H>,
    S: fuesd_softmax::FusedSoftmax<H>,
    R: rearrange::Rearrange<H>,
{
}

impl<H, M, S, R> crate::Operator for Operator<H, M, S, R>
where
    H: Hardware,
    M: mat_mul::MatMul<H>,
    S: fuesd_softmax::FusedSoftmax<H>,
    R: rearrange::Rearrange<H>,
{
    type Hardware = H;
    type TopoNode = H;
    type Args = Args<H>;

    fn new(node: &Self::TopoNode) -> Self {
        Self {
            mat_mul: M::new(node),
            softmax: S::new(node),
            rearrange: R::new(node),
            _phantom: PhantomData,
        }
    }

    fn scheme(
        &mut self,
        _args: &Self::Args,
        _max_workspace_size: usize,
    ) -> Result<usize, SchemeError> {
        // TODO: plan and report the workspace needed for the intermediate
        // att_w / attn_t buffers instead of deferring allocation to `launch`.
        Ok(0)
    }

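    /// Absorbed MLA attention: accumulate scores from the rope (`qr`·`kr^T`)
    /// and latent (`q`·`kv^T`) parts, softmax in place, multiply by `kv` in the
    /// latent space, then project back to `dv` through `absorb`.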
    fn launch<QA>(
        &self,
        args: &Self::Args,
        workspace: &mut [ByteOf<Self::Hardware>],
        queue_alloc: &QA,
    ) -> Result<(), LaunchError>
    where
        QA: QueueAlloc<Hardware = Self::Hardware>,
    {
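        // Shape metadata: nh heads, seq query tokens, att attended (kv) tokens,
        // dkv latent/compressed-kv dim, dv value dim, dr extra rope dim of qr/kr.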
        let Meta {
            dt,
            nh,
            seq,
            att,
            dkv,
            dv,
            dr,
        } = args.meta()?;
        let Args {
            q_layout,
            q_base,
            kv_layout,
            kv_base,
            absorb_layout,
            absorb_base,
            qr_layout,
            qr_base,
            kr_layout,
            kr_base,
            o_layout,
            o_base,
            mask,
        } = args;

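        // Destructure the static strides of kv ([nh, att, dkv]), kr ([nh, att, dr]),
        // absorb ([nh, dv, dkv]) and o ([nh, seq, dv]).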
        let &[nh_skv, att_skv, dkv_skv] = kv_layout.strides() else {
            unreachable!()
        };
        let &[nh_skr, att_skr, dr_skr] = kr_layout.strides() else {
            unreachable!()
        };
        let &[nh_sa, dv_sa, dkv_sa] = absorb_layout.strides() else {
            unreachable!()
        };
        let &[nh_so, seq_so, dv_so] = o_layout.strides() else {
            unreachable!()
        };
        let ele = dt.nbytes();
        get_static! {
            nh seq att dkv dv dr
            nh_skv att_skv dkv_skv
            nh_skr att_skr dr_skr
            nh_sa dv_sa dkv_sa
            nh_so seq_so dv_so
        };

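        // Helper to view a 3-D tensor with explicit strides; the transposes below
        // make the B operands act as kv^T, kr^T and absorb^T in the mat_mul calls.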
        #[inline(always)]
        fn layout(shape: [usize; 3], strides: [isize; 3]) -> ArrayLayout<3> {
            ArrayLayout::new(&shape, &strides, 0)
        }
        let kv_first_layout = layout([nh, att, dkv], [nh_skv, att_skv, dkv_skv]).transpose(&[2, 1]);
        let kr_layout = layout([nh, att, dr], [nh_skr, att_skr, dr_skr]).transpose(&[2, 1]);
        let a_layout = layout([nh, dv, dkv], [nh_sa, dv_sa, dkv_sa]).transpose(&[2, 1]);
        let o_layout = layout([nh, seq, dv], [nh_so, seq_so, dv_so]).transpose(&[1, 0]);
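        // Scratch buffers: att_w holds the [nh, seq, att] score matrix and
        // attn_t the [nh, seq, dkv] latent attention output.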
        let att_w_layout = TensorLayout::new_contiguous(dt, &[nh, seq, att]);
        let attn_t_layout = TensorLayout::new_contiguous(dt, &[nh, seq, dkv]);
        let att_w_size = nh * seq * att * ele;
        let attn_t_size = nh * seq * dkv * ele;
        let mut workspace = Workspace::new(queue_alloc, workspace, att_w_size + attn_t_size);
        let (att_w_buf, workspace) = workspace.split_at_mut(att_w_size);
        let (attn_t_buf, workspace) = workspace.split_at_mut(attn_t_size);

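        // Re-wrap the transposed ArrayLayout views as TensorLayouts for the kernels.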
        let kv_first_layout =
            TensorLayout::new(dt, kv_first_layout.shape(), kv_first_layout.strides());
        let kr_layout = TensorLayout::new(dt, kr_layout.shape(), kr_layout.strides());
        let a_layout = TensorLayout::new(dt, a_layout.shape(), a_layout.strides());
        let o_layout = TensorLayout::new(dt, o_layout.shape(), o_layout.strides());
        // att_w = (qr · kr^T + q · kv^T) / sqrt(dv + dr)
        self.mat_mul.launch(
            &mat_mul::Args {
                c_layout: att_w_layout.clone(),
                c_base: att_w_buf.as_mut_ptr(),
                beta: 0.,
                a_layout: qr_layout.clone(),
                a_base: *qr_base,
                b_layout: kr_layout.clone(),
                b_base: *kr_base,
                alpha: ((dv + dr) as f32).sqrt().recip(),
            },
            workspace,
            queue_alloc,
        )?;
        self.mat_mul.launch(
            &mat_mul::Args {
                c_layout: att_w_layout.clone(),
                c_base: att_w_buf.as_mut_ptr(),
                beta: 1.,
                a_layout: q_layout.clone(),
                a_base: *q_base,
                b_layout: kv_first_layout.clone(),
                b_base: *kv_base,
                alpha: ((dv + dr) as f32).sqrt().recip(),
            },
            workspace,
            queue_alloc,
        )?;

        // att_w = softmax(att_w), applied in place under the attention mask
        self.softmax.launch(
            &fuesd_softmax::Args {
                att_mask: *mask,
                att_layout: att_w_layout.clone(),
                att_base: att_w_buf.as_mut_ptr(),
            },
            workspace,
            queue_alloc,
        )?;
        // attn_t = att_w · kv  (attention output, still in the dkv latent space)
        self.mat_mul.launch(
            &mat_mul::Args {
                c_layout: attn_t_layout.clone(),
                c_base: attn_t_buf.as_mut_ptr(),
                beta: 0.,
                a_layout: att_w_layout.clone(),
                a_base: att_w_buf.as_ptr(),
                b_layout: kv_layout.clone(),
                b_base: *kv_base,
                alpha: 1.,
            },
            workspace,
            queue_alloc,
        )?;

        // o = attn_t · absorb^T  (project from the latent space back to dv)
        self.mat_mul.launch(
            &mat_mul::Args {
                c_layout: o_layout.clone(),
                c_base: *o_base,
                beta: 0.,
                a_layout: attn_t_layout.clone(),
                a_base: attn_t_buf.as_ptr(),
                b_layout: a_layout.clone(),
                b_base: *absorb_base,
                alpha: 1.,
            },
            workspace,
            queue_alloc,
        )?;

        Ok(())
    }
}