use crate::attention_mla_cached::args::Meta;
use crate::attention_mla_cached::{Args, AttentionMLACached};
use crate::{
    attention_mla, get_static, rearrange, shape_mismatch, ByteOf, Hardware, LaunchError,
    QueueAlloc, SchemeError, TensorLayout,
};
use ndarray_layout::ArrayLayout;
use std::marker::PhantomData;

/// Cached MLA attention operator: a `Rearrange` op appends the new tokens to
/// the KV/KR caches, then an `AttentionMLA` op runs over the full cached
/// sequence.
pub struct Operator<Hardware, Rearrange, Attention> {
    rearrange: Rearrange,
    attention: Attention,
    _phantom: PhantomData<Hardware>,
}

impl<H, R, A> AttentionMLACached<H> for Operator<H, R, A>
where
    H: Hardware,
    R: rearrange::Rearrange<H>,
    A: attention_mla::AttentionMLA<H>,
{
}

impl<H, R, A> crate::Operator for Operator<H, R, A>
where
    H: Hardware,
    R: rearrange::Rearrange<H>,
    A: attention_mla::AttentionMLA<H>,
{
    type Hardware = H;
    type TopoNode = H;
    type Args = Args<H>;

    fn new(node: &Self::TopoNode) -> Self {
        Self {
            rearrange: R::new(node),
            attention: A::new(node),
            _phantom: PhantomData,
        }
    }

    fn scheme(
        &mut self,
        _args: &Self::Args,
        _max_workspace_size: usize,
    ) -> Result<usize, SchemeError> {
        // TODO: plan the inner rearrange/attention schemes and report their
        // combined workspace requirement; for now no workspace is reserved.
        Ok(0)
    }

    fn launch<QA>(
        &self,
        args: &Self::Args,
        workspace: &mut [ByteOf<Self::Hardware>],
        queue_alloc: &QA,
    ) -> Result<(), LaunchError>
    where
        QA: QueueAlloc<Hardware = Self::Hardware>,
    {
        let Meta {
            dt,
            nh,
            seq,
            att: _, // recomputed below as `pos + seq`
            dkv,
            dv: _, // not needed here; the inner attention op derives it from its own args
            dr,
        } = args.meta()?;
        let Args {
            q_layout,
            q_base,
            kv_layout,
            kv_base,
            absorb_layout,
            absorb_base,
            qr_layout,
            qr_base,
            kr_layout,
            kr_base,
            o_layout,
            o_base,
            kv_cache_layout,
            kv_cache_base,
            kr_cache_layout,
            kr_cache_base,
            mask,
            pos,
        } = args;
        // These `let-else` destructures serve only as rank checks; `meta()`
        // has already validated the layouts, so a mismatch is unreachable.
        let &[_nh_skv, _att_skv, _dkv_skv] = kv_layout.strides() else {
            unreachable!()
        };
        let &[_nh_skr, _att_skr, _dr_skr] = kr_layout.strides() else {
            unreachable!()
        };
        let &[_nh_sa, _dv_sa, _dkv_sa] = absorb_layout.strides() else {
            unreachable!()
        };

        let &[_, buf_kv, _] = kv_cache_layout.shape() else {
            unreachable!()
        };
        let &[_, buf_kr, _] = kr_cache_layout.shape() else {
            unreachable!()
        };
        let &[nh_skvc, buf_skvc, dh_skvc] = kv_cache_layout.strides() else {
            unreachable!()
        };
        let &[nh_skrc, buf_skrc, dh_skrc] = kr_cache_layout.strides() else {
            unreachable!()
        };
        get_static! {
            nh seq dkv dr
            pos
            buf_kv buf_kr
            nh_skvc buf_skvc dh_skvc
            nh_skrc buf_skrc dh_skrc
        };

        // Check cache capacity: the caches must hold all `pos` previously
        // cached tokens plus the `seq` new ones.
        let att = pos + seq;
        if buf_kr < att || buf_kv < att {
            return Err(shape_mismatch("Out of cache buffer").into());
        }
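        // For example, with pos = 100 tokens already cached and seq = 8 new
        // tokens, att = 108, so both cache buffers need at least 108 slots
        // along the sequence axis.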
        // Append the new tokens to the KV/KR caches.
        #[inline(always)]
        fn layout(shape: [usize; 3], strides: [isize; 3]) -> ArrayLayout<3> {
            ArrayLayout::new(&shape, &strides, 0)
        }

        let kvc_layout = layout([nh, buf_kv, dkv], [nh_skvc, buf_skvc, dh_skvc]);
        let krc_layout = layout([nh, buf_kr, dr], [nh_skrc, buf_skrc, dh_skrc]);

        let kv_cat = kvc_layout.slice(1, pos, 1, seq);
        let kr_cat = krc_layout.slice(1, pos, 1, seq);
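        // `slice(axis, start, step, len)` narrows axis 1 (the sequence axis)
        // to the window [pos, pos + seq) without copying: only the resulting
        // layout's shape and offset change, so the rearranges below write the
        // new rows directly into the caches in place.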

        // Write the new compressed-KV rows into the cache window.
        self.rearrange.launch(
            &rearrange::Args {
                dst_layout: TensorLayout::new(dt, kv_cat.shape(), kv_cat.strides()),
                dst_base: unsafe { kv_cache_base.byte_add(kv_cat.offset() as _) },
                src_layout: kv_layout.clone(),
                src_base: *kv_base,
            },
            workspace,
            queue_alloc,
        )?;
        // Likewise for the rotary-position key rows.
        self.rearrange.launch(
            &rearrange::Args {
                dst_layout: TensorLayout::new(dt, kr_cat.shape(), kr_cat.strides()),
                dst_base: unsafe { kr_cache_base.byte_add(kr_cat.offset() as _) },
                src_layout: kr_layout.clone(),
                src_base: *kr_base,
            },
            workspace,
            queue_alloc,
        )?;
        // Run MLA attention over the full cached sequence [0, att).
        let kv_layout = kvc_layout.slice(1, 0, 1, att);
        let kr_layout = krc_layout.slice(1, 0, 1, att);
        // These slices start at row 0, so their offsets are zero and the
        // cache base pointers can be passed through unchanged.
        assert_eq!(kv_layout.offset(), 0);
        assert_eq!(kr_layout.offset(), 0);
        self.attention.launch(
            &attention_mla::Args {
                mask: *mask,
                q_layout: q_layout.clone(),
                q_base: *q_base,
                kv_layout: TensorLayout::new(dt, kv_layout.shape(), kv_layout.strides()),
                kv_base: *kv_cache_base,
                kr_layout: TensorLayout::new(dt, kr_layout.shape(), kr_layout.strides()),
                kr_base: *kr_cache_base,
                absorb_layout: absorb_layout.clone(),
                absorb_base: *absorb_base,
                qr_layout: qr_layout.clone(),
                qr_base: *qr_base,
                o_layout: o_layout.clone(),
                o_base: *o_base,
            },
            workspace,
            queue_alloc,
        )?;
        Ok(())
    }
}
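
// A minimal usage sketch. The hardware and sub-operator types named here
// (`Cpu`, `RearrangeOp`, `MlaOp`) are hypothetical placeholders, not items
// exported by this crate:
//
//     type Cached = Operator<Cpu, RearrangeOp, MlaOp>;
//
//     let mut op = <Cached as crate::Operator>::new(&node);
//     let _workspace_size = op.scheme(&args, usize::MAX)?; // currently Ok(0)
//     op.launch(&args, &mut workspace, &queue_alloc)?;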