use crate::v2::{
    asm::MissingMethodPatcher, cilnode::MethodKind, cilroot::BranchCond, BasicBlock, BinOp,
    CILNode, CILRoot, ClassRef, Int, MethodDef, MethodImpl, MethodRef, Type,
};

use super::{
    super::{Assembly, NodeIdx},
    math::{int_max, int_min},
};

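/// Registers a patch for a missing `atomic_{op_name}_{int}` method. The
/// generated body is an `Interlocked::CompareExchange` retry loop: it applies
/// `op` to the last observed value and the caller-supplied operand, tries to
/// publish the result, and repeats until no other thread raced the update,
/// finally returning the value that was in memory before the update.
///
/// Conceptually, for `op_name = "xor"` and `int = Int::U32`, the emitted
/// method behaves like this Rust sketch (illustrative only, not code that is
/// generated or shipped):
///
/// ```ignore
/// use std::sync::atomic::{AtomicU32, Ordering};
///
/// fn atomic_xor_u32(addr: &AtomicU32, operand: u32) -> u32 {
///     // Local 1 starts zero-initialized, so the first CAS usually just
///     // fetches the real value and a later one publishes the update.
///     let mut got = 0;
///     loop {
///         let expected = got;
///         match addr.compare_exchange(
///             expected,
///             expected ^ operand,
///             Ordering::SeqCst,
///             Ordering::SeqCst,
///         ) {
///             Ok(prev) => return prev,  // exchange succeeded: return the old value
///             Err(prev) => got = prev,  // lost the race: retry with the fresh value
///         }
///     }
/// }
/// ```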
pub fn generate_atomic(
    asm: &mut Assembly,
    patcher: &mut MissingMethodPatcher,
    op_name: &str,
    op: impl Fn(&mut Assembly, NodeIdx, NodeIdx, Int) -> NodeIdx + 'static,
    int: Int,
) {
    let name = asm.alloc_string(format!("atomic_{op_name}_{int}", int = int.name()));
    let generator = move |_, asm: &mut Assembly| {
        // Nodes reused throughout the method body
        let ldloc_0 = asm.alloc_node(CILNode::LdLoc(0));
        let ldloc_1 = asm.alloc_node(CILNode::LdLoc(1));
        let ldarg_0 = asm.alloc_node(CILNode::LdArg(0));
        let ldarg_1 = asm.alloc_node(CILNode::LdArg(1));

        // The new value this atomic computes: `op` applied to the last observed
        // value (local 0) and the caller-supplied operand (argument 1).
        let op = op(asm, ldloc_0, ldarg_1, int);

        let tpe = Type::Int(int);
        let tref = asm.nref(tpe);

        let cmpxchng_sig = asm.sig([tref, tpe, tpe], tpe);
        let interlocked = ClassRef::interlocked(asm);
        let interlocked = asm.alloc_class_ref(interlocked);
        let compare_exchange = asm.alloc_string("CompareExchange");
        let mref = asm.alloc_methodref(MethodRef::new(
            interlocked,
            compare_exchange,
            cmpxchng_sig,
            MethodKind::Static,
            vec![].into(),
        ));
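        // CompareExchange(addr, new_value, comparand): stores `op`'s result at
        // `addr` only if `addr` still holds the comparand (local 0), and always
        // returns the value observed at `addr`.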
        let call = asm.alloc_node(CILNode::Call(Box::new((
            mref,
            Box::new([ldarg_0, op, ldloc_0]),
        ))));

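        // Block 0: the retry loop. Copy the last observed value into local 0,
        // attempt the exchange, and branch back to block 0 while the comparand
        // (local 0) and the value CompareExchange returned (local 1) differ;
        // otherwise fall through to block 1.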
        let loop_block = vec![
            asm.alloc_root(CILRoot::StLoc(0, ldloc_1)),
            asm.alloc_root(CILRoot::StLoc(1, call)),
            asm.alloc_root(CILRoot::Branch(Box::new((
                0,
                0,
                Some(BranchCond::Ne(ldloc_0, ldloc_1)),
            )))),
            asm.alloc_root(CILRoot::Branch(Box::new((1, 0, None)))),
        ];
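        // Block 1: the exchange succeeded, so local 0 holds the value that was
        // in memory before the update; return it.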
        let exit_block = vec![asm.alloc_root(CILRoot::Ret(ldloc_0))];
        MethodImpl::MethodBody {
            blocks: vec![
                BasicBlock::new(loop_block, 0, None),
                BasicBlock::new(exit_block, 1, None),
            ],
            locals: vec![(None, asm.alloc_type(tpe)), (None, asm.alloc_type(tpe))],
        }
    };
    patcher.insert(name, Box::new(generator));
}
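/// Registers the `op_name` atomic helper for every integer type in
/// `ATOMIC_INTS`.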
pub fn generate_atomic_for_ints(
    asm: &mut Assembly,
    patcher: &mut MissingMethodPatcher,
    op_name: &str,
    op: impl Fn(&mut Assembly, NodeIdx, NodeIdx, Int) -> NodeIdx + 'static + Clone,
) {
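    // The 32-bit, 64-bit, and pointer-sized integers, both signed and unsigned.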
    const ATOMIC_INTS: [Int; 6] = [
        Int::U32,
        Int::U64,
        Int::USize,
        Int::I32,
        Int::I64,
        Int::ISize,
    ];
    for int in ATOMIC_INTS {
        generate_atomic(asm, patcher, op_name, op.clone(), int)
    }
}
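/// Registers all the atomics implemented via the CompareExchange retry loop:
/// xor, nand, max, and min.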
pub fn generate_all_atomics(asm: &mut Assembly, patcher: &mut MissingMethodPatcher) {
    // XOR
    generate_atomic_for_ints(asm, patcher, "xor", |asm, lhs, rhs, _| {
        asm.alloc_node(CILNode::BinOp(lhs, rhs, BinOp::XOr))
    });
    // NAND
    generate_atomic_for_ints(asm, patcher, "nand", |asm, lhs, rhs, _| {
        let and = asm.alloc_node(CILNode::BinOp(lhs, rhs, BinOp::And));
        asm.alloc_node(CILNode::UnOp(and, crate::v2::cilnode::UnOp::Not))
    });
    // Max
    generate_atomic_for_ints(asm, patcher, "max", int_max);
    // Min
    generate_atomic_for_ints(asm, patcher, "min", int_min)
}
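// For reference, the emitted `atomic_xor_u32` corresponds roughly to the
// following CIL (the prologue before IL_0006 is elided):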
/*
    .method public hidebysig static
        uint32 atomic_xor (
            uint32& addr,
            uint32 xorand
        ) cil managed
    {
        // Method begins at RVA 0x2050
        // Code size 25 (0x19)
        .maxstack 3
        .locals (
            [0] uint32 addr_val,
            [1] uint32 got
        )

        // loop start (head: IL_0013)
        IL_0006: ldloc.1
        IL_0007: stloc.0

        IL_0008: ldarg.0
        IL_0009: ldloc.0
        IL_000a: ldarg.1
        IL_000b: xor
        IL_000c: ldloc.0
        IL_000d: call uint32 [System.Threading]System.Threading.Interlocked::CompareExchange(uint32&, uint32, uint32)
        IL_0012: stloc.1

        IL_0013: ldloc.0
        IL_0014: ldloc.1
        IL_0015: bne.un.s IL_0006
        // end loop
        IL_0017: ldloc.0
        IL_0018: ret
    } // end of method Tmp::atomic_xor
*/