@@ -14,7 +14,7 @@ use smallvec::SmallVec;
 use tracing::debug;
 
 use crate::builder::Builder;
-use crate::common::{AsCCharPtr, Funclet};
+use crate::common::Funclet;
 use crate::context::CodegenCx;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
@@ -435,13 +435,7 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
             template_str.push_str("\n.att_syntax\n");
         }
 
-        unsafe {
-            llvm::LLVMAppendModuleInlineAsm(
-                self.llmod,
-                template_str.as_c_char_ptr(),
-                template_str.len(),
-            );
-        }
+        llvm::append_module_inline_asm(self.llmod, template_str.as_bytes());
     }
 
     fn mangled_name(&self, instance: Instance<'tcx>) -> String {
@@ -482,67 +476,67 @@ pub(crate) fn inline_asm_call<'ll>(
 
     debug!("Asm Output Type: {:?}", output);
     let fty = bx.cx.type_func(&argtys, output);
+
     // Ask LLVM to verify that the constraints are well-formed.
-    let constraints_ok =
-        unsafe { llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len()) };
+    let constraints_ok = unsafe { llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr(), cons.len()) };
     debug!("constraint verification result: {:?}", constraints_ok);
-    if constraints_ok {
-        let v = unsafe {
-            llvm::LLVMRustInlineAsm(
-                fty,
-                asm.as_c_char_ptr(),
-                asm.len(),
-                cons.as_c_char_ptr(),
-                cons.len(),
-                volatile,
-                alignstack,
-                dia,
-                can_throw,
-            )
-        };
+    if !constraints_ok {
+        // LLVM has detected an issue with our constraints, so bail out.
+        return None;
+    }
 
-        let call = if !labels.is_empty() {
-            assert!(catch_funclet.is_none());
-            bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None)
-        } else if let Some((catch, funclet)) = catch_funclet {
-            bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None)
-        } else {
-            bx.call(fty, None, None, v, inputs, None, None)
-        };
+    let v = unsafe {
+        llvm::LLVMGetInlineAsm(
+            fty,
+            asm.as_ptr(),
+            asm.len(),
+            cons.as_ptr(),
+            cons.len(),
+            volatile,
+            alignstack,
+            dia,
+            can_throw,
+        )
+    };
 
-        // Store mark in a metadata node so we can map LLVM errors
-        // back to source locations. See #17552.
-        let key = "srcloc";
-        let kind = bx.get_md_kind_id(key);
+    let call = if !labels.is_empty() {
+        assert!(catch_funclet.is_none());
+        bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None)
+    } else if let Some((catch, funclet)) = catch_funclet {
+        bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None)
+    } else {
+        bx.call(fty, None, None, v, inputs, None, None)
+    };
 
-        // `srcloc` contains one 64-bit integer for each line of assembly code,
-        // where the lower 32 bits hold the lo byte position and the upper 32 bits
-        // hold the hi byte position.
-        let mut srcloc = vec![];
-        if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
-            // LLVM inserts an extra line to add the ".intel_syntax", so add
-            // a dummy srcloc entry for it.
-            //
-            // Don't do this if we only have 1 line span since that may be
-            // due to the asm template string coming from a macro. LLVM will
-            // default to the first srcloc for lines that don't have an
-            // associated srcloc.
-            srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0)));
-        }
-        srcloc.extend(line_spans.iter().map(|span| {
-            llvm::LLVMValueAsMetadata(
-                bx.const_u64(u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32)),
-            )
-        }));
-        let md = unsafe { llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len()) };
-        let md = bx.get_metadata_value(md);
-        llvm::LLVMSetMetadata(call, kind, md);
+    // Store mark in a metadata node so we can map LLVM errors
+    // back to source locations. See #17552.
+    let key = "srcloc";
+    let kind = bx.get_md_kind_id(key);
 
-        Some(call)
-    } else {
-        // LLVM has detected an issue with our constraints, bail out
-        None
+    // `srcloc` contains one 64-bit integer for each line of assembly code,
+    // where the lower 32 bits hold the lo byte position and the upper 32 bits
+    // hold the hi byte position.
+    let mut srcloc = vec![];
+    if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
+        // LLVM inserts an extra line to add the ".intel_syntax", so add
+        // a dummy srcloc entry for it.
+        //
+        // Don't do this if we only have 1 line span since that may be
+        // due to the asm template string coming from a macro. LLVM will
+        // default to the first srcloc for lines that don't have an
+        // associated srcloc.
+        srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0)));
     }
+    srcloc.extend(line_spans.iter().map(|span| {
+        llvm::LLVMValueAsMetadata(
+            bx.const_u64(u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32)),
+        )
+    }));
+    let md = unsafe { llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len()) };
+    let md = bx.get_metadata_value(md);
+    llvm::LLVMSetMetadata(call, kind, md);
+
+    Some(call)
 }
 
 /// If the register is an xmm/ymm/zmm register then return its index.
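
As a standalone illustration of the `srcloc` encoding described in the comments above (this sketch is not part of the change, and `pack_srcloc`/`unpack_srcloc` are hypothetical helper names used only for this example): each assembly line's span is packed into one `u64`, with the lo byte position in the lower 32 bits and the hi byte position in the upper 32 bits.

```rust
// Sketch of the srcloc packing scheme; not part of the patch.
// Each assembly line's span becomes one u64: lo position in the lower
// 32 bits, hi position in the upper 32 bits.
fn pack_srcloc(lo: u32, hi: u32) -> u64 {
    u64::from(lo) | (u64::from(hi) << 32)
}

fn unpack_srcloc(packed: u64) -> (u32, u32) {
    (packed as u32, (packed >> 32) as u32)
}

fn main() {
    let packed = pack_srcloc(10, 42);
    assert_eq!(unpack_srcloc(packed), (10, 42));
    println!("packed srcloc: {packed:#018x}");
}
```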