@@ -143,11 +143,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn count_insn(&self, category: &str) {
-        if self.cx().sess().codegen_stats() {
-            self.cx().stats.borrow_mut().n_llvm_insns += 1;
+        if self.sess().codegen_stats() {
+            self.stats.borrow_mut().n_llvm_insns += 1;
         }
-        if self.cx().sess().count_llvm_insns() {
-            *self.cx().stats
+        if self.sess().count_llvm_insns() {
+            *self.stats
                 .borrow_mut()
                 .llvm_insns
                 .entry(category.to_string())
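Every hunk in this diff drops an explicit `.cx()` call in favor of invoking context methods directly on the builder. That only compiles if `Builder` can stand in for `CodegenCx`, presumably via a `Deref` impl so that `sess()`, `get_intrinsic()`, `type_i8p()`, and friends resolve through auto-deref. A minimal sketch of that pattern, with simplified lifetimes and assumed field/method names:

```rust
use std::ops::Deref;

// Stand-ins for the real rustc_codegen_llvm types (names assumed).
struct CodegenCx; // type/const/intrinsic helpers live on the context

impl CodegenCx {
    fn type_i8p(&self) { /* returns the i8* LLVM type in the real code */ }
}

struct Builder<'a> {
    cx: &'a CodegenCx, // hypothetical field name
}

// With this impl, `bx.type_i8p()` auto-derefs to `bx.cx.type_i8p()`,
// which is what lets the explicit `.cx()` calls in the old code go away.
impl<'a> Deref for Builder<'a> {
    type Target = CodegenCx;
    fn deref(&self) -> &CodegenCx {
        self.cx
    }
}

fn main() {
    let cx = CodegenCx;
    let bx = Builder { cx: &cx };
    bx.type_i8p(); // resolves on CodegenCx through method-call auto-deref
}
```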
@@ -475,8 +475,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         use rustc::ty::{Int, Uint};
 
         let new_sty = match ty.sty {
-            Int(Isize) => Int(self.cx().tcx.sess.target.isize_ty),
-            Uint(Usize) => Uint(self.cx().tcx.sess.target.usize_ty),
+            Int(Isize) => Int(self.tcx.sess.target.isize_ty),
+            Uint(Usize) => Uint(self.tcx.sess.target.usize_ty),
             ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
             _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
         };
@@ -529,7 +529,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             },
         };
 
-        let intrinsic = self.cx().get_intrinsic(&name);
+        let intrinsic = self.get_intrinsic(&name);
         let res = self.call(intrinsic, &[lhs, rhs], None);
         (
             self.extract_value(res, 0),
@@ -637,7 +637,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let vr = scalar.valid_range.clone();
         match scalar.value {
             layout::Int(..) => {
-                let range = scalar.valid_range_exclusive(bx.cx());
+                let range = scalar.valid_range_exclusive(bx);
                 if range.start != range.end {
                     bx.range_metadata(load, range);
                 }
@@ -676,7 +676,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 let load = self.load(llptr, align);
                 scalar_load_metadata(self, load, scalar);
                 if scalar.is_bool() {
-                    self.trunc(load, self.cx().type_i1())
+                    self.trunc(load, self.type_i1())
                 } else {
                     load
                 }
@@ -696,7 +696,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
 
     fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
-        if self.cx().sess().target.target.arch == "amdgpu" {
+        if self.sess().target.target.arch == "amdgpu" {
             // amdgpu/LLVM does something weird and thinks a i64 value is
             // split into a v2i32, halving the bitwidth LLVM expects,
             // tripping an assertion. So, for now, just disable this
@@ -942,7 +942,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }).collect::<Vec<_>>();
 
         debug!("Asm Output Type: {:?}", output);
-        let fty = self.cx().type_func(&argtys[..], output);
+        let fty = self.type_func(&argtys[..], output);
         unsafe {
             // Ask LLVM to verify that the constraints are well-formed.
             let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr());
@@ -970,14 +970,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memcpy.
             let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
             self.store_with_flags(val, ptr, dst_align, flags);
             return;
         }
-        let size = self.intcast(size, self.cx().type_isize(), false);
+        let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.cx().type_i8p());
-        let src = self.pointercast(src, self.cx().type_i8p());
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
                                       src, src_align.bytes() as c_uint, size, is_volatile);
@@ -990,14 +990,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memmove.
             let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
             self.store_with_flags(val, ptr, dst_align, flags);
             return;
         }
-        let size = self.intcast(size, self.cx().type_isize(), false);
+        let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.cx().type_i8p());
-        let src = self.pointercast(src, self.cx().type_i8p());
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
                                        src, src_align.bytes() as c_uint, size, is_volatile);
@@ -1012,12 +1012,12 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         align: Align,
         flags: MemFlags,
     ) {
-        let ptr_width = &self.cx().sess().target.target.target_pointer_width;
+        let ptr_width = &self.sess().target.target.target_pointer_width;
         let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
-        let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
-        let ptr = self.pointercast(ptr, self.cx().type_i8p());
-        let align = self.cx().const_u32(align.bytes() as u32);
-        let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
+        let llintrinsicfn = self.get_intrinsic(&intrinsic_key);
+        let ptr = self.pointercast(ptr, self.type_i8p());
+        let align = self.const_u32(align.bytes() as u32);
+        let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE));
         self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
     }
 
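A quick sanity check of the intrinsic-key construction in the memset hunk above: the key is derived from the target's pointer width, so a 64-bit target selects the `i64`-length variant of LLVM's memset intrinsic. A standalone illustration with an assumed width (the real value comes from the target spec):

```rust
fn main() {
    // Assumed value; the real code reads
    // self.sess().target.target.target_pointer_width.
    let ptr_width = "64";
    let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
    assert_eq!(intrinsic_key, "llvm.memset.p0i8.i64");
}
```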
@@ -1083,10 +1083,10 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
         unsafe {
             let elt_ty = self.cx.val_ty(elt);
-            let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64));
+            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
             let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
-            let vec_i32_ty = self.cx().type_vector(self.cx().type_i32(), num_elts as u64);
-            self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
+            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
+            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
         }
     }
 
@@ -1397,7 +1397,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let param_tys = self.cx.func_params_types(fn_ty);
 
         let all_args_match = param_tys.iter()
-            .zip(args.iter().map(|&v| self.cx().val_ty(v)))
+            .zip(args.iter().map(|&v| self.val_ty(v)))
             .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
 
         if all_args_match {
@@ -1408,7 +1408,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             .zip(args.iter())
             .enumerate()
             .map(|(i, (expected_ty, &actual_val))| {
-                let actual_ty = self.cx().val_ty(actual_val);
+                let actual_ty = self.val_ty(actual_val);
                 if expected_ty != actual_ty {
                     debug!("Type mismatch in function call of {:?}. \
                             Expected {:?} for param {}, got {:?}; injecting bitcast",