@@ -596,8 +596,6 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
   const Value *CondVal = BrInst.getCondition();
   MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
 
-  const auto &TLI = *MF->getSubtarget().getTargetLowering();
-
   // If this is a series of conditions that are or'd or and'd together, emit
   // this as a sequence of branches instead of setcc's with and/or operations.
   // As long as jumps are not expensive (exceptions for multi-use logic ops,
@@ -617,7 +615,7 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
   //     jle foo
   using namespace PatternMatch;
   const Instruction *CondI = dyn_cast<Instruction>(CondVal);
-  if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
+  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
       !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
     Value *Vec;
@@ -1385,9 +1383,8 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
     return true;
   }
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
   MachineMemOperand::Flags Flags =
-      TLI.getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
+      TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
   if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
     if (AA->pointsToConstantMemory(
             MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
@@ -1434,8 +1431,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
     return true;
   }
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
+  MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);
 
   for (unsigned i = 0; i < Vals.size(); ++i) {
     Register Addr;
@@ -1779,8 +1775,7 @@ void IRTranslator::getStackGuard(Register DstReg,
   auto MIB =
       MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
+  Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
   if (!Global)
     return;
 
@@ -2111,9 +2106,8 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     // does. Simplest intrinsic ever!
     return true;
   case Intrinsic::vastart: {
-    auto &TLI = *MF->getSubtarget().getTargetLowering();
     Value *Ptr = CI.getArgOperand(0);
-    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
+    unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
     Align Alignment = getKnownAlignment(Ptr, *DL);
 
     MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
@@ -2189,14 +2183,13 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
   case Intrinsic::fmuladd: {
     const TargetMachine &TM = MF->getTarget();
-    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
     Register Dst = getOrCreateVReg(CI);
     Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
     Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
     Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
-        TLI.isFMAFasterThanFMulAndFAdd(*MF,
-                                       TLI.getValueType(*DL, CI.getType()))) {
+        TLI->isFMAFasterThanFMulAndFAdd(*MF,
+                                        TLI->getValueType(*DL, CI.getType()))) {
       // TODO: Revisit this to see if we should move this part of the
       // lowering to the combiner.
       MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
@@ -2254,10 +2247,9 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
     return true;
   case Intrinsic::stackprotector: {
-    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
     Register GuardVal;
-    if (TLI.useLoadStackGuardNode()) {
+    if (TLI->useLoadStackGuardNode()) {
       GuardVal = MRI->createGenericVirtualRegister(PtrTy);
       getStackGuard(GuardVal, MIRBuilder);
     } else
@@ -2635,10 +2627,9 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
   }
 
   // Add a MachineMemOperand if it is a target mem intrinsic.
-  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
   TargetLowering::IntrinsicInfo Info;
   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
-  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
+  if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
     Align Alignment = Info.align.value_or(
         DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
     LLT MemTy = Info.memVT.isSimple()
@@ -2818,10 +2809,9 @@ bool IRTranslator::translateLandingPad(const User &U,
 
   // If there aren't registers to copy the values into (e.g., during SjLj
   // exceptions), then don't bother.
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
   const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
-  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
-      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
+  if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
+      TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
     return true;
 
   // If landingpad's return type is token type, we don't create DAG nodes
@@ -2852,15 +2842,15 @@ bool IRTranslator::translateLandingPad(const User &U,
   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
 
   // Mark exception register as live in.
-  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
+  Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
   if (!ExceptionReg)
     return false;
 
   MBB.addLiveIn(ExceptionReg);
   ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
 
-  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
+  Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
   if (!SelectorReg)
     return false;
 
@@ -2986,8 +2976,7 @@ bool IRTranslator::translateExtractElement(const User &U,
 
   Register Res = getOrCreateVReg(U);
   Register Val = getOrCreateVReg(*U.getOperand(0));
-  const auto &TLI = *MF->getSubtarget().getTargetLowering();
-  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
+  unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
   Register Idx;
   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
     if (CI->getBitWidth() != PreferredVecIdxWidth) {
@@ -3039,8 +3028,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
+  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
 
   auto Res = getOrCreateVRegs(I);
   Register OldValRes = Res[0];
@@ -3061,8 +3049,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
 bool IRTranslator::translateAtomicRMW(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
+  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
 
   Register Res = getOrCreateVReg(I);
   Register Addr = getOrCreateVReg(*I.getPointerOperand());
@@ -3302,8 +3289,7 @@ bool IRTranslator::translate(const Instruction &Inst) {
   CurBuilder->setDebugLoc(Inst.getDebugLoc());
   CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  if (TLI.fallBackToDAGISel(Inst))
+  if (TLI->fallBackToDAGISel(Inst))
     return false;
 
   switch (Inst.getOpcode()) {
@@ -3454,9 +3440,8 @@ bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
   // Check if we need to generate stack-protector guard checks.
   StackProtector &SP = getAnalysis<StackProtector>();
   if (SP.shouldEmitSDCheck(BB)) {
-    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
     bool FunctionBasedInstrumentation =
-        TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
+        TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
     SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
   }
   // Handle stack protector.
@@ -3501,10 +3486,9 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                           MachineBasicBlock *ParentBB) {
   CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
   // First create the loads to the guard/stack slot for the comparison.
-  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
   Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
-  LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));
+  LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
 
   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
   int FI = MFI.getStackProtectorIndex();
@@ -3522,13 +3506,13 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                     MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
           .getReg(0);
 
-  if (TLI.useStackGuardXorFP()) {
+  if (TLI->useStackGuardXorFP()) {
     LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
     return false;
   }
 
   // Retrieve guard check function, nullptr if instrumentation is inlined.
-  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
+  if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
     // This path is currently untestable on GlobalISel, since the only platform
     // that needs this seems to be Windows, and we fall back on that currently.
     // The code still lives here in case that changes.
@@ -3563,13 +3547,13 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
 
   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
   // Otherwise, emit a volatile load to retrieve the stack guard value.
-  if (TLI.useLoadStackGuardNode()) {
+  if (TLI->useLoadStackGuardNode()) {
     Guard =
         MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
     getStackGuard(Guard, *CurBuilder);
   } else {
     // TODO: test using android subtarget when we support @llvm.thread.pointer.
-    const Value *IRGuard = TLI.getSDagStackGuard(M);
+    const Value *IRGuard = TLI->getSDagStackGuard(M);
     Register GuardPtr = getOrCreateVReg(*IRGuard);
 
     Guard = CurBuilder
@@ -3593,13 +3577,12 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
 bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                                            MachineBasicBlock *FailureBB) {
   CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
-  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
 
   const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
-  const char *Name = TLI.getLibcallName(Libcall);
+  const char *Name = TLI->getLibcallName(Libcall);
 
   CallLowering::CallLoweringInfo Info;
-  Info.CallConv = TLI.getLibcallCallingConv(Libcall);
+  Info.CallConv = TLI->getLibcallCallingConv(Libcall);
   Info.Callee = MachineOperand::CreateES(Name);
   Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
                   0};
@@ -3662,6 +3645,7 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                        ? EnableCSEInIRTranslator
                        : TPC->isGISelCSEEnabled();
+  TLI = MF->getSubtarget().getTargetLowering();
 
   if (EnableCSE) {
     EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
@@ -3696,12 +3680,8 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
   LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
   FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
 
-  const auto &TLI = *MF->getSubtarget().getTargetLowering();
-
   SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
-  SL->init(TLI, TM, *DL);
-
-
+  SL->init(*TLI, TM, *DL);
 
   assert(PendingPHIs.empty() && "stale PHIs");
 
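
For readers skimming the diff: every hunk applies the same refactoring, replacing repeated `MF->getSubtarget().getTargetLowering()` lookups in the translate helpers with a `TLI` pointer cached once per function in `runOnMachineFunction()` and dereferenced with `->` thereafter. Below is a minimal standalone sketch of that pattern; the `*Like` types, member names, and `translateSomething()` are illustrative stand-ins, not LLVM's actual classes or APIs.

```cpp
#include <cassert>
#include <cstdio>

// Stand-ins for the classes involved; names here are hypothetical.
struct TargetLoweringLike {
  bool isJumpExpensive() const { return false; }
};

struct SubtargetLike {
  TargetLoweringLike TL;
  const TargetLoweringLike *getTargetLowering() const { return &TL; }
};

struct MachineFunctionLike {
  SubtargetLike ST;
  const SubtargetLike &getSubtarget() const { return ST; }
};

class TranslatorLike {
  MachineFunctionLike *MF = nullptr;
  // Cached once per function, mirroring the new IRTranslator::TLI member.
  const TargetLoweringLike *TLI = nullptr;

public:
  void runOnMachineFunction(MachineFunctionLike &CurMF) {
    MF = &CurMF;
    // Before: each helper re-queried MF->getSubtarget().getTargetLowering().
    // After: look it up once here and reuse the pointer everywhere.
    TLI = MF->getSubtarget().getTargetLowering();
    translateSomething();
  }

  void translateSomething() {
    assert(TLI && "TLI must be cached before translation starts");
    if (!TLI->isJumpExpensive())
      std::puts("jumps are cheap on this stand-in target");
  }
};

int main() {
  MachineFunctionLike MFn;
  TranslatorLike T;
  T.runOnMachineFunction(MFn);
}
```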