
Commit 2b54510

[GlobalISel] Add a TargetLowering variable to IRTranslator. NFC (#83009)
This avoids having to fetch the TargetLowering from the subtarget multiple times.
1 parent: 9617da8 · commit: 2b54510
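Below is a minimal, self-contained sketch of the pattern this commit applies. The types are simplified stand-ins, not LLVM's real TargetLowering, subtarget, or MachineFunction classes; they only illustrate the shape of the change shown in the diff: look the TargetLowering up once per machine function in runOnMachineFunction, cache it in a member pointer, and let the translate helpers use the cached TLI instead of re-querying the subtarget.

// tli_cache_sketch.cpp -- simplified stand-ins, not the real LLVM classes.
#include <iostream>

struct TargetLowering {                  // stand-in for llvm::TargetLowering
  unsigned getVaListSizeInBits() const { return 64; }
};

struct Subtarget {                       // stand-in for the target subtarget
  TargetLowering TL;
  const TargetLowering *getTargetLowering() const { return &TL; }
};

struct MachineFunction {                 // stand-in for llvm::MachineFunction
  Subtarget ST;
  const Subtarget &getSubtarget() const { return ST; }
};

class Translator {                       // stand-in for llvm::IRTranslator
  MachineFunction *MF = nullptr;
  const TargetLowering *TLI = nullptr;   // the new cached member

public:
  bool runOnMachineFunction(MachineFunction &CurMF) {
    MF = &CurMF;
    TLI = MF->getSubtarget().getTargetLowering();  // single lookup per function
    return translateVAStart();
  }

private:
  bool translateVAStart() {
    // Before the change each helper re-derived TLI from MF; now they all
    // go through the member set up in runOnMachineFunction.
    unsigned ListSize = TLI->getVaListSizeInBits() / 8;
    std::cout << "va_list size: " << ListSize << " bytes\n";
    return true;
  }
};

int main() {
  MachineFunction MF;
  Translator T;
  return T.runOnMachineFunction(MF) ? 0 : 1;
}

The change is NFC because the TargetLowering for a given MachineFunction does not change while that function is being translated, so hoisting the lookup into runOnMachineFunction only removes redundant subtarget queries.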

2 files changed: 28 additions & 47 deletions


llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h

Lines changed: 1 addition & 0 deletions
@@ -612,6 +612,7 @@ class IRTranslator : public MachineFunctionPass {
   AAResults *AA = nullptr;
   AssumptionCache *AC = nullptr;
   const TargetLibraryInfo *LibInfo = nullptr;
+  const TargetLowering *TLI = nullptr;
   FunctionLoweringInfo FuncInfo;
 
   // True when either the Target Machine specifies no optimizations or the

llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp

Lines changed: 27 additions & 47 deletions
@@ -596,8 +596,6 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
   const Value *CondVal = BrInst.getCondition();
   MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
 
-  const auto &TLI = *MF->getSubtarget().getTargetLowering();
-
   // If this is a series of conditions that are or'd or and'd together, emit
   // this as a sequence of branches instead of setcc's with and/or operations.
   // As long as jumps are not expensive (exceptions for multi-use logic ops,
@@ -617,7 +615,7 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
   // jle foo
   using namespace PatternMatch;
   const Instruction *CondI = dyn_cast<Instruction>(CondVal);
-  if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
+  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
       !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
     Value *Vec;
@@ -1385,9 +1383,8 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
     return true;
   }
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
   MachineMemOperand::Flags Flags =
-      TLI.getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
+      TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
   if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
     if (AA->pointsToConstantMemory(
             MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
@@ -1434,8 +1431,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
     return true;
   }
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
+  MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);
 
   for (unsigned i = 0; i < Vals.size(); ++i) {
     Register Addr;
@@ -1779,8 +1775,7 @@ void IRTranslator::getStackGuard(Register DstReg,
   auto MIB =
       MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
+  Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
   if (!Global)
     return;
 
@@ -2111,9 +2106,8 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     // does. Simplest intrinsic ever!
     return true;
   case Intrinsic::vastart: {
-    auto &TLI = *MF->getSubtarget().getTargetLowering();
     Value *Ptr = CI.getArgOperand(0);
-    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
+    unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
     Align Alignment = getKnownAlignment(Ptr, *DL);
 
     MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
@@ -2189,14 +2183,13 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
   case Intrinsic::fmuladd: {
     const TargetMachine &TM = MF->getTarget();
-    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
-        TLI.isFMAFasterThanFMulAndFAdd(*MF,
-            TLI.getValueType(*DL, CI.getType()))) {
+        TLI->isFMAFasterThanFMulAndFAdd(*MF,
+            TLI->getValueType(*DL, CI.getType()))) {
       // TODO: Revisit this to see if we should move this part of the
       // lowering to the combiner.
       MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
@@ -2254,10 +2247,9 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
     return true;
   case Intrinsic::stackprotector: {
-    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
     Register GuardVal;
-    if (TLI.useLoadStackGuardNode()) {
+    if (TLI->useLoadStackGuardNode()) {
       GuardVal = MRI->createGenericVirtualRegister(PtrTy);
       getStackGuard(GuardVal, MIRBuilder);
     } else
@@ -2635,10 +2627,9 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
   }
 
   // Add a MachineMemOperand if it is a target mem intrinsic.
-  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
   TargetLowering::IntrinsicInfo Info;
   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
-  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
+  if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
     Align Alignment = Info.align.value_or(
         DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
     LLT MemTy = Info.memVT.isSimple()
@@ -2818,10 +2809,9 @@ bool IRTranslator::translateLandingPad(const User &U,
 
   // If there aren't registers to copy the values into (e.g., during SjLj
   // exceptions), then don't bother.
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
   const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
-  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
-      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
+  if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
+      TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
     return true;
 
   // If landingpad's return type is token type, we don't create DAG nodes
@@ -2852,15 +2842,15 @@ bool IRTranslator::translateLandingPad(const User &U,
   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
 
   // Mark exception register as live in.
-  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
+  Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
   if (!ExceptionReg)
     return false;
 
   MBB.addLiveIn(ExceptionReg);
   ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
 
-  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
+  Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
   if (!SelectorReg)
     return false;
 
@@ -2986,8 +2976,7 @@ bool IRTranslator::translateExtractElement(const User &U,
 
   Register Res = getOrCreateVReg(U);
   Register Val = getOrCreateVReg(*U.getOperand(0));
-  const auto &TLI = *MF->getSubtarget().getTargetLowering();
-  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
+  unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
   Register Idx;
   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
     if (CI->getBitWidth() != PreferredVecIdxWidth) {
@@ -3039,8 +3028,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
+  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
 
   auto Res = getOrCreateVRegs(I);
   Register OldValRes = Res[0];
@@ -3061,8 +3049,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
 bool IRTranslator::translateAtomicRMW(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
+  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
 
   Register Res = getOrCreateVReg(I);
   Register Addr = getOrCreateVReg(*I.getPointerOperand());
@@ -3302,8 +3289,7 @@ bool IRTranslator::translate(const Instruction &Inst) {
   CurBuilder->setDebugLoc(Inst.getDebugLoc());
   CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
 
-  auto &TLI = *MF->getSubtarget().getTargetLowering();
-  if (TLI.fallBackToDAGISel(Inst))
+  if (TLI->fallBackToDAGISel(Inst))
     return false;
 
   switch (Inst.getOpcode()) {
@@ -3454,9 +3440,8 @@ bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
   // Check if we need to generate stack-protector guard checks.
   StackProtector &SP = getAnalysis<StackProtector>();
   if (SP.shouldEmitSDCheck(BB)) {
-    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
     bool FunctionBasedInstrumentation =
-        TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
+        TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
     SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
   }
   // Handle stack protector.
@@ -3501,10 +3486,9 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                           MachineBasicBlock *ParentBB) {
   CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
   // First create the loads to the guard/stack slot for the comparison.
-  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
   Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
-  LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));
+  LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
 
   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
   int FI = MFI.getStackProtectorIndex();
@@ -3522,13 +3506,13 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
           .getReg(0);
 
-  if (TLI.useStackGuardXorFP()) {
+  if (TLI->useStackGuardXorFP()) {
     LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
     return false;
   }
 
   // Retrieve guard check function, nullptr if instrumentation is inlined.
-  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
+  if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
     // This path is currently untestable on GlobalISel, since the only platform
     // that needs this seems to be Windows, and we fall back on that currently.
     // The code still lives here in case that changes.
@@ -3563,13 +3547,13 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
 
   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
   // Otherwise, emit a volatile load to retrieve the stack guard value.
-  if (TLI.useLoadStackGuardNode()) {
+  if (TLI->useLoadStackGuardNode()) {
     Guard =
         MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
     getStackGuard(Guard, *CurBuilder);
   } else {
     // TODO: test using android subtarget when we support @llvm.thread.pointer.
-    const Value *IRGuard = TLI.getSDagStackGuard(M);
+    const Value *IRGuard = TLI->getSDagStackGuard(M);
     Register GuardPtr = getOrCreateVReg(*IRGuard);
 
     Guard = CurBuilder
@@ -3593,13 +3577,12 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
 bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                                            MachineBasicBlock *FailureBB) {
   CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
-  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
 
   const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
-  const char *Name = TLI.getLibcallName(Libcall);
+  const char *Name = TLI->getLibcallName(Libcall);
 
   CallLowering::CallLoweringInfo Info;
-  Info.CallConv = TLI.getLibcallCallingConv(Libcall);
+  Info.CallConv = TLI->getLibcallCallingConv(Libcall);
   Info.Callee = MachineOperand::CreateES(Name);
   Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
                   0};
@@ -3662,6 +3645,7 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                        ? EnableCSEInIRTranslator
                        : TPC->isGISelCSEEnabled();
+  TLI = MF->getSubtarget().getTargetLowering();
 
   if (EnableCSE) {
     EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
@@ -3696,12 +3680,8 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
   LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
   FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
 
-  const auto &TLI = *MF->getSubtarget().getTargetLowering();
-
   SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
-  SL->init(TLI, TM, *DL);
-
-
+  SL->init(*TLI, TM, *DL);
 
   assert(PendingPHIs.empty() && "stale PHIs");
 