Skip to content

[LoopVectorizer] Bundle partial reductions with different extensions #136997

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 33 additions & 25 deletions llvm/lib/Transforms/Vectorize/VPlan.h
Original file line number Diff line number Diff line change
@@ -2689,11 +2689,6 @@ class VPExtendedReductionRecipe : public VPReductionRecipe {
/// and needs to be lowered to concrete recipes before codegen. The operands are
/// {ChainOp, VecOp1, VecOp2, [Condition]}.
class VPMulAccumulateReductionRecipe : public VPReductionRecipe {
/// Opcode of the extend for VecOp1 and VecOp2.
Instruction::CastOps ExtOp;

/// Non-neg flag of the extend recipe.
bool IsNonNeg = false;

/// The scalar type after extending.
Type *ResultTy = nullptr;
@@ -2710,11 +2705,13 @@ class VPMulAccumulateReductionRecipe : public VPReductionRecipe {
MulAcc->getCondOp(), MulAcc->isOrdered(),
WrapFlagsTy(MulAcc->hasNoUnsignedWrap(), MulAcc->hasNoSignedWrap()),
MulAcc->getDebugLoc()),
ExtOp(MulAcc->getExtOpcode()), IsNonNeg(MulAcc->isNonNeg()),
ResultTy(MulAcc->getResultType()),
VFScaleFactor(MulAcc->getVFScaleFactor()) {
VFScaleFactor(MulAcc->getVFScaleFactor()),
VecOpInfo{MulAcc->getVecOp0Info(), MulAcc->getVecOp1Info()} {
transferFlags(*MulAcc);
setUnderlyingValue(MulAcc->getUnderlyingValue());
VecOpInfo[0] = MulAcc->getVecOp0Info();
VecOpInfo[1] = MulAcc->getVecOp1Info();
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Probably a stupid question because I'm not familiar with VPlan, but is there a reason why this isn't a more standard copy constructor, i.e. taking a const VPMulAccumulateReductionRecipe & as parameter?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I actually don't know, this is just how the other recipes clone. I haven't used copy constructors myself but I can investigate.


public:
@@ -2728,19 +2725,20 @@ class VPMulAccumulateReductionRecipe : public VPReductionRecipe {
R->getCondOp(), R->isOrdered(),
WrapFlagsTy(Mul->hasNoUnsignedWrap(), Mul->hasNoSignedWrap()),
R->getDebugLoc()),
ExtOp(Ext0->getOpcode()), ResultTy(ResultTy),
VFScaleFactor(ScaleFactor) {
ResultTy(ResultTy), VFScaleFactor(ScaleFactor),
VecOpInfo{
{Ext0->getOpcode(), Ext0->hasNonNegFlag() && Ext0->isNonNeg()},
{Ext1->getOpcode(), Ext1->hasNonNegFlag() && Ext1->isNonNeg()}} {
assert(RecurrenceDescriptor::getOpcode(getRecurrenceKind()) ==
Instruction::Add &&
"The reduction instruction in MulAccumulateteReductionRecipe must "
"be Add");
assert((ExtOp == Instruction::CastOps::ZExt ||
ExtOp == Instruction::CastOps::SExt) &&
"VPMulAccumulateReductionRecipe only supports zext and sext.");
setUnderlyingValue(R->getUnderlyingValue());
// Only set the non-negative flag if the original recipe contains.
if (Ext0->hasNonNegFlag())
IsNonNeg = Ext0->isNonNeg();
assert(((Ext0->getOpcode() == Instruction::CastOps::ZExt ||
Ext0->getOpcode() == Instruction::CastOps::SExt) &&
(Ext1->getOpcode() == Instruction::CastOps::ZExt ||
Ext1->getOpcode() == Instruction::CastOps::SExt)) &&
"VPMulAccumulateReductionRecipe only supports zext and sext.");
}

VPMulAccumulateReductionRecipe(VPReductionRecipe *R, VPWidenRecipe *Mul,
@@ -2751,14 +2749,21 @@ class VPMulAccumulateReductionRecipe : public VPReductionRecipe {
R->getCondOp(), R->isOrdered(),
WrapFlagsTy(Mul->hasNoUnsignedWrap(), Mul->hasNoSignedWrap()),
R->getDebugLoc()),
ExtOp(Instruction::CastOps::CastOpsEnd), ResultTy(ResultTy) {
ResultTy(ResultTy) {
assert(RecurrenceDescriptor::getOpcode(getRecurrenceKind()) ==
Instruction::Add &&
"The reduction instruction in MulAccumulateReductionRecipe must be "
"Add");
setUnderlyingValue(R->getUnderlyingValue());
}

struct VecOperandInfo {
/// The operand's extend opcode.
Instruction::CastOps ExtOp{Instruction::CastOps::CastOpsEnd};
/// Non-neg portion of the operand's flags.
bool IsNonNeg = false;
};

~VPMulAccumulateReductionRecipe() override = default;

VPMulAccumulateReductionRecipe *clone() override {
@@ -2792,20 +2797,23 @@ class VPMulAccumulateReductionRecipe : public VPReductionRecipe {
VPValue *getVecOp1() const { return getOperand(2); }

/// Return true if this recipe contains extended operands.
bool isExtended() const { return ExtOp != Instruction::CastOps::CastOpsEnd; }

/// Return the opcode of the extends for the operands.
Instruction::CastOps getExtOpcode() const { return ExtOp; }

/// Return if the operands are zero-extended.
bool isZExt() const { return ExtOp == Instruction::CastOps::ZExt; }
bool isExtended() const {
return getVecOp0Info().ExtOp != Instruction::CastOps::CastOpsEnd ||
getVecOp1Info().ExtOp != Instruction::CastOps::CastOpsEnd;
}

/// Return true if the operand extends have the non-negative flag.
bool isNonNeg() const { return IsNonNeg; }
/// Return if the operands of mul instruction come from same extend.
bool isSameExtendVal() const { return getVecOp0() == getVecOp1(); }

/// Return the scaling factor that the VF is divided by to form the recipe's
/// output
unsigned getVFScaleFactor() const { return VFScaleFactor; }

const VecOperandInfo &getVecOp0Info() const { return VecOpInfo[0]; }
const VecOperandInfo &getVecOp1Info() const { return VecOpInfo[1]; }

protected:
VecOperandInfo VecOpInfo[2];
};

/// VPReplicateRecipe replicates a given instruction producing multiple scalar
28 changes: 17 additions & 11 deletions llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
Original file line number Diff line number Diff line change
@@ -2572,19 +2572,22 @@ VPExtendedReductionRecipe::computeCost(ElementCount VF,
InstructionCost
VPMulAccumulateReductionRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
VecOperandInfo Op0Info = getVecOp0Info();
VecOperandInfo Op1Info = getVecOp1Info();
if (getVFScaleFactor() > 1) {
return Ctx.TTI.getPartialReductionCost(
Instruction::Add, Ctx.Types.inferScalarType(getVecOp0()),
Ctx.Types.inferScalarType(getVecOp1()), getResultType(), VF,
TTI::getPartialReductionExtendKind(getExtOpcode()),
TTI::getPartialReductionExtendKind(getExtOpcode()), Instruction::Mul);
TTI::getPartialReductionExtendKind(Op0Info.ExtOp),
TTI::getPartialReductionExtendKind(Op1Info.ExtOp), Instruction::Mul);
}

Type *RedTy = Ctx.Types.inferScalarType(this);
auto *SrcVecTy =
cast<VectorType>(toVectorTy(Ctx.Types.inferScalarType(getVecOp0()), VF));
return Ctx.TTI.getMulAccReductionCost(isZExt(), RedTy, SrcVecTy,
Ctx.CostKind);
return Ctx.TTI.getMulAccReductionCost(Op0Info.ExtOp ==
Instruction::CastOps::ZExt,
RedTy, SrcVecTy, Ctx.CostKind);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -2653,6 +2656,8 @@ void VPExtendedReductionRecipe::print(raw_ostream &O, const Twine &Indent,

void VPMulAccumulateReductionRecipe::print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
VecOperandInfo Op0Info = getVecOp0Info();
VecOperandInfo Op1Info = getVecOp1Info();
O << Indent << "MULACC-REDUCE ";
printAsOperand(O, SlotTracker);
O << " = ";
@@ -2669,15 +2674,16 @@ void VPMulAccumulateReductionRecipe::print(raw_ostream &O, const Twine &Indent,
if (isExtended())
O << "(";
getVecOp0()->printAsOperand(O, SlotTracker);
if (isExtended())
O << " " << Instruction::getOpcodeName(ExtOp) << " to " << *getResultType()
<< "), (";
else
if (isExtended()) {
O << " " << Instruction::getOpcodeName(Op0Info.ExtOp) << " to "
<< *getResultType() << "), (";
} else
O << ", ";
getVecOp1()->printAsOperand(O, SlotTracker);
if (isExtended())
O << " " << Instruction::getOpcodeName(ExtOp) << " to " << *getResultType()
<< ")";
if (isExtended()) {
O << " " << Instruction::getOpcodeName(Op1Info.ExtOp) << " to "
<< *getResultType() << ")";
}
if (isConditional()) {
O << ", ";
getCondOp()->printAsOperand(O, SlotTracker);
36 changes: 19 additions & 17 deletions llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
Original file line number Diff line number Diff line change
@@ -30,6 +30,7 @@
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Casting.h"
@@ -2545,28 +2546,31 @@ expandVPMulAccumulateReduction(VPMulAccumulateReductionRecipe *MulAcc) {
// reduce.add(ext(mul(ext, ext))) to reduce.add(mul(ext, ext)).
VPValue *Op0, *Op1;
if (MulAcc->isExtended()) {
VPMulAccumulateReductionRecipe::VecOperandInfo Op0Info =
MulAcc->getVecOp0Info();
VPMulAccumulateReductionRecipe::VecOperandInfo Op1Info =
MulAcc->getVecOp1Info();
Type *RedTy = MulAcc->getResultType();
if (MulAcc->isZExt())
Op0 = new VPWidenCastRecipe(
MulAcc->getExtOpcode(), MulAcc->getVecOp0(), RedTy,
VPIRFlags::NonNegFlagsTy(MulAcc->isNonNeg()), MulAcc->getDebugLoc());
if (Op0Info.ExtOp == Instruction::CastOps::ZExt)
Op0 = new VPWidenCastRecipe(Op0Info.ExtOp, MulAcc->getVecOp0(), RedTy,
VPIRFlags::NonNegFlagsTy(Op0Info.IsNonNeg),
MulAcc->getDebugLoc());
else
Op0 = new VPWidenCastRecipe(MulAcc->getExtOpcode(), MulAcc->getVecOp0(),
RedTy, {}, MulAcc->getDebugLoc());
Op0 = new VPWidenCastRecipe(Op0Info.ExtOp, MulAcc->getVecOp0(), RedTy, {},
MulAcc->getDebugLoc());
Op0->getDefiningRecipe()->insertBefore(MulAcc);
// Prevent reduce.add(mul(ext(A), ext(A))) generate duplicate
// VPWidenCastRecipe.
if (MulAcc->getVecOp0() == MulAcc->getVecOp1()) {
Op1 = Op0;
} else {
if (MulAcc->isZExt())
Op1 = new VPWidenCastRecipe(
MulAcc->getExtOpcode(), MulAcc->getVecOp1(), RedTy,
VPIRFlags::NonNegFlagsTy(MulAcc->isNonNeg()),
MulAcc->getDebugLoc());
if (Op1Info.ExtOp == Instruction::CastOps::ZExt)
Op1 = new VPWidenCastRecipe(Op1Info.ExtOp, MulAcc->getVecOp1(), RedTy,
VPIRFlags::NonNegFlagsTy(Op1Info.IsNonNeg),
MulAcc->getDebugLoc());
else
Op1 = new VPWidenCastRecipe(MulAcc->getExtOpcode(), MulAcc->getVecOp1(),
RedTy, {}, MulAcc->getDebugLoc());
Op1 = new VPWidenCastRecipe(Op1Info.ExtOp, MulAcc->getVecOp1(), RedTy,
{}, MulAcc->getDebugLoc());
Op1->getDefiningRecipe()->insertBefore(MulAcc);
}
} else {
@@ -2933,10 +2937,8 @@ tryToCreateAbstractPartialReductionRecipe(VPPartialReductionRecipe *PRed) {
auto *BinOpR = cast<VPWidenRecipe>(BinOp->getDefiningRecipe());
VPWidenCastRecipe *Ext0R = dyn_cast<VPWidenCastRecipe>(BinOpR->getOperand(0));
VPWidenCastRecipe *Ext1R = dyn_cast<VPWidenCastRecipe>(BinOpR->getOperand(1));

// TODO: Make work with extends of different signedness
if (Ext0R->hasMoreThanOneUniqueUser() || Ext1R->hasMoreThanOneUniqueUser() ||
Ext0R->getOpcode() != Ext1R->getOpcode())
if (!Ext0R || Ext0R->hasMoreThanOneUniqueUser() || !Ext1R ||
Ext1R->hasMoreThanOneUniqueUser())
return;

auto *AbstractR = new VPMulAccumulateReductionRecipe(

Large diffs are not rendered by default.

19 changes: 8 additions & 11 deletions llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
Original file line number Diff line number Diff line change
@@ -6,7 +6,7 @@ target triple = "aarch64-none-unknown-elf"

; Tests for printing VPlans that are enabled under AArch64

define i32 @print_partial_reduction(ptr %a, ptr %b) {
define i32 @print_partial_reduction_sext_zext(ptr %a, ptr %b) {
; CHECK: VPlan 'Initial VPlan for VF={8,16},UF>=1' {
; CHECK-NEXT: Live-in vp<[[VF:%.]]> = VF
; CHECK-NEXT: Live-in vp<[[VFxUF:%.]]> = VF * UF
@@ -27,13 +27,10 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: vp<[[PTR_A:%.+]]> = vector-pointer ir<%gep.a>
; CHECK-NEXT: WIDEN ir<%load.a> = load vp<[[PTR_A]]>
; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = sext ir<%load.a> to i32
; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: vp<[[PTR_B:%.+]]> = vector-pointer ir<%gep.b>
; CHECK-NEXT: WIDEN ir<%load.b> = load vp<[[PTR_B]]>
; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32
; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a>
; CHECK-NEXT: PARTIAL-REDUCE ir<[[REDUCE]]> = add ir<[[ACC]]>, ir<%mul>
; CHECK-NEXT: MULACC-REDUCE ir<[[REDUCE]]> = ir<%accum> + partial.reduce.add (mul (ir<%load.b> zext to i32), (ir<%load.a> sext to i32))
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VEC_TC]]>
; CHECK-NEXT: No successors
@@ -87,23 +84,23 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-EMPTY:
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT-SCALAR vp<[[EP_IV:%.+]]> = phi [ ir<0>, ir-bb<vector.ph> ], [ vp<%index.next>, vector.body ]
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi ir<0>, ir<%add> (VF scaled by 1/4)
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi ir<0>, vp<[[REDUCE:%.+]]> (VF scaled by 1/4)
; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<[[EP_IV]]>
; CHECK-NEXT: vp<[[PTR_A:%.+]]> = vector-pointer ir<%gep.a>
; CHECK-NEXT: WIDEN ir<%load.a> = load vp<[[PTR_A]]>
; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = sext ir<%load.a> to i32
; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<[[EP_IV]]>
; CHECK-NEXT: vp<[[PTR_B:%.+]]> = vector-pointer ir<%gep.b>
; CHECK-NEXT: WIDEN ir<%load.b> = load vp<[[PTR_B]]>
; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32
; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a>
; CHECK-NEXT: PARTIAL-REDUCE ir<%add> = add ir<%accum>, ir<%mul>
; CHECK-NEXT: WIDEN-CAST vp<[[EXTB:%.+]]> = zext ir<%load.b> to i32
; CHECK-NEXT: WIDEN-CAST vp<[[EXTA:%.+]]> = sext ir<%load.a> to i32
; CHECK-NEXT: WIDEN vp<[[MUL:%.+]]> = mul vp<[[EXTB]]>, vp<[[EXTA]]>
; CHECK-NEXT: PARTIAL-REDUCE vp<[[REDUCE]]> = add ir<%accum>, vp<[[MUL]]>
; CHECK-NEXT: EMIT vp<[[EP_IV_NEXT:%.+]]> = add nuw vp<[[EP_IV]]>, ir<16>
; CHECK-NEXT: EMIT branch-on-count vp<[[EP_IV_NEXT]]>, ir<1024>
; CHECK-NEXT: Successor(s): middle.block, vector.body
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<%accum>, ir<%add>
; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<%accum>, vp<[[REDUCE]]>
; CHECK-NEXT: EMIT vp<[[EXTRACT:%.+]]> = extract-last-element vp<[[RED_RESULT]]>
; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1024>, ir<1024>
; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]>