Skip to content

Commit bf03788

Browse files
fhahn
authored and tstellar committed
Revert "[X86][DAGISel] Don't widen shuffle element with AVX512"
This reverts commit 5fb4134. This patch is causing crashes when building llvm-test-suite when optimizing for CPUs with AVX512. Reproducer crashing with llc: target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx" define i32 @test(<32 x i32> %0) #0 { entry: %1 = mul <32 x i32> %0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %2 = tail call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1) ret i32 %2 } ; Function Attrs: nocallback nofree nosync nounwind readnone willreturn declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>) #1 attributes #0 = { "min-legal-vector-width"="0" "target-cpu"="skylake-avx512" } attributes #1 = { nocallback nofree nosync nounwind readnone willreturn } (cherry picked from commit f912bab111add1275fcaf5f24e4d3654127972d0)
1 parent abf0175 commit bf03788

File tree

3 files changed

+45
-91
lines changed

3 files changed

+45
-91
lines changed

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 0 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -19304,44 +19304,6 @@ static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
1930419304
return false;
1930519305
}
1930619306

19307-
static bool canCombineAsMaskOperation(SDValue V1, SDValue V2,
19308-
const X86Subtarget &Subtarget) {
19309-
if (!Subtarget.hasAVX512())
19310-
return false;
19311-
19312-
MVT VT = V1.getSimpleValueType().getScalarType();
19313-
if ((VT == MVT::i16 || VT == MVT::i8) && !Subtarget.hasBWI())
19314-
return false;
19315-
19316-
// i8 is better to be widen to i16, because there is PBLENDW for vXi16
19317-
// when the vector bit size is 128 or 256.
19318-
if (VT == MVT::i8 && V1.getSimpleValueType().getSizeInBits() < 512)
19319-
return false;
19320-
19321-
auto HasMaskOperation = [&](SDValue V) {
19322-
// TODO: Currently we only check limited opcode. We probably extend
19323-
// it to all binary operation by checking TLI.isBinOp().
19324-
switch (V->getOpcode()) {
19325-
default:
19326-
return false;
19327-
case ISD::ADD:
19328-
case ISD::SUB:
19329-
case ISD::AND:
19330-
case ISD::XOR:
19331-
break;
19332-
}
19333-
if (!V->hasOneUse())
19334-
return false;
19335-
19336-
return true;
19337-
};
19338-
19339-
if (HasMaskOperation(V1) || HasMaskOperation(V2))
19340-
return true;
19341-
19342-
return false;
19343-
}
19344-
1934519307
// Forward declaration.
1934619308
static SDValue canonicalizeShuffleMaskWithHorizOp(
1934719309
MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
@@ -19417,7 +19379,6 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
1941719379
// integers to handle flipping the low and high halves of AVX 256-bit vectors.
1941819380
SmallVector<int, 16> WidenedMask;
1941919381
if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
19420-
!canCombineAsMaskOperation(V1, V2, Subtarget) &&
1942119382
canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
1942219383
// Shuffle mask widening should not interfere with a broadcast opportunity
1942319384
// by obfuscating the operands with bitcasts.

llvm/test/CodeGen/X86/avx512-shuffles/shuffle-blend.ll

Lines changed: 39 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,29 @@
11
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
22
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX512F,X86-AVX512F
33
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX512F,X64-AVX512F
4-
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX512BW,X86-AVX512BW
5-
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX512BW,X64-AVX512BW
4+
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX512BW
5+
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX512BW
66

77
define <16 x i32> @shuffle_v8i64(<16 x i32> %t0, <16 x i32> %t1) {
8-
; CHECK-LABEL: shuffle_v8i64:
9-
; CHECK: # %bb.0: # %entry
10-
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm2
11-
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0
12-
; CHECK-NEXT: vshufps {{.*#+}} zmm0 = zmm2[0,1],zmm0[2,3],zmm2[4,5],zmm0[6,7],zmm2[8,9],zmm0[10,11],zmm2[12,13],zmm0[14,15]
13-
; CHECK-NEXT: ret{{[l|q]}}
8+
; AVX512F-LABEL: shuffle_v8i64:
9+
; AVX512F: # %bb.0: # %entry
10+
; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm2
11+
; AVX512F-NEXT: vpsubd %zmm1, %zmm0, %zmm0
12+
; AVX512F-NEXT: movb $-86, %al
13+
; AVX512F-NEXT: kmovw %eax, %k1
14+
; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1}
15+
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
16+
; AVX512F-NEXT: ret{{[l|q]}}
17+
;
18+
; AVX512BW-LABEL: shuffle_v8i64:
19+
; AVX512BW: # %bb.0: # %entry
20+
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm2
21+
; AVX512BW-NEXT: vpsubd %zmm1, %zmm0, %zmm0
22+
; AVX512BW-NEXT: movb $-86, %al
23+
; AVX512BW-NEXT: kmovd %eax, %k1
24+
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1}
25+
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
26+
; AVX512BW-NEXT: ret{{[l|q]}}
1427
entry:
1528
%t2 = add nsw <16 x i32> %t0, %t1
1629
%t3 = sub nsw <16 x i32> %t0, %t1
@@ -83,24 +96,15 @@ define <64 x i8> @addb_selectw_64xi8(<64 x i8> %t0, <64 x i8> %t1) {
8396
; X64-AVX512F-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
8497
; X64-AVX512F-NEXT: retq
8598
;
86-
; X86-AVX512BW-LABEL: addb_selectw_64xi8:
87-
; X86-AVX512BW: # %bb.0:
88-
; X86-AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm2
89-
; X86-AVX512BW-NEXT: movl $3, %eax
90-
; X86-AVX512BW-NEXT: kmovd %eax, %k0
91-
; X86-AVX512BW-NEXT: kmovd %k0, %k1
92-
; X86-AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm2 {%k1}
93-
; X86-AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
94-
; X86-AVX512BW-NEXT: retl
95-
;
96-
; X64-AVX512BW-LABEL: addb_selectw_64xi8:
97-
; X64-AVX512BW: # %bb.0:
98-
; X64-AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm2
99-
; X64-AVX512BW-NEXT: movl $3, %eax
100-
; X64-AVX512BW-NEXT: kmovq %rax, %k1
101-
; X64-AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm2 {%k1}
102-
; X64-AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
103-
; X64-AVX512BW-NEXT: retq
99+
; AVX512BW-LABEL: addb_selectw_64xi8:
100+
; AVX512BW: # %bb.0:
101+
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm2
102+
; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
103+
; AVX512BW-NEXT: movl $1, %eax
104+
; AVX512BW-NEXT: kmovd %eax, %k1
105+
; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
106+
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
107+
; AVX512BW-NEXT: ret{{[l|q]}}
104108
%t2 = add nsw <64 x i8> %t0, %t1
105109
%t3 = sub nsw <64 x i8> %t0, %t1
106110
%t4 = shufflevector <64 x i8> %t2, <64 x i8> %t3, <64 x i32> <i32 64, i32 65, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
@@ -165,9 +169,10 @@ define <32 x i16> @addw_selectd_32xi16(<32 x i16> %t0, <32 x i16> %t1) {
165169
; AVX512BW-LABEL: addw_selectd_32xi16:
166170
; AVX512BW: # %bb.0:
167171
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm2
168-
; AVX512BW-NEXT: movl $3, %eax
172+
; AVX512BW-NEXT: vpsubw %zmm1, %zmm0, %zmm0
173+
; AVX512BW-NEXT: movw $1, %ax
169174
; AVX512BW-NEXT: kmovd %eax, %k1
170-
; AVX512BW-NEXT: vpsubw %zmm1, %zmm0, %zmm2 {%k1}
175+
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm2 {%k1}
171176
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
172177
; AVX512BW-NEXT: ret{{[l|q]}}
173178
%t2 = add nsw <32 x i16> %t0, %t1
@@ -193,18 +198,20 @@ define <16 x i32> @addd_selectq_16xi32(<16 x i32> %t0, <16 x i32> %t1) {
193198
; AVX512F-LABEL: addd_selectq_16xi32:
194199
; AVX512F: # %bb.0:
195200
; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm2
196-
; AVX512F-NEXT: movw $3, %ax
201+
; AVX512F-NEXT: vpsubd %zmm1, %zmm0, %zmm0
202+
; AVX512F-NEXT: movb $1, %al
197203
; AVX512F-NEXT: kmovw %eax, %k1
198-
; AVX512F-NEXT: vpsubd %zmm1, %zmm0, %zmm2 {%k1}
204+
; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1}
199205
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
200206
; AVX512F-NEXT: ret{{[l|q]}}
201207
;
202208
; AVX512BW-LABEL: addd_selectq_16xi32:
203209
; AVX512BW: # %bb.0:
204210
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm2
205-
; AVX512BW-NEXT: movw $3, %ax
211+
; AVX512BW-NEXT: vpsubd %zmm1, %zmm0, %zmm0
212+
; AVX512BW-NEXT: movb $1, %al
206213
; AVX512BW-NEXT: kmovd %eax, %k1
207-
; AVX512BW-NEXT: vpsubd %zmm1, %zmm0, %zmm2 {%k1}
214+
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1}
208215
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
209216
; AVX512BW-NEXT: ret{{[l|q]}}
210217
%t2 = add nsw <16 x i32> %t0, %t1

llvm/test/CodeGen/X86/combine-sdiv.ll

Lines changed: 6 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -2889,26 +2889,12 @@ define <8 x i16> @combine_vec_sdiv_nonuniform7(<8 x i16> %x) {
28892889
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
28902890
; AVX1-NEXT: retq
28912891
;
2892-
; AVX2-LABEL: combine_vec_sdiv_nonuniform7:
2893-
; AVX2: # %bb.0:
2894-
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
2895-
; AVX2-NEXT: vpsubw %xmm0, %xmm1, %xmm1
2896-
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
2897-
; AVX2-NEXT: retq
2898-
;
2899-
; AVX512F-LABEL: combine_vec_sdiv_nonuniform7:
2900-
; AVX512F: # %bb.0:
2901-
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
2902-
; AVX512F-NEXT: vpsubw %xmm0, %xmm1, %xmm1
2903-
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
2904-
; AVX512F-NEXT: retq
2905-
;
2906-
; AVX512BW-LABEL: combine_vec_sdiv_nonuniform7:
2907-
; AVX512BW: # %bb.0:
2908-
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
2909-
; AVX512BW-NEXT: vpsubw %xmm0, %xmm1, %xmm1
2910-
; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
2911-
; AVX512BW-NEXT: retq
2892+
; AVX2ORLATER-LABEL: combine_vec_sdiv_nonuniform7:
2893+
; AVX2ORLATER: # %bb.0:
2894+
; AVX2ORLATER-NEXT: vpxor %xmm1, %xmm1, %xmm1
2895+
; AVX2ORLATER-NEXT: vpsubw %xmm0, %xmm1, %xmm1
2896+
; AVX2ORLATER-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
2897+
; AVX2ORLATER-NEXT: retq
29122898
;
29132899
; XOP-LABEL: combine_vec_sdiv_nonuniform7:
29142900
; XOP: # %bb.0:

0 commit comments

Comments
 (0)