@@ -31,11 +31,38 @@ define i64 @same_exit_block_pre_inc_use1() #0 {
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 0
+ ; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 16
+ ; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP19]]
+ ; CHECK-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP34:%.*]] = mul nuw i64 [[TMP33]], 32
+ ; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP34]]
+ ; CHECK-NEXT: [[TMP36:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP37:%.*]] = mul nuw i64 [[TMP36]], 48
+ ; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP37]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP8]], align 1
+ ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 16 x i8>, ptr [[TMP29]], align 1
+ ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP35]], align 1
+ ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP38]], align 1
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i32 0
+ ; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 16
+ ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[TMP21]]
+ ; CHECK-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP24:%.*]] = mul nuw i64 [[TMP23]], 32
+ ; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[TMP24]]
+ ; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP27:%.*]] = mul nuw i64 [[TMP26]], 48
+ ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[TMP27]]
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
+ ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 16 x i8>, ptr [[TMP22]], align 1
+ ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 16 x i8>, ptr [[TMP25]], align 1
+ ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 16 x i8>, ptr [[TMP28]], align 1
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
+ ; CHECK-NEXT: [[TMP30:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD5]], [[WIDE_LOAD6]]
+ ; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
+ ; CHECK-NEXT: [[TMP32:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD4]], [[WIDE_LOAD8]]
; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP5]]
; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> [[TMP11]])
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT3]], [[N_VEC]]
@@ -47,8 +74,28 @@ define i64 @same_exit_block_pre_inc_use1() #0 {
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 510, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOP_END:%.*]], label [[SCALAR_PH]]
; CHECK: vector.early.exit:
+ ; CHECK-NEXT: [[TMP63:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP42:%.*]] = mul nuw i64 [[TMP63]], 16
+ ; CHECK-NEXT: [[TMP43:%.*]] = mul i64 1, [[TMP42]]
+ ; CHECK-NEXT: [[TMP44:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> [[TMP32]], i1 true)
+ ; CHECK-NEXT: [[TMP62:%.*]] = mul i64 [[TMP42]], 3
+ ; CHECK-NEXT: [[TMP45:%.*]] = add i64 [[TMP62]], [[TMP44]]
+ ; CHECK-NEXT: [[TMP46:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> [[TMP31]], i1 true)
+ ; CHECK-NEXT: [[TMP58:%.*]] = mul i64 [[TMP42]], 2
+ ; CHECK-NEXT: [[TMP50:%.*]] = add i64 [[TMP58]], [[TMP46]]
+ ; CHECK-NEXT: [[TMP47:%.*]] = icmp ne i64 [[TMP50]], [[TMP43]]
+ ; CHECK-NEXT: [[TMP51:%.*]] = select i1 [[TMP47]], i64 [[TMP50]], i64 [[TMP45]]
+ ; CHECK-NEXT: [[TMP52:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> [[TMP30]], i1 true)
+ ; CHECK-NEXT: [[TMP64:%.*]] = mul i64 [[TMP42]], 1
+ ; CHECK-NEXT: [[TMP56:%.*]] = add i64 [[TMP64]], [[TMP52]]
+ ; CHECK-NEXT: [[TMP53:%.*]] = icmp ne i64 [[TMP56]], [[TMP43]]
+ ; CHECK-NEXT: [[TMP57:%.*]] = select i1 [[TMP53]], i64 [[TMP56]], i64 [[TMP51]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> [[TMP11]], i1 true)
- ; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX1]], [[TMP15]]
+ ; CHECK-NEXT: [[TMP65:%.*]] = mul i64 [[TMP42]], 0
+ ; CHECK-NEXT: [[TMP60:%.*]] = add i64 [[TMP65]], [[TMP15]]
+ ; CHECK-NEXT: [[TMP59:%.*]] = icmp ne i64 [[TMP60]], [[TMP43]]
+ ; CHECK-NEXT: [[TMP61:%.*]] = select i1 [[TMP59]], i64 [[TMP60]], i64 [[TMP57]]
+ ; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX1]], [[TMP61]]
; CHECK-NEXT: [[TMP17:%.*]] = add i64 3, [[TMP16]]
; CHECK-NEXT: br label [[LOOP_END]]
; CHECK: scalar.ph: