
Commit 41078e9

[CIR][CIRGen][Builtin][Neon] Lower neon_vabs_v and neon_vabsq_v (#1081)
This now implements the same approach as [OG](https://github.com/llvm/clangir/blob/7619b20d7461b2d46c17a3154ec4b2f12ca35ea5/clang/lib/CodeGen/CGBuiltin.cpp#L7886), which is to call the LLVM AArch64 intrinsic that eventually becomes [an ARM64 instruction](https://developer.arm.com/documentation/ddi0596/2021-03/SIMD-FP-Instructions/ABS--Absolute-value--vector--?lang=en). However, there is a clear alternative: extend CIR::AbsOp and CIR::FAbsOp to support vector types and lower them only at the LLVM lowering stage to either [LLVM::FAbsOp](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrfabs-llvmfabsop) or [LLVM::AbsOp](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrabs-llvmabsop), provided the LLVM dialect does the right thing during target lowering and eventually translates them to the LLVM AArch64 intrinsic. The question is whether that is worth doing. Anyway, putting this diff up for suggestions and ideas.
1 parent 9db508a · commit 41078e9
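
For a quick sense of what this lowering produces, here is a minimal sketch distilled from the tests added below; the wrapper names `abs8` and `fabs2` are made up for illustration only:

```c
#include <arm_neon.h>

// Hypothetical wrapper, mirroring test_vabs_s8 from the new test file:
// integer element types take the cir.abs path and end up as llvm.abs.
int8x8_t abs8(int8x8_t a) {
  // CIR:  cir.abs %... : !cir.vector<!s8i x 8>
  // LLVM: call <8 x i8> @llvm.abs.v8i8(<8 x i8> %a, i1 false)
  return vabs_s8(a);
}

// Hypothetical wrapper, mirroring test_vabs_f32: float element types
// take the cir.fabs path and end up as llvm.fabs instead.
float32x2_t fabs2(float32x2_t a) {
  // CIR:  cir.fabs %... : !cir.vector<!cir.float x 2>
  // LLVM: call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
  return vabs_f32(a);
}
```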

2 files changed: +152 −0 lines

clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp

Lines changed: 9 additions & 0 deletions
```diff
@@ -2362,6 +2362,15 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr(
     return emitNeonSplat(builder, getLoc(e->getExprLoc()), ops[0], ops[1],
                          numElements);
   }
+  case NEON::BI__builtin_neon_vabs_v:
+  case NEON::BI__builtin_neon_vabsq_v: {
+    mlir::Location loc = getLoc(e->getExprLoc());
+    ops[0] = builder.createBitcast(ops[0], vTy);
+    if (mlir::isa<cir::SingleType, cir::DoubleType>(vTy.getEltType())) {
+      return builder.create<cir::FAbsOp>(loc, ops[0]);
+    }
+    return builder.create<cir::AbsOp>(loc, ops[0]);
+  }
   case NEON::BI__builtin_neon_vmovl_v: {
     cir::VectorType dTy = builder.getExtendedOrTruncatedElementVectorType(
         vTy, false /* truncate */,
```

clang/test/CIR/CodeGen/AArch64/neon-arith.c

Lines changed: 143 additions & 0 deletions
```diff
@@ -739,3 +739,146 @@ uint64x2_t test_vpaddlq_u32(uint32x4_t a) {
   // LLVM: [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> [[A]])
   // LLVM: ret <2 x i64> [[VPADDL1_I]]
 }
+
+int8x8_t test_vabs_s8(int8x8_t a) {
+  return vabs_s8(a);
+
+  // CIR-LABEL: vabs_s8
+  // CIR: cir.abs {{%.*}} : !cir.vector<!s8i x 8>
+
+  // LLVM: {{.*}}test_vabs_s8(<8 x i8>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_I:%.*]] = call <8 x i8> @llvm.abs.v8i8(<8 x i8> [[a]], i1 false)
+  // LLVM: ret <8 x i8> [[VABS_I]]
+}
+
+int8x16_t test_vabsq_s8(int8x16_t a) {
+  return vabsq_s8(a);
+
+  // CIR-LABEL: vabsq_s8
+  // CIR: cir.abs {{%.*}} : !cir.vector<!s8i x 16>
+
+  // LLVM: {{.*}}test_vabsq_s8(<16 x i8>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_I:%.*]] = call <16 x i8> @llvm.abs.v16i8(<16 x i8> [[a]], i1 false)
+  // LLVM: ret <16 x i8> [[VABS_I]]
+}
+
+int16x4_t test_vabs_s16(int16x4_t a) {
+  return vabs_s16(a);
+
+  // CIR-LABEL: vabs_s16
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!s16i x 4>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s16i x 4>
+
+  // LLVM: {{.*}}test_vabs_s16(<4 x i16>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <4 x i16> @llvm.abs.v4i16(<4 x i16> [[a]], i1 false)
+  // LLVM: ret <4 x i16> [[VABS1_I]]
+}
+
+int16x8_t test_vabsq_s16(int16x8_t a) {
+  return vabsq_s16(a);
+
+  // CIR-LABEL: vabsq_s16
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!s16i x 8>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s16i x 8>
+
+  // LLVM: {{.*}}test_vabsq_s16(<8 x i16>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <8 x i16> @llvm.abs.v8i16(<8 x i16> [[a]], i1 false)
+  // LLVM: ret <8 x i16> [[VABS1_I]]
+}
+
+int32x2_t test_vabs_s32(int32x2_t a) {
+  return vabs_s32(a);
+
+  // CIR-LABEL: vabs_s32
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!s32i x 2>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s32i x 2>
+
+  // LLVM: {{.*}}test_vabs_s32(<2 x i32>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <2 x i32> @llvm.abs.v2i32(<2 x i32> [[a]], i1 false)
+  // LLVM: ret <2 x i32> [[VABS1_I]]
+}
+
+int32x4_t test_vabsq_s32(int32x4_t a) {
+  return vabsq_s32(a);
+
+  // CIR-LABEL: vabsq_s32
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!s32i x 4>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s32i x 4>
+
+  // LLVM: {{.*}}test_vabsq_s32(<4 x i32>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[a]], i1 false)
+  // LLVM: ret <4 x i32> [[VABS1_I]]
+}
+
+int64x1_t test_vabs_s64(int64x1_t a) {
+  return vabs_s64(a);
+
+  // CIR-LABEL: vabs_s64
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!s64i x 1>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s64i x 1>
+
+  // LLVM: {{.*}}test_vabs_s64(<1 x i64>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <1 x i64> @llvm.abs.v1i64(<1 x i64> [[a]], i1 false)
+  // LLVM: ret <1 x i64> [[VABS1_I]]
+}
+
+int64x2_t test_vabsq_s64(int64x2_t a) {
+  return vabsq_s64(a);
+
+  // CIR-LABEL: vabsq_s64
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!s64i x 2>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s64i x 2>
+
+  // LLVM: {{.*}}test_vabsq_s64(<2 x i64>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> [[a]], i1 false)
+  // LLVM: ret <2 x i64> [[VABS1_I]]
+}
+
+
+float32x2_t test_vabs_f32(float32x2_t a) {
+  return vabs_f32(a);
+
+  // CIR-LABEL: vabs_f32
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!cir.float x 2>
+  // CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.float x 2>
+
+  // LLVM: {{.*}}test_vabs_f32(<2 x float>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_F:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[a]])
+  // LLVM: ret <2 x float> [[VABS_F]]
+}
+
+float32x4_t test_vabsq_f32(float32x4_t a) {
+  return vabsq_f32(a);
+
+  // CIR-LABEL: vabsq_f32
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!cir.float x 4>
+  // CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.float x 4>
+
+  // LLVM: {{.*}}test_vabsq_f32(<4 x float>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_F:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[a]])
+  // LLVM: ret <4 x float> [[VABS_F]]
+}
+
+float64x1_t test_vabs_f64(float64x1_t a) {
+  return vabs_f64(a);
+
+  // CIR-LABEL: vabs_f64
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!cir.double x 1>
+  // CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.double x 1>
+
+  // LLVM: {{.*}}test_vabs_f64(<1 x double>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_F:%.*]] = call <1 x double> @llvm.fabs.v1f64(<1 x double> [[a]])
+  // LLVM: ret <1 x double> [[VABS_F]]
+}
+
+float64x2_t test_vabsq_f64(float64x2_t a) {
+  return vabsq_f64(a);
+
+  // CIR-LABEL: vabsq_f64
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!cir.double x 2>
+  // CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.double x 2>
+
+  // LLVM: {{.*}}test_vabsq_f64(<2 x double>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_F:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[a]])
+  // LLVM: ret <2 x double> [[VABS_F]]
+}
```
