From 41078e9dfb3c64a111182bcc1fdd28c57fed95dc Mon Sep 17 00:00:00 2001
From: Guojin
Date: Mon, 25 Nov 2024 16:31:48 -0500
Subject: [PATCH] [CIR][CIRGen][Builtin][Neon] Lower neon_vabs_v and neon_vabsq_v (#1081)

This implements the same approach as
[OG](https://github.com/llvm/clangir/blob/7619b20d7461b2d46c17a3154ec4b2f12ca35ea5/clang/lib/CodeGen/CGBuiltin.cpp#L7886),
which is to call the LLVM AArch64 intrinsic that eventually becomes
[an ARM64 instruction](https://developer.arm.com/documentation/ddi0596/2021-03/SIMD-FP-Instructions/ABS--Absolute-value--vector--?lang=en).

However, there is a clear alternative: extend CIR::AbsOp and CIR::FAbsOp to
support vector types and lower them only at the LLVM lowering stage, to either
[LLVM::FAbsOp](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrfabs-llvmfabsop)
or
[LLVM::AbsOp](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrabs-llvmabsop),
provided the LLVM dialect can do the right thing in target lowering and
eventually translate them to the LLVM AArch64 intrinsic. The question is
whether that is worth doing. In any case, this diff is put up for suggestions
and ideas; a rough sketch of the alternative lowering is appended after the
diff for reference.
---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  |   9 ++
 clang/test/CIR/CodeGen/AArch64/neon-arith.c   | 143 ++++++++++++++++++
 2 files changed, 152 insertions(+)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 8b043dca3adf..b1158273bd6e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2362,6 +2362,15 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr(
     return emitNeonSplat(builder, getLoc(e->getExprLoc()), ops[0], ops[1],
                          numElements);
   }
+  case NEON::BI__builtin_neon_vabs_v:
+  case NEON::BI__builtin_neon_vabsq_v: {
+    mlir::Location loc = getLoc(e->getExprLoc());
+    ops[0] = builder.createBitcast(ops[0], vTy);
+    if (mlir::isa<cir::CIRFPTypeInterface>(vTy.getEltType())) {
+      return builder.create<cir::FAbsOp>(loc, ops[0]);
+    }
+    return builder.create<cir::AbsOp>(loc, ops[0]);
+  }
   case NEON::BI__builtin_neon_vmovl_v: {
     cir::VectorType dTy = builder.getExtendedOrTruncatedElementVectorType(
         vTy, false /* truncate */,
diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c
index fbc9ce71343d..3f839cce90fc 100644
--- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c
+++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c
@@ -739,3 +739,146 @@ uint64x2_t test_vpaddlq_u32(uint32x4_t a) {
 // LLVM: [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> [[A]])
 // LLVM: ret <2 x i64> [[VPADDL1_I]]
 }
+
+int8x8_t test_vabs_s8(int8x8_t a) {
+  return vabs_s8(a);
+
+  // CIR-LABEL: vabs_s8
+  // CIR: cir.abs {{%.*}} : !cir.vector<!s8i x 8>
+
+  // LLVM: {{.*}}test_vabs_s8(<8 x i8>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_I:%.*]] = call <8 x i8> @llvm.abs.v8i8(<8 x i8> [[a]], i1 false)
+  // LLVM: ret <8 x i8> [[VABS_I]]
+}
+
+int8x16_t test_vabsq_s8(int8x16_t a) {
+  return vabsq_s8(a);
+
+  // CIR-LABEL: vabsq_s8
+  // CIR: cir.abs {{%.*}} : !cir.vector<!s8i x 16>
+
+  // LLVM: {{.*}}test_vabsq_s8(<16 x i8>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_I:%.*]] = call <16 x i8> @llvm.abs.v16i8(<16 x i8> [[a]], i1 false)
+  // LLVM: ret <16 x i8> [[VABS_I]]
+}
+
+int16x4_t test_vabs_s16(int16x4_t a) {
+  return vabs_s16(a);
+
+  // CIR-LABEL: vabs_s16
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!s16i x 4>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s16i x 4>
+
+  // LLVM: {{.*}}test_vabs_s16(<4 x i16>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <4 x i16> @llvm.abs.v4i16(<4 x i16> [[a]], i1 false)
+  // LLVM: ret <4 x i16> [[VABS1_I]]
+}
+
+int16x8_t test_vabsq_s16(int16x8_t a) {
+  return vabsq_s16(a);
+
+  // CIR-LABEL: vabsq_s16
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!s16i x 8>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s16i x 8>
+
+  // LLVM: {{.*}}test_vabsq_s16(<8 x i16>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <8 x i16> @llvm.abs.v8i16(<8 x i16> [[a]], i1 false)
+  // LLVM: ret <8 x i16> [[VABS1_I]]
+}
+
+int32x2_t test_vabs_s32(int32x2_t a) {
+  return vabs_s32(a);
+
+  // CIR-LABEL: vabs_s32
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!s32i x 2>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s32i x 2>
+
+  // LLVM: {{.*}}test_vabs_s32(<2 x i32>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <2 x i32> @llvm.abs.v2i32(<2 x i32> [[a]], i1 false)
+  // LLVM: ret <2 x i32> [[VABS1_I]]
+}
+
+int32x4_t test_vabsq_s32(int32x4_t a) {
+  return vabsq_s32(a);
+
+  // CIR-LABEL: vabsq_s32
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!s32i x 4>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s32i x 4>
+
+  // LLVM: {{.*}}test_vabsq_s32(<4 x i32>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[a]], i1 false)
+  // LLVM: ret <4 x i32> [[VABS1_I]]
+}
+
+int64x1_t test_vabs_s64(int64x1_t a) {
+  return vabs_s64(a);
+
+  // CIR-LABEL: vabs_s64
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!s64i x 1>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s64i x 1>
+
+  // LLVM: {{.*}}test_vabs_s64(<1 x i64>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <1 x i64> @llvm.abs.v1i64(<1 x i64> [[a]], i1 false)
+  // LLVM: ret <1 x i64> [[VABS1_I]]
+}
+
+int64x2_t test_vabsq_s64(int64x2_t a) {
+  return vabsq_s64(a);
+
+  // CIR-LABEL: vabsq_s64
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!s64i x 2>
+  // CIR: cir.abs [[TMP0]] : !cir.vector<!s64i x 2>
+
+  // LLVM: {{.*}}test_vabsq_s64(<2 x i64>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS1_I:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> [[a]], i1 false)
+  // LLVM: ret <2 x i64> [[VABS1_I]]
+}
+
+
+float32x2_t test_vabs_f32(float32x2_t a) {
+  return vabs_f32(a);
+
+  // CIR-LABEL: vabs_f32
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!cir.float x 2>
+  // CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.float x 2>
+
+  // LLVM: {{.*}}test_vabs_f32(<2 x float>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_F:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[a]])
+  // LLVM: ret <2 x float> [[VABS_F]]
+}
+
+float32x4_t test_vabsq_f32(float32x4_t a) {
+  return vabsq_f32(a);
+
+  // CIR-LABEL: vabsq_f32
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!cir.float x 4>
+  // CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.float x 4>
+
+  // LLVM: {{.*}}test_vabsq_f32(<4 x float>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_F:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[a]])
+  // LLVM: ret <4 x float> [[VABS_F]]
+}
+
+float64x1_t test_vabs_f64(float64x1_t a) {
+  return vabs_f64(a);
+
+  // CIR-LABEL: vabs_f64
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 8>), !cir.vector<!cir.double x 1>
+  // CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.double x 1>
+
+  // LLVM: {{.*}}test_vabs_f64(<1 x double>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_F:%.*]] = call <1 x double> @llvm.fabs.v1f64(<1 x double> [[a]])
+  // LLVM: ret <1 x double> [[VABS_F]]
+}
+
+float64x2_t test_vabsq_f64(float64x2_t a) {
+  return vabsq_f64(a);
+
+  // CIR-LABEL: vabsq_f64
+  // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector<!s8i x 16>), !cir.vector<!cir.double x 2>
+  // CIR: cir.fabs [[TMP0]] : !cir.vector<!cir.double x 2>
+
+  // LLVM: {{.*}}test_vabsq_f64(<2 x double>{{.*}}[[a:%.*]])
+  // LLVM: [[VABS_F:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[a]])
+  // LLVM: ret <2 x double> [[VABS_F]]
+}
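
Rough sketch of the alternative mentioned in the commit message (not part of the
patch): keep cir.abs / cir.fabs vector-typed through CIRGen and map them to the
LLVM dialect intrinsics only during LLVM lowering, leaving selection of the
AArch64 ABS instruction to the LLVM backend. The pattern class name, the
getSrc() accessor, and the assumption that cir::AbsOp already accepts vector
operands are hypothetical; the FP side would do the same with mlir::LLVM::FAbsOp.

```cpp
// Hypothetical conversion pattern sketch for clang/lib/CIR/Lowering/DirectToLLVM.
// Assumes cir::AbsOp has a single operand named "src" and a vector-capable
// result type; both are assumptions, not what this patch implements.
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"

struct CIRToLLVMAbsOpLowering
    : public mlir::ConvertOpToLLVMPattern<cir::AbsOp> {
  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;

  mlir::LogicalResult
  matchAndRewrite(cir::AbsOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Convert the CIR result type (scalar or vector of signed ints) to the
    // corresponding LLVM type.
    mlir::Type resTy = getTypeConverter()->convertType(op.getType());
    // llvm.intr.abs carries an is_int_min_poison flag; OG passes false
    // (see the @llvm.abs.* calls in the tests above), so do the same here.
    rewriter.replaceOpWithNewOp<mlir::LLVM::AbsOp>(
        op, resTy, adaptor.getSrc(), /*is_int_min_poison=*/false);
    return mlir::success();
  }
};
```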