diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp index 32d552625a8e8b9..d7ac3afe7b76b21 100644 --- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp +++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp @@ -257,16 +257,24 @@ static OperandInfo getOperandInfo(const MachineOperand &MO, // Vector Unit-Stride Instructions // Vector Strided Instructions /// Dest EEW encoded in the instruction and EMUL=(EEW/SEW)*LMUL + case RISCV::VLE8_V: case RISCV::VSE8_V: + case RISCV::VLSE8_V: case RISCV::VSSE8_V: return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(3, MI), 3); + case RISCV::VLE16_V: case RISCV::VSE16_V: + case RISCV::VLSE16_V: case RISCV::VSSE16_V: return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(4, MI), 4); + case RISCV::VLE32_V: case RISCV::VSE32_V: + case RISCV::VLSE32_V: case RISCV::VSSE32_V: return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(5, MI), 5); + case RISCV::VLE64_V: case RISCV::VSE64_V: + case RISCV::VLSE64_V: case RISCV::VSSE64_V: return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(6, MI), 6); @@ -732,6 +740,31 @@ static bool isSupportedInstr(const MachineInstr &MI) { return false; switch (RVV->BaseInstr) { + // Vector Unit-Stride Instructions + // Vector Strided Instructions + case RISCV::VLE8_V: + case RISCV::VLSE8_V: + case RISCV::VLE16_V: + case RISCV::VLSE16_V: + case RISCV::VLE32_V: + case RISCV::VLSE32_V: + case RISCV::VLE64_V: + case RISCV::VLSE64_V: + // Vector Indexed Instructions + case RISCV::VLUXEI8_V: + case RISCV::VLOXEI8_V: + case RISCV::VLUXEI16_V: + case RISCV::VLOXEI16_V: + case RISCV::VLUXEI32_V: + case RISCV::VLOXEI32_V: + case RISCV::VLUXEI64_V: + case RISCV::VLOXEI64_V: { + for (const MachineMemOperand *MMO : MI.memoperands()) + if (MMO->isVolatile()) + return false; + return true; + } + // Vector Single-Width Integer Add and Subtract case RISCV::VADD_VI: case RISCV::VADD_VV: diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll index 10d24927d9b7836..4d34621cd5f243c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll @@ -1445,10 +1445,9 @@ define @vp_bitreverse_nxv1i64( %va, @vp_bitreverse_nxv1i64_unmasked( %va ; RV32-NEXT: vand.vx v13, v8, a1 ; RV32-NEXT: vand.vx v12, v12, a1 ; RV32-NEXT: vor.vv v11, v12, v11 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV32-NEXT: vsll.vx v13, v13, a4 ; RV32-NEXT: vor.vv v10, v10, v13 ; RV32-NEXT: vsrl.vi v13, v8, 8 @@ -1730,10 +1727,9 @@ define @vp_bitreverse_nxv2i64( %va, @vp_bitreverse_nxv2i64_unmasked( %va ; RV32-NEXT: vand.vx v18, v8, a1 ; RV32-NEXT: vand.vx v16, v16, a1 ; RV32-NEXT: vor.vv v10, v16, v10 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v16, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vsll.vx v18, v18, a4 ; RV32-NEXT: vor.vv v12, v12, v18 ; RV32-NEXT: vsrl.vi v18, v8, 8 @@ -2015,10 +2009,9 @@ define @vp_bitreverse_nxv4i64( %va, @vp_bitreverse_nxv4i64_unmasked( %va ; RV32-NEXT: vand.vx v28, v8, a1 ; RV32-NEXT: vand.vx v24, v24, a1 ; RV32-NEXT: vor.vv v12, v24, v12 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v24, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vsll.vx v28, v28, a4 ; RV32-NEXT: vor.vv v16, v16, v28 ; RV32-NEXT: vsrl.vi v28, v8, 8 @@ -2315,7 +2306,6 @@ define @vp_bitreverse_nxv7i64( %va, 
@vp_bitreverse_nxv7i64( %va, @vp_bitreverse_nxv7i64_unmasked( %va ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 24 ; RV32-NEXT: vand.vx v16, v16, a5 ; RV32-NEXT: vsrl.vi v0, v8, 8 @@ -2704,7 +2691,6 @@ define @vp_bitreverse_nxv8i64( %va, @vp_bitreverse_nxv8i64( %va, @vp_bitreverse_nxv8i64_unmasked( %va ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 24 ; RV32-NEXT: vand.vx v16, v16, a5 ; RV32-NEXT: vsrl.vi v0, v8, 8 diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll index 0dc1d0c32ac4497..0c58cca0f94726b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll @@ -523,11 +523,9 @@ define @vp_bswap_nxv1i64( %va, @vp_bswap_nxv1i64( %va, @vp_bswap_nxv1i64_unmasked( %va, i32 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsll.vx v10, v8, a2 -; RV32-NEXT: addi a1, a3, -256 +; RV32-NEXT: addi a0, a3, -256 ; RV32-NEXT: vsrl.vx v11, v8, a2 ; RV32-NEXT: vsrl.vx v12, v8, a4 -; RV32-NEXT: vand.vx v13, v8, a1 -; RV32-NEXT: vand.vx v12, v12, a1 +; RV32-NEXT: vand.vx v13, v8, a0 +; RV32-NEXT: vand.vx v12, v12, a0 ; RV32-NEXT: vor.vv v11, v12, v11 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV32-NEXT: vsll.vx v13, v13, a4 ; RV32-NEXT: vor.vv v10, v10, v13 ; RV32-NEXT: vsrl.vi v13, v8, 8 @@ -695,11 +691,9 @@ define @vp_bswap_nxv2i64( %va, @vp_bswap_nxv2i64( %va, @vp_bswap_nxv2i64_unmasked( %va, i32 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsll.vx v12, v8, a2 -; RV32-NEXT: addi a1, a3, -256 +; RV32-NEXT: addi a0, a3, -256 ; RV32-NEXT: vsrl.vx v14, v8, a2 ; RV32-NEXT: vsrl.vx v16, v8, a4 -; RV32-NEXT: vand.vx v18, v8, a1 -; RV32-NEXT: vand.vx v16, v16, a1 +; RV32-NEXT: vand.vx v18, v8, a0 +; RV32-NEXT: vand.vx v16, v16, a0 ; RV32-NEXT: vor.vv v14, v16, v14 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v16, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vsll.vx v18, v18, a4 ; RV32-NEXT: vor.vv v12, v12, v18 ; RV32-NEXT: vsrl.vi v18, v8, 8 @@ -867,11 +859,9 @@ define @vp_bswap_nxv4i64( %va, @vp_bswap_nxv4i64( %va, @vp_bswap_nxv4i64_unmasked( %va, i32 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsll.vx v16, v8, a2 -; RV32-NEXT: addi a1, a3, -256 +; RV32-NEXT: addi a0, a3, -256 ; RV32-NEXT: vsrl.vx v20, v8, a2 ; RV32-NEXT: vsrl.vx v24, v8, a4 -; RV32-NEXT: vand.vx v28, v8, a1 -; RV32-NEXT: vand.vx v24, v24, a1 +; RV32-NEXT: vand.vx v28, v8, a0 +; RV32-NEXT: vand.vx v24, v24, a0 ; RV32-NEXT: vor.vv v20, v24, v20 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v24, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vsll.vx v28, v28, a4 ; RV32-NEXT: vor.vv v16, v16, v28 ; RV32-NEXT: vsrl.vi v28, v8, 8 @@ -1043,51 +1031,49 @@ define @vp_bswap_nxv7i64( %va, @vp_bswap_nxv7i64_unmasked( %va, i32 ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v24, v8, a2 -; RV32-NEXT: addi a1, 
a3, -256 +; RV32-NEXT: addi a0, a3, -256 ; RV32-NEXT: vsrl.vx v16, v8, a2 ; RV32-NEXT: vsrl.vx v0, v8, a4 -; RV32-NEXT: vand.vx v0, v0, a1 +; RV32-NEXT: vand.vx v0, v0, a0 ; RV32-NEXT: vor.vv v16, v0, v16 -; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: slli a2, a2, 3 -; RV32-NEXT: add a2, sp, a2 -; RV32-NEXT: addi a2, a2, 16 -; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; RV32-NEXT: vand.vx v0, v8, a1 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 3 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV32-NEXT: vand.vx v0, v8, a0 ; RV32-NEXT: vsll.vx v0, v0, a4 ; RV32-NEXT: vor.vv v16, v24, v0 -; RV32-NEXT: addi a1, sp, 16 -; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; RV32-NEXT: vlse64.v v0, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 24 ; RV32-NEXT: vand.vx v16, v16, a5 ; RV32-NEXT: vsrl.vi v24, v8, 8 @@ -1221,7 +1205,6 @@ define @vp_bswap_nxv7i64_unmasked( %va, i32 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vsll.vi v24, v24, 8 ; RV32-NEXT: vor.vv v8, v8, v24 -; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: csrr a0, vlenb @@ -1318,51 +1301,49 @@ define @vp_bswap_nxv8i64( %va, @vp_bswap_nxv8i64_unmasked( %va, i32 ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v24, v8, a2 -; RV32-NEXT: addi a1, a3, -256 +; RV32-NEXT: addi a0, a3, -256 ; RV32-NEXT: vsrl.vx v16, v8, a2 ; RV32-NEXT: vsrl.vx v0, v8, a4 -; RV32-NEXT: vand.vx v0, v0, a1 +; RV32-NEXT: vand.vx v0, v0, a0 ; RV32-NEXT: vor.vv v16, v0, v16 -; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: slli a2, a2, 3 -; RV32-NEXT: add a2, sp, a2 -; RV32-NEXT: addi a2, a2, 16 -; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill -; RV32-NEXT: vand.vx v0, v8, a1 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 3 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV32-NEXT: vand.vx v0, v8, a0 ; RV32-NEXT: vsll.vx v0, v0, a4 ; RV32-NEXT: vor.vv v16, v24, v0 -; RV32-NEXT: addi a1, sp, 16 -; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; RV32-NEXT: vlse64.v v0, (a6), zero -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 24 ; RV32-NEXT: vand.vx v16, v16, a5 ; RV32-NEXT: vsrl.vi v24, v8, 8 @@ -1496,7 +1475,6 @@ define @vp_bswap_nxv8i64_unmasked( %va, i32 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vsll.vi v24, v24, 8 ; RV32-NEXT: vor.vv v8, v8, v24 -; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: csrr a0, vlenb @@ -1716,11 +1694,9 @@ define @vp_bswap_nxv1i48( %va, @vp_bswap_nxv1i48( %va, @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c) ; CHECK-NEXT: addi a1, a0, 128 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v16, (a1) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll index 61cc754e21df809..9c733b17dc6e9a8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll @@ -2367,9 +2367,8 @@ define @icmp_eq_vx_nxv1i64( %va, i64 %b, @icmp_eq_vx_swap_nxv1i64( %va, i64 %b ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmseq.vv v0, v9, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -2451,9 +2449,8 @@ define @icmp_ne_vx_nxv1i64( %va, i64 %b, @icmp_ne_vx_swap_nxv1i64( %va, i64 %b ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmsne.vv v0, v9, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -2535,9 +2531,8 @@ define @icmp_ugt_vx_nxv1i64( %va, i64 %b, @icmp_ugt_vx_swap_nxv1i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -2619,9 +2613,8 @@ define @icmp_uge_vx_nxv1i64( %va, i64 %b, @icmp_uge_vx_swap_nxv1i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmsleu.vv v0, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -2704,9 +2696,8 @@ define @icmp_ult_vx_nxv1i64( %va, i64 %b, @icmp_ult_vx_swap_nxv1i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v9, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -2788,9 +2778,8 @@ define @icmp_sgt_vx_nxv1i64( %va, i64 %b, @icmp_sgt_vx_swap_nxv1i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmslt.vv v0, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -2872,9 +2860,8 @@ define @icmp_sge_vx_nxv1i64( %va, i64 %b, @icmp_sge_vx_swap_nxv1i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmsle.vv v0, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -2957,9 +2943,8 @@ define @icmp_slt_vx_nxv1i64( %va, i64 %b, @icmp_slt_vx_swap_nxv1i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; 
RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmslt.vv v0, v9, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -3041,9 +3025,8 @@ define @icmp_sle_vx_nxv1i64( %va, i64 %b, @icmp_sle_vx_swap_nxv1i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -3129,9 +3111,8 @@ define @icmp_eq_vx_nxv8i64( %va, i64 %b, @icmp_eq_vx_swap_nxv8i64( %va, i64 %b ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmseq.vv v16, v24, v8, v0.t ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3220,9 +3200,8 @@ define @icmp_ne_vx_nxv8i64( %va, i64 %b, @icmp_ne_vx_swap_nxv8i64( %va, i64 %b ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmsne.vv v16, v24, v8, v0.t ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3311,9 +3289,8 @@ define @icmp_ugt_vx_nxv8i64( %va, i64 %b, @icmp_ugt_vx_swap_nxv8i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmsltu.vv v16, v8, v24, v0.t ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3402,9 +3378,8 @@ define @icmp_uge_vx_nxv8i64( %va, i64 %b, @icmp_uge_vx_swap_nxv8i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmsleu.vv v16, v8, v24, v0.t ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3494,9 +3468,8 @@ define @icmp_ult_vx_nxv8i64( %va, i64 %b, @icmp_ult_vx_swap_nxv8i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmsltu.vv v16, v24, v8, v0.t ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3585,9 +3557,8 @@ define @icmp_sgt_vx_nxv8i64( %va, i64 %b, @icmp_sgt_vx_swap_nxv8i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmslt.vv v16, v8, v24, v0.t ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3676,9 +3646,8 @@ define @icmp_sge_vx_nxv8i64( %va, i64 %b, 
@icmp_sge_vx_swap_nxv8i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmsle.vv v16, v8, v24, v0.t ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3768,9 +3736,8 @@ define @icmp_slt_vx_nxv8i64( %va, i64 %b, @icmp_slt_vx_swap_nxv8i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmslt.vv v16, v24, v8, v0.t ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3859,9 +3825,8 @@ define @icmp_sle_vx_nxv8i64( %va, i64 %b, @icmp_sle_vx_swap_nxv8i64( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmsle.vv v16, v24, v8, v0.t ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll index fee6799e992f31c..77f3cf3ca4980bc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -1487,9 +1487,8 @@ define @vadd_vx_nxv1i64( %va, i64 %b, @vadd_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1583,9 +1581,8 @@ define @vadd_vx_nxv2i64( %va, i64 %b, @vadd_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1679,9 +1675,8 @@ define @vadd_vx_nxv4i64( %va, i64 %b, @vadd_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1775,9 +1769,8 @@ define @vadd_vx_nxv8i64( %va, i64 %b, @vadd_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll index b0c5a72f6f9e9c6..4866bb06f19ec1d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll @@ -1314,9 
+1314,8 @@ define @vand_vx_nxv1i64( %va, i64 %b, @vand_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1410,9 +1408,8 @@ define @vand_vx_nxv2i64( %va, i64 %b, @vand_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1506,9 +1502,8 @@ define @vand_vx_nxv4i64( %va, i64 %b, @vand_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1602,9 +1596,8 @@ define @vand_vx_nxv8i64( %va, i64 %b, @vand_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll index 32992301bd39bfe..763b2908b102675 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll @@ -1115,9 +1115,8 @@ define @vandn_vx_vp_nxv1i64(i64 %a, %b, @vandn_vx_vp_nxv1i64(i64 %a, %b, @vandn_vx_vp_nxv2i64(i64 %a, %b, @vandn_vx_vp_nxv2i64(i64 %a, %b, @vandn_vx_vp_nxv4i64(i64 %a, %b, @vandn_vx_vp_nxv4i64(i64 %a, %b, @vandn_vx_vp_nxv8i64(i64 %a, %b, @vandn_vx_vp_nxv8i64(i64 %a, %b, @vdiv_vx_nxv1i64( %va, i64 %b, @vdiv_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -969,9 +967,8 @@ define @vdiv_vx_nxv2i64( %va, i64 %b, @vdiv_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1045,9 +1041,8 @@ define @vdiv_vx_nxv4i64( %va, i64 %b, @vdiv_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v12 ; 
RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1121,9 +1115,8 @@ define @vdiv_vx_nxv8i64( %va, i64 %b, @vdiv_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll index 3e913d4f682ed47..2f35f91d77a4e65 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll @@ -892,9 +892,8 @@ define @vdivu_vx_nxv1i64( %va, i64 %b, @vdivu_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -968,9 +966,8 @@ define @vdivu_vx_nxv2i64( %va, i64 %b, @vdivu_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1044,9 +1040,8 @@ define @vdivu_vx_nxv4i64( %va, i64 %b, @vdivu_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1120,9 +1114,8 @@ define @vdivu_vx_nxv8i64( %va, i64 %b, @vdivu_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir index f9b81863d68d6af..a1bbfc8a7d35144 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir @@ -573,6 +573,36 @@ body: | PseudoVSE8_V_MF2 %x, $noreg, 1, 3 /* e8 */ ... --- +name: vleN_v +body: | + bb.0: + ; CHECK-LABEL: name: vleN_v + ; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 + %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 +... 
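Aside on the EEW operands in these MIR tests: the literals annotated 3 /* e8 */ and 4 /* e16 */ are log2(EEW), and the new VLE/VLSE cases added to getOperandInfo above pass the same encoding (3 through 6 for e8 through e64) to RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL. A minimal standalone sketch of that mapping, assuming nothing beyond the values visible in the hunks above (eewFromLog2 is a hypothetical helper for illustration, not part of the patch):

#include <cassert>

// log2(EEW) -> EEW in bits: 3 -> e8, 4 -> e16, 5 -> e32, 6 -> e64,
// matching the "/* e8 */" style operand comments in the MIR tests.
static unsigned eewFromLog2(unsigned Log2EEW) {
  return 1u << Log2EEW;
}

int main() {
  assert(eewFromLog2(3) == 8);
  assert(eewFromLog2(4) == 16);
  assert(eewFromLog2(5) == 32);
  assert(eewFromLog2(6) == 64);
  return 0;
}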
+--- +name: vleN_v_incompatible_eew +body: | + bb.0: + ; CHECK-LABEL: name: vleN_v_incompatible_eew + ; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */ + %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 + %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 +... +--- +name: vleN_v_incompatible_emul +body: | + bb.0: + ; CHECK-LABEL: name: vleN_v_incompatible_emul + ; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: %x:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 + %x:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 +... +--- name: vsseN_v body: | bb.0: @@ -705,6 +735,56 @@ body: | %y:vr = PseudoVLUXEI8_V_MF2_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0 ... --- +name: vluxeiN_v_idx_incompatible_eew +body: | + bb.0: + ; CHECK-LABEL: name: vluxeiN_v_idx_incompatible_eew + ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */ + ; CHECK-NEXT: %y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 + %y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 +... +--- +name: vluxeiN_v_idx_incompatible_emul +body: | + bb.0: + ; CHECK-LABEL: name: vluxeiN_v_idx_incompatible_emul + ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: %y:vr = PseudoVLUXEI8_V_MF2_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 + %y:vr = PseudoVLUXEI8_V_MF2_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 +... +--- +name: vluxeiN_v_vd +body: | + bb.0: + ; CHECK-LABEL: name: vluxeiN_v_vd + ; CHECK: %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 + %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 +... +--- +name: vluxeiN_v_vd_incompatible_eew +body: | + bb.0: + ; CHECK-LABEL: name: vluxeiN_v_vd_incompatible_eew + ; CHECK: %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */ + %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 + %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 +... +--- +name: vluxeiN_vd_incompatible_emul +body: | + bb.0: + ; CHECK-LABEL: name: vluxeiN_vd_incompatible_emul + ; CHECK: %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ + ; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 + %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 +... 
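The volatile case exercised by the new vlopt-volatile-ld.mir test further below corresponds to the memoperand guard this patch adds to isSupportedInstr. Restated as a free-standing sketch for readability (the helper name hasNoVolatileMemOperands is illustrative only; in the patch the loop sits directly inside the switch over BaseInstr):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"

// A vector load is only a VL-reduction candidate when none of its memory
// operands is volatile; presumably reducing VL would change how much memory
// a volatile access touches, which must be preserved.
static bool hasNoVolatileMemOperands(const llvm::MachineInstr &MI) {
  for (const llvm::MachineMemOperand *MMO : MI.memoperands())
    if (MMO->isVolatile())
      return false;
  return true;
}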
+--- name: vmop_mm body: | bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vlopt-volatile-ld.mir b/llvm/test/CodeGen/RISCV/rvv/vlopt-volatile-ld.mir new file mode 100644 index 000000000000000..e8f7957de47ca95 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlopt-volatile-ld.mir @@ -0,0 +1,13 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +# RUN: llc %s -o - -mtriple=riscv64 -mattr=+v -run-pass=riscv-vl-optimizer -verify-machineinstrs | FileCheck %s + +--- +name: vleN_v_volatile +body: | + bb.0: + ; CHECK-LABEL: name: vleN_v + ; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ :: (volatile load ()) + ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */ + %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 :: (volatile load ()) + %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 +... diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll index 333117c8dce2d8a..c334e70f1f358fe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll @@ -1654,9 +1654,9 @@ define @vmacc_vx_nxv1i64( %a, i64 %b, @vmacc_vx_nxv1i64_unmasked( %a, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV32-NEXT: vmacc.vv v9, v8, v10 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1729,9 +1729,8 @@ define @vmacc_vx_nxv1i64_ta( %a, i64 %b, @vmacc_vx_nxv2i64( %a, i64 %b, @vmacc_vx_nxv2i64_unmasked( %a, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma ; RV32-NEXT: vmacc.vv v10, v8, v12 ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1866,9 +1865,8 @@ define @vmacc_vx_nxv2i64_ta( %a, i64 %b, @vmacc_vx_nxv4i64( %a, i64 %b, @vmacc_vx_nxv4i64_unmasked( %a, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma ; RV32-NEXT: vmacc.vv v12, v8, v16 ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2003,9 +2001,8 @@ define @vmacc_vx_nxv4i64_ta( %a, i64 %b, @vmacc_vx_nxv8i64( %a, i64 %b, @vmacc_vx_nxv8i64_unmasked( %a, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, ma +; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma ; RV32-NEXT: vmacc.vv v16, v8, v24 ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2143,9 +2140,8 @@ define @vmacc_vx_nxv8i64_ta( %a, i64 %b, @vmax_vx_nxv1i64( %va, i64 %b, @vmax_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v 
v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1200,9 +1198,8 @@ define @vmax_vx_nxv2i64( %va, i64 %b, @vmax_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1276,9 +1272,8 @@ define @vmax_vx_nxv4i64( %va, i64 %b, @vmax_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1352,9 +1346,8 @@ define @vmax_vx_nxv8i64( %va, i64 %b, @vmax_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll index 674b0b8060003fa..8147d467be04e3c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll @@ -1123,9 +1123,8 @@ define @vmaxu_vx_nxv1i64( %va, i64 %b, @vmaxu_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1199,9 +1197,8 @@ define @vmaxu_vx_nxv2i64( %va, i64 %b, @vmaxu_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1275,9 +1271,8 @@ define @vmaxu_vx_nxv4i64( %va, i64 %b, @vmaxu_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1351,9 +1345,8 @@ define @vmaxu_vx_nxv8i64( %va, i64 %b, @vmaxu_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: 
.cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll index 79631cd80594c9e..614bd4cbde9ec6c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll @@ -1124,9 +1124,8 @@ define @vmin_vx_nxv1i64( %va, i64 %b, @vmin_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1200,9 +1198,8 @@ define @vmin_vx_nxv2i64( %va, i64 %b, @vmin_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1276,9 +1272,8 @@ define @vmin_vx_nxv4i64( %va, i64 %b, @vmin_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1352,9 +1346,8 @@ define @vmin_vx_nxv8i64( %va, i64 %b, @vmin_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll index bc93b62fab7fcde..21160553af59d3f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll @@ -1123,9 +1123,8 @@ define @vminu_vx_nxv1i64( %va, i64 %b, @vminu_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1199,9 +1197,8 @@ define @vminu_vx_nxv2i64( %va, i64 %b, @vminu_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1275,9 +1271,8 @@ define @vminu_vx_nxv4i64( %va, i64 %b, @vminu_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v12 ; 
RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1351,9 +1345,8 @@ define @vminu_vx_nxv8i64( %va, i64 %b, @vminu_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll index b63098b64e292e7..f0907e41cd43d28 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll @@ -934,9 +934,8 @@ define @vmul_vx_nxv1i64( %va, i64 %b, @vmul_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1010,9 +1008,8 @@ define @vmul_vx_nxv2i64( %va, i64 %b, @vmul_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1086,9 +1082,8 @@ define @vmul_vx_nxv4i64( %va, i64 %b, @vmul_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1162,9 +1156,8 @@ define @vmul_vx_nxv8i64( %va, i64 %b, @vmul_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll index 2e0daa66c82a7d3..3484d288088a058 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll @@ -1654,9 +1654,9 @@ define @vnmsac_vx_nxv1i64( %a, i64 %b, @vnmsac_vx_nxv1i64_unmasked( %a, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV32-NEXT: vnmsac.vv v9, v8, v10 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1729,9 +1729,8 @@ define @vnmsac_vx_nxv1i64_ta( %a, i64 %b, < ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; 
RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnmsac.vv v9, v8, v10, v0.t ; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1791,9 +1790,9 @@ define @vnmsac_vx_nxv2i64( %a, i64 %b, @vnmsac_vx_nxv2i64_unmasked( %a, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma ; RV32-NEXT: vnmsac.vv v10, v8, v12 ; RV32-NEXT: vmv2r.v v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1866,9 +1865,8 @@ define @vnmsac_vx_nxv2i64_ta( %a, i64 %b, < ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vnmsac.vv v10, v8, v12, v0.t ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1928,9 +1926,9 @@ define @vnmsac_vx_nxv4i64( %a, i64 %b, @vnmsac_vx_nxv4i64_unmasked( %a, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma +; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma ; RV32-NEXT: vnmsac.vv v12, v8, v16 ; RV32-NEXT: vmv4r.v v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2003,9 +2001,8 @@ define @vnmsac_vx_nxv4i64_ta( %a, i64 %b, < ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vnmsac.vv v12, v8, v16, v0.t ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2067,9 +2064,9 @@ define @vnmsac_vx_nxv8i64( %a, i64 %b, @vnmsac_vx_nxv8i64_unmasked( %a, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, ma +; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma ; RV32-NEXT: vnmsac.vv v16, v8, v24 ; RV32-NEXT: vmv8r.v v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2143,9 +2140,8 @@ define @vnmsac_vx_nxv8i64_ta( %a, i64 %b, < ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vnmsac.vv v16, v8, v24, v0.t ; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll index ef281c52838f6f8..e864d71fdad1108 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll @@ -1326,9 +1326,8 @@ define @vor_vx_nxv1i64( %va, i64 %b, @vor_vx_nxv1i64_unmasked( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: 
.cfi_def_cfa_offset 0 @@ -1422,9 +1420,8 @@ define @vor_vx_nxv2i64( %va, i64 %b, @vor_vx_nxv2i64_unmasked( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1518,9 +1514,8 @@ define @vor_vx_nxv4i64( %va, i64 %b, @vor_vx_nxv4i64_unmasked( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1614,9 +1608,8 @@ define @vor_vx_nxv8i64( %va, i64 %b, @vor_vx_nxv8i64_unmasked( %va, i64 % ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll index 3273274a70b415d..66ba2697fe5f649 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll @@ -893,9 +893,8 @@ define @vrem_vx_nxv1i64( %va, i64 %b, @vrem_vx_nxv1i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma -; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -969,9 +967,8 @@ define @vrem_vx_nxv2i64( %va, i64 %b, @vrem_vx_nxv2i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma -; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1045,9 +1041,8 @@ define @vrem_vx_nxv4i64( %va, i64 %b, @vrem_vx_nxv4i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma -; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 @@ -1121,9 +1115,8 @@ define @vrem_vx_nxv8i64( %va, i64 %b, @vrem_vx_nxv8i64_unmasked( %va, i64 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma -; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll index 6b588d0917ff02d..4608661eb5df3dc 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
@@ -892,9 +892,8 @@ define @vremu_vx_nxv1i64( %va, i64 %b, @vremu_vx_nxv1i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vremu.vv v8, v8, v9
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -968,9 +966,8 @@ define @vremu_vx_nxv2i64( %va, i64 %b, @vremu_vx_nxv2i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vremu.vv v8, v8, v10
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1044,9 +1040,8 @@ define @vremu_vx_nxv4i64( %va, i64 %b, @vremu_vx_nxv4i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vremu.vv v8, v8, v12
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1120,9 +1114,8 @@ define @vremu_vx_nxv8i64( %va, i64 %b, @vremu_vx_nxv8i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vremu.vv v8, v8, v16
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
index 0f38e9408fb5f74..c41139c64eb0886 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
@@ -842,9 +842,8 @@ define @vrsub_vx_nxv1i64( %va, i64 %b, @vrsub_vx_nxv1i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsub.vv v8, v9, v8
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -918,9 +916,8 @@ define @vrsub_vx_nxv2i64( %va, i64 %b, @vrsub_vx_nxv2i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsub.vv v8, v10, v8
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -994,9 +990,8 @@ define @vrsub_vx_nxv4i64( %va, i64 %b, @vrsub_vx_nxv4i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsub.vv v8, v12, v8
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1070,9 +1064,8 @@ define @vrsub_vx_nxv8i64( %va, i64 %b, @vrsub_vx_nxv8i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsub.vv v8, v16, v8
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index 575d041b091dd23..e471f4b2e92b5fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -1425,9 +1425,8 @@ define @vsadd_vx_nxv1i64( %va, i64 %b, @vsadd_vx_nxv1i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsadd.vv v8, v8, v9
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1521,9 +1519,8 @@ define @vsadd_vx_nxv2i64( %va, i64 %b, @vsadd_vx_nxv2i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsadd.vv v8, v8, v10
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1617,9 +1613,8 @@ define @vsadd_vx_nxv4i64( %va, i64 %b, @vsadd_vx_nxv4i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsadd.vv v8, v8, v12
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1713,9 +1707,8 @@ define @vsadd_vx_nxv8i64( %va, i64 %b, @vsadd_vx_nxv8i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsadd.vv v8, v8, v16
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index c9ed72bc63da21d..f76a2b4b78bcaca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -1424,9 +1424,8 @@ define @vsaddu_vx_nxv1i64( %va, i64 %b, @vsaddu_vx_nxv1i64_unmasked( %va, i6
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsaddu.vv v8, v8, v9
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1520,9 +1518,8 @@ define @vsaddu_vx_nxv2i64( %va, i64 %b, @vsaddu_vx_nxv2i64_unmasked( %va, i6
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsaddu.vv v8, v8, v10
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1616,9 +1612,8 @@ define @vsaddu_vx_nxv4i64( %va, i64 %b, @vsaddu_vx_nxv4i64_unmasked( %va, i6
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsaddu.vv v8, v8, v12
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1712,9 +1706,8 @@ define @vsaddu_vx_nxv8i64( %va, i64 %b, @vsaddu_vx_nxv8i64_unmasked( %va, i6
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsaddu.vv v8, v8, v16
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index c0da928a72e9ad8..ebf8d5eeb40bc0e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -1468,9 +1468,8 @@ define @vssub_vx_nxv1i64( %va, i64 %b, @vssub_vx_nxv1i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vssub.vv v8, v8, v9
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1566,9 +1564,8 @@ define @vssub_vx_nxv2i64( %va, i64 %b, @vssub_vx_nxv2i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vssub.vv v8, v8, v10
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1664,9 +1660,8 @@ define @vssub_vx_nxv4i64( %va, i64 %b, @vssub_vx_nxv4i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vssub.vv v8, v8, v12
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1762,9 +1756,8 @@ define @vssub_vx_nxv8i64( %va, i64 %b, @vssub_vx_nxv8i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vssub.vv v8, v8, v16
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index b602f11e2c805f3..d54901c93d53cf8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -1466,9 +1466,8 @@ define @vssubu_vx_nxv1i64( %va, i64 %b, @vssubu_vx_nxv1i64_unmasked( %va, i6
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vssubu.vv v8, v8, v9
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1564,9 +1562,8 @@ define @vssubu_vx_nxv2i64( %va, i64 %b, @vssubu_vx_nxv2i64_unmasked( %va, i6
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vssubu.vv v8, v8, v10
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1662,9 +1658,8 @@ define @vssubu_vx_nxv4i64( %va, i64 %b, @vssubu_vx_nxv4i64_unmasked( %va, i6
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vssubu.vv v8, v8, v12
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1760,9 +1754,8 @@ define @vssubu_vx_nxv8i64( %va, i64 %b, @vssubu_vx_nxv8i64_unmasked( %va, i6
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vssubu.vv v8, v8, v16
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
index 65ba791fe7801f4..e28da6bc4ec6486 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
@@ -922,9 +922,8 @@ define @vsub_vx_nxv1i64( %va, i64 %b, @vsub_vx_nxv1i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsub.vv v8, v8, v9
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -998,9 +996,8 @@ define @vsub_vx_nxv2i64( %va, i64 %b, @vsub_vx_nxv2i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsub.vv v8, v8, v10
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1074,9 +1070,8 @@ define @vsub_vx_nxv4i64( %va, i64 %b, @vsub_vx_nxv4i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsub.vv v8, v8, v12
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1150,9 +1144,8 @@ define @vsub_vx_nxv8i64( %va, i64 %b, @vsub_vx_nxv8i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsub.vv v8, v8, v16
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
index f3dd7ec48881f91..1694a7af0a0b903
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
@@ -1694,9 +1694,8 @@ define @vxor_vx_nxv1i64( %va, i64 %b, @vxor_vx_nxv1i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
 ; RV32-NEXT: vxor.vv v8, v8, v9
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1810,9 +1808,8 @@ define @vxor_vx_nxv2i64( %va, i64 %b, @vxor_vx_nxv2i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
 ; RV32-NEXT: vxor.vv v8, v8, v10
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1926,9 +1922,8 @@ define @vxor_vx_nxv4i64( %va, i64 %b, @vxor_vx_nxv4i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
 ; RV32-NEXT: vxor.vv v8, v8, v12
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2042,9 +2036,8 @@ define @vxor_vx_nxv8i64( %va, i64 %b, @vxor_vx_nxv8i64_unmasked( %va, i64
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: sw a1, 12(sp)
 ; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
 ; RV32-NEXT: vxor.vv v8, v8, v16
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: .cfi_def_cfa_offset 0