diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index b3cc33d31761d..552432c4b156a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1326,6 +1326,22 @@ foreach fvti = AllFloatVectors in {
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+    def : Pat<(fvti.Vector (any_fma (SplatFPOp (fneg fvti.ScalarRegClass:$rs1)),
+                                    fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
+              (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
+                   fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
+                   // Value to indicate no rounding mode change in
+                   // RISCVInsertReadWriteCSR
+                   FRM_DYN,
+                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+    def : Pat<(fvti.Vector (any_fma (SplatFPOp (fneg fvti.ScalarRegClass:$rs1)),
+                                    fvti.RegClass:$rd, fvti.RegClass:$rs2)),
+              (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
+                   fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
+                   // Value to indicate no rounding mode change in
+                   // RISCVInsertReadWriteCSR
+                   FRM_DYN,
+                   fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
index 1047860ec8db6..6f997082a3d35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
@@ -33,3 +33,49 @@ define <vscale x 8 x double> @vsplat_f64_neg1() {
 ; CHECK-NEXT:    ret
   ret <vscale x 8 x double> splat (double -1.0)
 }
+
+define <vscale x 4 x float> @vfnmsac(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfnmsac:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.s fa5, 2.0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfnmsac.vf v8, fa5, v10
+; CHECK-NEXT:    ret
+  %vd = tail call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> splat (float -2.000000e+00), <vscale x 4 x float> %va)
+  ret <vscale x 4 x float> %vd
+}
+
+define <vscale x 4 x float> @vfnmsub(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfnmsub:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.s fa5, 2.0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfnmsub.vf v8, fa5, v10
+; CHECK-NEXT:    ret
+  %vd = tail call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> splat (float -2.000000e+00), <vscale x 4 x float> %va, <vscale x 4 x float> %vb)
+  ret <vscale x 4 x float> %vd
+}
+
+define <vscale x 8 x float> @vfnmacc(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfnmacc:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.s fa5, 2.0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfnmacc.vf v8, fa5, v12
+; CHECK-NEXT:    ret
+  %neg = fneg <vscale x 8 x float> %va
+  %vd = call <vscale x 8 x float> @llvm.fma.v8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> splat (float -2.000000e+00), <vscale x 8 x float> %neg)
+  ret <vscale x 8 x float> %vd
+}
+
+define <vscale x 8 x float> @vfnmadd(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfnmadd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.s fa5, 2.0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfnmadd.vf v8, fa5, v12
+; CHECK-NEXT:    ret
+  %neg = fneg <vscale x 8 x float> %vb
+  %vd = call <vscale x 8 x float> @llvm.fma.v8f32(<vscale x 8 x float> %va, <vscale x 8 x float> splat (float -2.000000e+00), <vscale x 8 x float> %neg)
+  ret <vscale x 8 x float> %vd
+}