From 81253380dd14a32553ae87566571095cc2f66ed9 Mon Sep 17 00:00:00 2001
From: Alex Bradbury
Date: Thu, 17 Jan 2019 10:04:39 +0000
Subject: [PATCH 01/11] [RISCV] Add codegen support for RV64A

In order to support codegen RV64A, this patch:
* Introduces masked atomics intrinsics for atomicrmw operations and cmpxchg
  that use the i64 type. These are ultimately lowered to masked operations
  using lr.w/sc.w, but we need to use these alternate intrinsics for RV64
  because i32 is not legal
* Modifies RISCVExpandPseudoInsts.cpp to handle PseudoAtomicLoadNand64 and
  PseudoCmpXchg64
* Modifies the AtomicExpandPass hooks in RISCVTargetLowering to sext/trunc
  as needed for RV64 and to select the i64 intrinsic IDs when necessary
* Adds appropriate patterns to RISCVInstrInfoA.td
* Updates test/CodeGen/RISCV/atomic-*.ll to show RV64A support

This ends up being a fairly mechanical change, as the logic for RV32A is
effectively reused.

Differential Revision: https://reviews.llvm.org/D53233

llvm-svn: 351422
---
 llvm/include/llvm/IR/IntrinsicsRISCV.td      |   25 +
 .../Target/RISCV/RISCVExpandPseudoInsts.cpp  |   81 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp  |  117 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoA.td     |   80 +-
 llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll    |  712 ++++
 llvm/test/CodeGen/RISCV/atomic-load-store.ll |  182 +
 llvm/test/CodeGen/RISCV/atomic-rmw.ll        | 3017 +++++++++++++++++
 7 files changed, 4166 insertions(+), 48 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 0ac7348b56dbf..22d0037c3d643 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -41,4 +41,29 @@ def int_riscv_masked_cmpxchg_i32
                  llvm_i32_ty, llvm_i32_ty],
                 [IntrArgMemOnly, NoCapture<0>]>;
 
+class MaskedAtomicRMW64Intrinsic
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
+class MaskedAtomicRMW64WithSextIntrinsic
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty,
+                 llvm_i64_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
+def int_riscv_masked_atomicrmw_xchg_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_add_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_sub_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_nand_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_max_i64 : MaskedAtomicRMW64WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_min_i64 : MaskedAtomicRMW64WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_umax_i64 : MaskedAtomicRMW64Intrinsic;
+def int_riscv_masked_atomicrmw_umin_i64 : MaskedAtomicRMW64Intrinsic;
+
+def int_riscv_masked_cmpxchg_i64
+    : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty,
+                 llvm_i64_ty, llvm_i64_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 35c185aa5edd3..55275de50206b 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -87,6 +87,9 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
   case RISCV::PseudoAtomicLoadNand32:
     return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                              NextMBBI);
+  case RISCV::PseudoAtomicLoadNand64:
+    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
+                             NextMBBI);
   case RISCV::PseudoMaskedAtomicSwap32:
     return expandAtomicBinOp(MBB, MBBI,
AtomicRMWInst::Xchg, true, 32, NextMBBI); @@ -111,6 +114,8 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB, NextMBBI); case RISCV::PseudoCmpXchg32: return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI); + case RISCV::PseudoCmpXchg64: + return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI); case RISCV::PseudoMaskedCmpXchg32: return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI); } @@ -152,12 +157,61 @@ static unsigned getSCForRMW32(AtomicOrdering Ordering) { } } +static unsigned getLRForRMW64(AtomicOrdering Ordering) { + switch (Ordering) { + default: + llvm_unreachable("Unexpected AtomicOrdering"); + case AtomicOrdering::Monotonic: + return RISCV::LR_D; + case AtomicOrdering::Acquire: + return RISCV::LR_D_AQ; + case AtomicOrdering::Release: + return RISCV::LR_D; + case AtomicOrdering::AcquireRelease: + return RISCV::LR_D_AQ; + case AtomicOrdering::SequentiallyConsistent: + return RISCV::LR_D_AQ_RL; + } +} + +static unsigned getSCForRMW64(AtomicOrdering Ordering) { + switch (Ordering) { + default: + llvm_unreachable("Unexpected AtomicOrdering"); + case AtomicOrdering::Monotonic: + return RISCV::SC_D; + case AtomicOrdering::Acquire: + return RISCV::SC_D; + case AtomicOrdering::Release: + return RISCV::SC_D_RL; + case AtomicOrdering::AcquireRelease: + return RISCV::SC_D_RL; + case AtomicOrdering::SequentiallyConsistent: + return RISCV::SC_D_AQ_RL; + } +} + +static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) { + if (Width == 32) + return getLRForRMW32(Ordering); + if (Width == 64) + return getLRForRMW64(Ordering); + llvm_unreachable("Unexpected LR width\n"); +} + +static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) { + if (Width == 32) + return getSCForRMW32(Ordering); + if (Width == 64) + return getSCForRMW64(Ordering); + llvm_unreachable("Unexpected SC width\n"); +} + static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL, MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB, MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) { - assert(Width == 32 && "RV64 atomic expansion currently unsupported"); unsigned DestReg = MI.getOperand(0).getReg(); unsigned ScratchReg = MI.getOperand(1).getReg(); unsigned AddrReg = MI.getOperand(2).getReg(); @@ -166,11 +220,11 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI, static_cast(MI.getOperand(4).getImm()); // .loop: - // lr.w dest, (addr) + // lr.[w|d] dest, (addr) // binop scratch, dest, val - // sc.w scratch, scratch, (addr) + // sc.[w|d] scratch, scratch, (addr) // bnez scratch, loop - BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg) + BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg) .addReg(AddrReg); switch (BinOp) { default: @@ -184,7 +238,7 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI, .addImm(-1); break; } - BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg) + BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg) .addReg(AddrReg) .addReg(ScratchReg); BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) @@ -219,7 +273,7 @@ static void doMaskedAtomicBinOpExpansion( const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL, MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB, MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) { - assert(Width == 32 && "RV64 atomic expansion currently unsupported"); + assert(Width == 32 && "Should never need to expand masked 64-bit operations"); unsigned DestReg = 
MI.getOperand(0).getReg(); unsigned ScratchReg = MI.getOperand(1).getReg(); unsigned AddrReg = MI.getOperand(2).getReg(); @@ -333,7 +387,7 @@ bool RISCVExpandPseudo::expandAtomicMinMaxOp( MachineBasicBlock::iterator &NextMBBI) { assert(IsMasked == true && "Should only need to expand masked atomic max/min"); - assert(Width == 32 && "RV64 atomic expansion currently unsupported"); + assert(Width == 32 && "Should never need to expand masked 64-bit operations"); MachineInstr &MI = *MBBI; DebugLoc DL = MI.getDebugLoc(); @@ -451,7 +505,6 @@ bool RISCVExpandPseudo::expandAtomicMinMaxOp( bool RISCVExpandPseudo::expandAtomicCmpXchg( MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked, int Width, MachineBasicBlock::iterator &NextMBBI) { - assert(Width == 32 && "RV64 atomic expansion currently unsupported"); MachineInstr &MI = *MBBI; DebugLoc DL = MI.getDebugLoc(); MachineFunction *MF = MBB.getParent(); @@ -483,18 +536,18 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg( if (!IsMasked) { // .loophead: - // lr.w dest, (addr) + // lr.[w|d] dest, (addr) // bne dest, cmpval, done - BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg) + BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg) .addReg(AddrReg); BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE)) .addReg(DestReg) .addReg(CmpValReg) .addMBB(DoneMBB); // .looptail: - // sc.w scratch, newval, (addr) + // sc.[w|d] scratch, newval, (addr) // bnez scratch, loophead - BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg) + BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg) .addReg(AddrReg) .addReg(NewValReg); BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE)) @@ -507,7 +560,7 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg( // and scratch, dest, mask // bne scratch, cmpval, done unsigned MaskReg = MI.getOperand(5).getReg(); - BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg) + BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg) .addReg(AddrReg); BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg) .addReg(DestReg) @@ -525,7 +578,7 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg( // bnez scratch, loophead insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg, MaskReg, ScratchReg); - BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg) + BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg) .addReg(AddrReg) .addReg(ScratchReg); BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE)) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 508dcbd009edf..5f55cea4f563a 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -1728,37 +1728,74 @@ RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { } static Intrinsic::ID -getIntrinsicForMaskedAtomicRMWBinOp32(AtomicRMWInst::BinOp BinOp) { - switch (BinOp) { - default: - llvm_unreachable("Unexpected AtomicRMW BinOp"); - case AtomicRMWInst::Xchg: - return Intrinsic::riscv_masked_atomicrmw_xchg_i32; - case AtomicRMWInst::Add: - return Intrinsic::riscv_masked_atomicrmw_add_i32; - case AtomicRMWInst::Sub: - return Intrinsic::riscv_masked_atomicrmw_sub_i32; - case AtomicRMWInst::Nand: - return Intrinsic::riscv_masked_atomicrmw_nand_i32; - case AtomicRMWInst::Max: - return Intrinsic::riscv_masked_atomicrmw_max_i32; - case AtomicRMWInst::Min: - return Intrinsic::riscv_masked_atomicrmw_min_i32; - case 
AtomicRMWInst::UMax: - return Intrinsic::riscv_masked_atomicrmw_umax_i32; - case AtomicRMWInst::UMin: - return Intrinsic::riscv_masked_atomicrmw_umin_i32; +getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) { + if (XLen == 32) { + switch (BinOp) { + default: + llvm_unreachable("Unexpected AtomicRMW BinOp"); + case AtomicRMWInst::Xchg: + return Intrinsic::riscv_masked_atomicrmw_xchg_i32; + case AtomicRMWInst::Add: + return Intrinsic::riscv_masked_atomicrmw_add_i32; + case AtomicRMWInst::Sub: + return Intrinsic::riscv_masked_atomicrmw_sub_i32; + case AtomicRMWInst::Nand: + return Intrinsic::riscv_masked_atomicrmw_nand_i32; + case AtomicRMWInst::Max: + return Intrinsic::riscv_masked_atomicrmw_max_i32; + case AtomicRMWInst::Min: + return Intrinsic::riscv_masked_atomicrmw_min_i32; + case AtomicRMWInst::UMax: + return Intrinsic::riscv_masked_atomicrmw_umax_i32; + case AtomicRMWInst::UMin: + return Intrinsic::riscv_masked_atomicrmw_umin_i32; + } + } + + if (XLen == 64) { + switch (BinOp) { + default: + llvm_unreachable("Unexpected AtomicRMW BinOp"); + case AtomicRMWInst::Xchg: + return Intrinsic::riscv_masked_atomicrmw_xchg_i64; + case AtomicRMWInst::Add: + return Intrinsic::riscv_masked_atomicrmw_add_i64; + case AtomicRMWInst::Sub: + return Intrinsic::riscv_masked_atomicrmw_sub_i64; + case AtomicRMWInst::Nand: + return Intrinsic::riscv_masked_atomicrmw_nand_i64; + case AtomicRMWInst::Max: + return Intrinsic::riscv_masked_atomicrmw_max_i64; + case AtomicRMWInst::Min: + return Intrinsic::riscv_masked_atomicrmw_min_i64; + case AtomicRMWInst::UMax: + return Intrinsic::riscv_masked_atomicrmw_umax_i64; + case AtomicRMWInst::UMin: + return Intrinsic::riscv_masked_atomicrmw_umin_i64; + } } + + llvm_unreachable("Unexpected XLen\n"); } Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic( IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const { - Value *Ordering = Builder.getInt32(static_cast(AI->getOrdering())); + unsigned XLen = Subtarget.getXLen(); + Value *Ordering = + Builder.getIntN(XLen, static_cast(AI->getOrdering())); Type *Tys[] = {AlignedAddr->getType()}; Function *LrwOpScwLoop = Intrinsic::getDeclaration( AI->getModule(), - getIntrinsicForMaskedAtomicRMWBinOp32(AI->getOperation()), Tys); + getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys); + + if (XLen == 64) { + Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty()); + Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); + ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty()); + } + + Value *Result; // Must pass the shift amount needed to sign extend the loaded value prior // to performing a signed comparison for min/max. 
ShiftAmt is the number of @@ -1770,13 +1807,18 @@ Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic( const DataLayout &DL = AI->getModule()->getDataLayout(); unsigned ValWidth = DL.getTypeStoreSizeInBits(AI->getValOperand()->getType()); - Value *SextShamt = Builder.CreateSub( - Builder.getInt32(Subtarget.getXLen() - ValWidth), ShiftAmt); - return Builder.CreateCall(LrwOpScwLoop, - {AlignedAddr, Incr, Mask, SextShamt, Ordering}); + Value *SextShamt = + Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt); + Result = Builder.CreateCall(LrwOpScwLoop, + {AlignedAddr, Incr, Mask, SextShamt, Ordering}); + } else { + Result = + Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering}); } - return Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering}); + if (XLen == 64) + Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); + return Result; } TargetLowering::AtomicExpansionKind @@ -1791,10 +1833,21 @@ RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR( Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic( IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { - Value *Ordering = Builder.getInt32(static_cast(Ord)); + unsigned XLen = Subtarget.getXLen(); + Value *Ordering = Builder.getIntN(XLen, static_cast(Ord)); + Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32; + if (XLen == 64) { + CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty()); + NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty()); + Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); + CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64; + } Type *Tys[] = {AlignedAddr->getType()}; - Function *MaskedCmpXchg = Intrinsic::getDeclaration( - CI->getModule(), Intrinsic::riscv_masked_cmpxchg_i32, Tys); - return Builder.CreateCall(MaskedCmpXchg, - {AlignedAddr, CmpVal, NewVal, Mask, Ordering}); + Function *MaskedCmpXchg = + Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys); + Value *Result = Builder.CreateCall( + MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering}); + if (XLen == 64) + Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); + return Result; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td index 9cb1d2f0b627d..0275d32e84caa 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td @@ -235,7 +235,7 @@ def : PseudoMaskedAMOPat { + (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> { let Constraints = "@earlyclobber $res,@earlyclobber $scratch"; let mayLoad = 1; let mayStore = 1; @@ -263,7 +263,7 @@ defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>; def PseudoMaskedCmpXchg32 : Pseudo<(outs GPR:$res, GPR:$scratch), (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, - i32imm:$ordering), []> { + ixlenimm:$ordering), []> { let Constraints = "@earlyclobber $res,@earlyclobber $scratch"; let mayLoad = 1; let mayStore = 1; @@ -276,3 +276,79 @@ def : Pat<(int_riscv_masked_cmpxchg_i32 GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>; } // Predicates = [HasStdExtA] + +let Predicates = [HasStdExtA, IsRV64] in { + +/// 64-bit atomic loads and stores + +// Fences will be inserted for atomic load/stores according to the logic in +// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}. 
+defm : LdPat; +defm : AtomicStPat; + +defm : AMOPat<"atomic_swap_64", "AMOSWAP_D">; +defm : AMOPat<"atomic_load_add_64", "AMOADD_D">; +defm : AMOPat<"atomic_load_and_64", "AMOAND_D">; +defm : AMOPat<"atomic_load_or_64", "AMOOR_D">; +defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D">; +defm : AMOPat<"atomic_load_max_64", "AMOMAX_D">; +defm : AMOPat<"atomic_load_min_64", "AMOMIN_D">; +defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">; +defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">; + +/// 64-bit AMOs + +def : Pat<(atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr), + (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>; +def : Pat<(atomic_load_sub_64_acquire GPR:$addr, GPR:$incr), + (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>; +def : Pat<(atomic_load_sub_64_release GPR:$addr, GPR:$incr), + (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>; +def : Pat<(atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr), + (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>; +def : Pat<(atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr), + (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>; + +/// 64-bit pseudo AMOs + +def PseudoAtomicLoadNand64 : PseudoAMO; +// Ordering constants must be kept in sync with the AtomicOrdering enum in +// AtomicOrdering.h. +def : Pat<(atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr), + (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>; +def : Pat<(atomic_load_nand_64_acquire GPR:$addr, GPR:$incr), + (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>; +def : Pat<(atomic_load_nand_64_release GPR:$addr, GPR:$incr), + (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>; +def : Pat<(atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr), + (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>; +def : Pat<(atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr), + (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>; + +def : PseudoMaskedAMOPat; +def : PseudoMaskedAMOPat; +def : PseudoMaskedAMOPat; +def : PseudoMaskedAMOPat; +def : PseudoMaskedAMOMinMaxPat; +def : PseudoMaskedAMOMinMaxPat; +def : PseudoMaskedAMOPat; +def : PseudoMaskedAMOPat; + +/// 64-bit compare and exchange + +def PseudoCmpXchg64 : PseudoCmpXchg; +defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>; + +def : Pat<(int_riscv_masked_cmpxchg_i64 + GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering), + (PseudoMaskedCmpXchg32 + GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>; +} // Predicates = [HasStdExtA, IsRV64] diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll index 19b85b6f77a9f..8818645687ce5 100644 --- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll +++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll @@ -5,6 +5,8 @@ ; RUN: | FileCheck -check-prefix=RV32IA %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s +; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IA %s define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; RV32I-LABEL: cmpxchg_i8_monotonic_monotonic: @@ -56,6 +58,30 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 
+; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB0_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB0_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB0_1 +; RV64IA-NEXT: .LBB0_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic ret void } @@ -110,6 +136,30 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_acquire_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB1_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB1_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB1_1 +; RV64IA-NEXT: .LBB1_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic ret void } @@ -164,6 +214,30 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_acquire_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB2_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB2_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB2_1 +; RV64IA-NEXT: .LBB2_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire ret void } @@ -218,6 +292,30 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_release_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB3_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB3_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB3_1 +; RV64IA-NEXT: .LBB3_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 
%val release monotonic ret void } @@ -272,6 +370,30 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_release_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB4_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB4_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB4_1 +; RV64IA-NEXT: .LBB4_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire ret void } @@ -326,6 +448,30 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_acq_rel_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB5_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB5_1 +; RV64IA-NEXT: .LBB5_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic ret void } @@ -380,6 +526,30 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_acq_rel_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB6_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB6_1 +; RV64IA-NEXT: .LBB6_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire ret void } @@ -434,6 +604,30 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_seq_cst_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, 
a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB7_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB7_1 +; RV64IA-NEXT: .LBB7_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic ret void } @@ -488,6 +682,30 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_seq_cst_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB8_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB8_1 +; RV64IA-NEXT: .LBB8_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire ret void } @@ -542,6 +760,30 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i8_seq_cst_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a4, a4, a3 +; RV64IA-NEXT: andi a2, a2, 255 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a3, (a0) +; RV64IA-NEXT: and a5, a3, a4 +; RV64IA-NEXT: bne a5, a1, .LBB9_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 +; RV64IA-NEXT: xor a5, a3, a2 +; RV64IA-NEXT: and a5, a5, a4 +; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB9_1 +; RV64IA-NEXT: .LBB9_3: +; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst ret void } @@ -597,6 +839,31 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_monotonic_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB10_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, 
.LBB10_1 +; RV64IA-NEXT: .LBB10_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic ret void } @@ -652,6 +919,31 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_acquire_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB11_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB11_1 +; RV64IA-NEXT: .LBB11_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic ret void } @@ -707,6 +999,31 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_acquire_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB12_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB12_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB12_1 +; RV64IA-NEXT: .LBB12_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire ret void } @@ -762,6 +1079,31 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_release_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB13_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB13_1 +; RV64IA-NEXT: .LBB13_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic ret void } @@ -817,6 +1159,31 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_release_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: 
lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB14_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB14_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB14_1 +; RV64IA-NEXT: .LBB14_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire ret void } @@ -872,6 +1239,31 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_acq_rel_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB15_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB15_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB15_1 +; RV64IA-NEXT: .LBB15_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic ret void } @@ -927,6 +1319,31 @@ define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_acq_rel_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB16_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB16_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB16_1 +; RV64IA-NEXT: .LBB16_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire ret void } @@ -982,6 +1399,31 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_seq_cst_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: 
lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB17_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB17_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB17_1 +; RV64IA-NEXT: .LBB17_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic ret void } @@ -1037,6 +1479,31 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_seq_cst_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB18_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB18_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB18_1 +; RV64IA-NEXT: .LBB18_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire ret void } @@ -1092,6 +1559,31 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i16_seq_cst_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: andi a4, a0, 3 +; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB19_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB19_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB19_1 +; RV64IA-NEXT: .LBB19_3: +; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst ret void } @@ -1133,6 +1625,17 @@ define void @cmpxchg_i32_monotonic_monotonic(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i32_monotonic_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB20_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB20_1 Depth=1 +; RV64IA-NEXT: sc.w a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB20_1 +; RV64IA-NEXT: .LBB20_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic ret void } @@ -1174,6 +1677,17 @@ define void @cmpxchg_i32_acquire_monotonic(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i32_acquire_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB21_3 
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB21_1 Depth=1 +; RV64IA-NEXT: sc.w a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB21_1 +; RV64IA-NEXT: .LBB21_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic ret void } @@ -1215,6 +1729,17 @@ define void @cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i32_acquire_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB22_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB22_1 Depth=1 +; RV64IA-NEXT: sc.w a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB22_1 +; RV64IA-NEXT: .LBB22_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire ret void } @@ -1256,6 +1781,17 @@ define void @cmpxchg_i32_release_monotonic(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i32_release_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB23_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB23_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB23_1 +; RV64IA-NEXT: .LBB23_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic ret void } @@ -1297,6 +1833,17 @@ define void @cmpxchg_i32_release_acquire(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i32_release_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB24_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB24_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB24_1 +; RV64IA-NEXT: .LBB24_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire ret void } @@ -1338,6 +1885,17 @@ define void @cmpxchg_i32_acq_rel_monotonic(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i32_acq_rel_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB25_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB25_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB25_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB25_1 +; RV64IA-NEXT: .LBB25_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic ret void } @@ -1379,6 +1937,17 @@ define void @cmpxchg_i32_acq_rel_acquire(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i32_acq_rel_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB26_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB26_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB26_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB26_1 +; RV64IA-NEXT: .LBB26_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire ret void } @@ -1420,6 +1989,17 @@ define void @cmpxchg_i32_seq_cst_monotonic(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: 
cmpxchg_i32_seq_cst_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB27_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB27_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB27_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB27_1 +; RV64IA-NEXT: .LBB27_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic ret void } @@ -1461,6 +2041,17 @@ define void @cmpxchg_i32_seq_cst_acquire(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i32_seq_cst_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB28_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB28_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB28_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB28_1 +; RV64IA-NEXT: .LBB28_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire ret void } @@ -1502,6 +2093,17 @@ define void @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 %cmp, i32 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i32_seq_cst_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB29_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB29_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB29_1 +; RV64IA-NEXT: .LBB29_3: +; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst ret void } @@ -1551,6 +2153,17 @@ define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_monotonic_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB30_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB30_1 Depth=1 +; RV64IA-NEXT: sc.d a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB30_1 +; RV64IA-NEXT: .LBB30_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic ret void } @@ -1602,6 +2215,17 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_acquire_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aq a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB31_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB31_1 Depth=1 +; RV64IA-NEXT: sc.d a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB31_1 +; RV64IA-NEXT: .LBB31_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic ret void } @@ -1651,6 +2275,17 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_acquire_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aq a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB32_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB32_1 Depth=1 +; RV64IA-NEXT: sc.d a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB32_1 +; RV64IA-NEXT: .LBB32_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire ret void } @@ 
-1702,6 +2337,17 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_release_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB33_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB33_1 Depth=1 +; RV64IA-NEXT: sc.d.rl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB33_1 +; RV64IA-NEXT: .LBB33_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic ret void } @@ -1753,6 +2399,17 @@ define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_release_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB34_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB34_1 Depth=1 +; RV64IA-NEXT: sc.d.rl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB34_1 +; RV64IA-NEXT: .LBB34_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire ret void } @@ -1804,6 +2461,17 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_acq_rel_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aq a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB35_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1 +; RV64IA-NEXT: sc.d.rl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB35_1 +; RV64IA-NEXT: .LBB35_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic ret void } @@ -1855,6 +2523,17 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_acq_rel_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aq a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB36_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB36_1 Depth=1 +; RV64IA-NEXT: sc.d.rl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB36_1 +; RV64IA-NEXT: .LBB36_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire ret void } @@ -1906,6 +2585,17 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_seq_cst_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aqrl a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB37_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1 +; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB37_1 +; RV64IA-NEXT: .LBB37_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic ret void } @@ -1957,6 +2647,17 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_seq_cst_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aqrl a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB38_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1 +; 
RV64IA-NEXT: sc.d.aqrl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB38_1 +; RV64IA-NEXT: .LBB38_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire ret void } @@ -2006,6 +2707,17 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: cmpxchg_i64_seq_cst_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aqrl a3, (a0) +; RV64IA-NEXT: bne a3, a1, .LBB39_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1 +; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0) +; RV64IA-NEXT: bnez a4, .LBB39_1 +; RV64IA-NEXT: .LBB39_3: +; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst ret void } diff --git a/llvm/test/CodeGen/RISCV/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/atomic-load-store.ll index 2df3a9fb319fc..da6f986eb752f 100644 --- a/llvm/test/CodeGen/RISCV/atomic-load-store.ll +++ b/llvm/test/CodeGen/RISCV/atomic-load-store.ll @@ -5,6 +5,8 @@ ; RUN: | FileCheck -check-prefix=RV32IA %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s +; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IA %s define i8 @atomic_load_i8_unordered(i8 *%a) nounwind { ; RV32I-LABEL: atomic_load_i8_unordered: @@ -31,6 +33,11 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i8_unordered: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lb a0, 0(a0) +; RV64IA-NEXT: ret %1 = load atomic i8, i8* %a unordered, align 1 ret i8 %1 } @@ -60,6 +67,11 @@ define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lb a0, 0(a0) +; RV64IA-NEXT: ret %1 = load atomic i8, i8* %a monotonic, align 1 ret i8 %1 } @@ -90,6 +102,12 @@ define i8 @atomic_load_i8_acquire(i8 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lb a0, 0(a0) +; RV64IA-NEXT: fence r, rw +; RV64IA-NEXT: ret %1 = load atomic i8, i8* %a acquire, align 1 ret i8 %1 } @@ -121,6 +139,13 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, rw +; RV64IA-NEXT: lb a0, 0(a0) +; RV64IA-NEXT: fence r, rw +; RV64IA-NEXT: ret %1 = load atomic i8, i8* %a seq_cst, align 1 ret i8 %1 } @@ -150,6 +175,11 @@ define i16 @atomic_load_i16_unordered(i16 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i16_unordered: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lh a0, 0(a0) +; RV64IA-NEXT: ret %1 = load atomic i16, i16* %a unordered, align 2 ret i16 %1 } @@ -179,6 +209,11 @@ define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lh a0, 0(a0) +; RV64IA-NEXT: ret %1 = load atomic i16, i16* %a monotonic, align 2 ret i16 %1 } @@ -209,6 +244,12 @@ define i16 @atomic_load_i16_acquire(i16 *%a) nounwind { ; RV64I-NEXT: ld 
ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lh a0, 0(a0) +; RV64IA-NEXT: fence r, rw +; RV64IA-NEXT: ret %1 = load atomic i16, i16* %a acquire, align 2 ret i16 %1 } @@ -240,6 +281,13 @@ define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, rw +; RV64IA-NEXT: lh a0, 0(a0) +; RV64IA-NEXT: fence r, rw +; RV64IA-NEXT: ret %1 = load atomic i16, i16* %a seq_cst, align 2 ret i16 %1 } @@ -269,6 +317,11 @@ define i32 @atomic_load_i32_unordered(i32 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i32_unordered: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lw a0, 0(a0) +; RV64IA-NEXT: ret %1 = load atomic i32, i32* %a unordered, align 4 ret i32 %1 } @@ -298,6 +351,11 @@ define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lw a0, 0(a0) +; RV64IA-NEXT: ret %1 = load atomic i32, i32* %a monotonic, align 4 ret i32 %1 } @@ -328,6 +386,12 @@ define i32 @atomic_load_i32_acquire(i32 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lw a0, 0(a0) +; RV64IA-NEXT: fence r, rw +; RV64IA-NEXT: ret %1 = load atomic i32, i32* %a acquire, align 4 ret i32 %1 } @@ -359,6 +423,13 @@ define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, rw +; RV64IA-NEXT: lw a0, 0(a0) +; RV64IA-NEXT: fence r, rw +; RV64IA-NEXT: ret %1 = load atomic i32, i32* %a seq_cst, align 4 ret i32 %1 } @@ -393,6 +464,11 @@ define i64 @atomic_load_i64_unordered(i64 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i64_unordered: +; RV64IA: # %bb.0: +; RV64IA-NEXT: ld a0, 0(a0) +; RV64IA-NEXT: ret %1 = load atomic i64, i64* %a unordered, align 8 ret i64 %1 } @@ -427,6 +503,11 @@ define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: ld a0, 0(a0) +; RV64IA-NEXT: ret %1 = load atomic i64, i64* %a monotonic, align 8 ret i64 %1 } @@ -461,6 +542,12 @@ define i64 @atomic_load_i64_acquire(i64 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: ld a0, 0(a0) +; RV64IA-NEXT: fence r, rw +; RV64IA-NEXT: ret %1 = load atomic i64, i64* %a acquire, align 8 ret i64 %1 } @@ -495,6 +582,13 @@ define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_load_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, rw +; RV64IA-NEXT: ld a0, 0(a0) +; RV64IA-NEXT: fence r, rw +; RV64IA-NEXT: ret %1 = load atomic i64, i64* %a seq_cst, align 8 ret i64 %1 } @@ -524,6 +618,11 @@ define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 
8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i8_unordered: +; RV64IA: # %bb.0: +; RV64IA-NEXT: sb a1, 0(a0) +; RV64IA-NEXT: ret store atomic i8 %b, i8* %a unordered, align 1 ret void } @@ -553,6 +652,11 @@ define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: sb a1, 0(a0) +; RV64IA-NEXT: ret store atomic i8 %b, i8* %a monotonic, align 1 ret void } @@ -583,6 +687,12 @@ define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, w +; RV64IA-NEXT: sb a1, 0(a0) +; RV64IA-NEXT: ret store atomic i8 %b, i8* %a release, align 1 ret void } @@ -613,6 +723,12 @@ define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, w +; RV64IA-NEXT: sb a1, 0(a0) +; RV64IA-NEXT: ret store atomic i8 %b, i8* %a seq_cst, align 1 ret void } @@ -642,6 +758,11 @@ define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i16_unordered: +; RV64IA: # %bb.0: +; RV64IA-NEXT: sh a1, 0(a0) +; RV64IA-NEXT: ret store atomic i16 %b, i16* %a unordered, align 2 ret void } @@ -671,6 +792,11 @@ define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: sh a1, 0(a0) +; RV64IA-NEXT: ret store atomic i16 %b, i16* %a monotonic, align 2 ret void } @@ -701,6 +827,12 @@ define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, w +; RV64IA-NEXT: sh a1, 0(a0) +; RV64IA-NEXT: ret store atomic i16 %b, i16* %a release, align 2 ret void } @@ -731,6 +863,12 @@ define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, w +; RV64IA-NEXT: sh a1, 0(a0) +; RV64IA-NEXT: ret store atomic i16 %b, i16* %a seq_cst, align 2 ret void } @@ -760,6 +898,11 @@ define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i32_unordered: +; RV64IA: # %bb.0: +; RV64IA-NEXT: sw a1, 0(a0) +; RV64IA-NEXT: ret store atomic i32 %b, i32* %a unordered, align 4 ret void } @@ -789,6 +932,11 @@ define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: sw a1, 0(a0) +; RV64IA-NEXT: ret store atomic i32 %b, i32* %a monotonic, align 4 ret void } @@ -819,6 +967,12 @@ define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: 
atomic_store_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, w +; RV64IA-NEXT: sw a1, 0(a0) +; RV64IA-NEXT: ret store atomic i32 %b, i32* %a release, align 4 ret void } @@ -849,6 +1003,12 @@ define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, w +; RV64IA-NEXT: sw a1, 0(a0) +; RV64IA-NEXT: ret store atomic i32 %b, i32* %a seq_cst, align 4 ret void } @@ -883,6 +1043,11 @@ define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i64_unordered: +; RV64IA: # %bb.0: +; RV64IA-NEXT: sd a1, 0(a0) +; RV64IA-NEXT: ret store atomic i64 %b, i64* %a unordered, align 8 ret void } @@ -917,6 +1082,11 @@ define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: sd a1, 0(a0) +; RV64IA-NEXT: ret store atomic i64 %b, i64* %a monotonic, align 8 ret void } @@ -951,6 +1121,12 @@ define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, w +; RV64IA-NEXT: sd a1, 0(a0) +; RV64IA-NEXT: ret store atomic i64 %b, i64* %a release, align 8 ret void } @@ -985,6 +1161,12 @@ define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomic_store_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: fence rw, w +; RV64IA-NEXT: sd a1, 0(a0) +; RV64IA-NEXT: ret store atomic i64 %b, i64* %a seq_cst, align 8 ret void } diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll index 256a669c1e82e..1bd5e9e7f5f47 100644 --- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll @@ -5,6 +5,8 @@ ; RUN: | FileCheck -check-prefix=RV32IA %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s +; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IA %s define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) { ; RV32I-LABEL: atomicrmw_xchg_i8_monotonic: @@ -47,6 +49,27 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB0_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b monotonic ret i8 %1 } @@ -92,6 +115,27 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret 
+; +; RV64IA-LABEL: atomicrmw_xchg_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB1_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b acquire ret i8 %1 } @@ -137,6 +181,27 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB2_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b release ret i8 %1 } @@ -182,6 +247,27 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB3_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b acq_rel ret i8 %1 } @@ -227,6 +313,27 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB4_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b seq_cst ret i8 %1 } @@ -272,6 +379,27 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi 
a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB5_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b monotonic ret i8 %1 } @@ -317,6 +445,27 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB6_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b acquire ret i8 %1 } @@ -362,6 +511,27 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB7_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b release ret i8 %1 } @@ -407,6 +577,27 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB8_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b acq_rel ret i8 %1 } @@ -452,6 +643,27 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: 
sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB9_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b seq_cst ret i8 %1 } @@ -497,6 +709,27 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB10_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b monotonic ret i8 %1 } @@ -542,6 +775,27 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB11_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b acquire ret i8 %1 } @@ -587,6 +841,27 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB12_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b release ret i8 %1 } @@ -632,6 +907,27 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 
+; RV64IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB13_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b acq_rel ret i8 %1 } @@ -677,6 +973,27 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB14_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b seq_cst ret i8 %1 } @@ -716,6 +1033,21 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b monotonic ret i8 %1 } @@ -755,6 +1087,21 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b acquire ret i8 %1 } @@ -794,6 +1141,21 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b release ret i8 %1 } @@ -833,6 +1195,21 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli 
a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b acq_rel ret i8 %1 } @@ -872,6 +1249,21 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b seq_cst ret i8 %1 } @@ -918,6 +1310,28 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB20_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b monotonic ret i8 %1 } @@ -964,6 +1378,28 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB21_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b acquire ret i8 %1 } @@ -1010,6 +1446,28 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB22_1 +; RV64IA-NEXT: 
# %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b release ret i8 %1 } @@ -1056,6 +1514,28 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB23_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b acq_rel ret i8 %1 } @@ -1102,6 +1582,28 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB24_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b seq_cst ret i8 %1 } @@ -1137,6 +1639,17 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b monotonic ret i8 %1 } @@ -1172,6 +1685,17 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b acquire ret i8 %1 } @@ -1207,6 +1731,17 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b release ret i8 %1 } @@ -1242,6 +1777,17 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 
%b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b acq_rel ret i8 %1 } @@ -1277,6 +1823,17 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b seq_cst ret i8 %1 } @@ -1312,6 +1869,17 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b monotonic ret i8 %1 } @@ -1347,6 +1915,17 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b acquire ret i8 %1 } @@ -1382,6 +1961,17 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b release ret i8 %1 } @@ -1417,6 +2007,17 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b acq_rel ret i8 %1 } @@ -1452,6 +2053,17 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: 
srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b seq_cst ret i8 %1 } @@ -1572,6 +2184,36 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB35_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB35_3: # in Loop: Header=BB35_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB35_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b monotonic ret i8 %1 } @@ -1698,6 +2340,36 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB36_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB36_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB36_3: # in Loop: Header=BB36_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB36_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b acquire ret i8 %1 } @@ -1824,6 +2496,36 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB37_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB37_3: # in Loop: Header=BB37_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) 
+; RV64IA-NEXT: bnez a3, .LBB37_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b release ret i8 %1 } @@ -1956,6 +2658,36 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB38_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB38_3: # in Loop: Header=BB38_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB38_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b acq_rel ret i8 %1 } @@ -2082,6 +2814,36 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB39_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB39_3: # in Loop: Header=BB39_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB39_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b seq_cst ret i8 %1 } @@ -2202,6 +2964,36 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB40_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB40_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; 
RV64IA-NEXT: .LBB40_3: # in Loop: Header=BB40_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB40_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b monotonic ret i8 %1 } @@ -2328,6 +3120,36 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB41_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB41_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB41_3: # in Loop: Header=BB41_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB41_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b acquire ret i8 %1 } @@ -2454,6 +3276,36 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB42_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB42_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB42_3: # in Loop: Header=BB42_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB42_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b release ret i8 %1 } @@ -2586,6 +3438,36 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB43_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB43_1 Depth=1 +; RV64IA-NEXT: xor 
a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB43_3: # in Loop: Header=BB43_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB43_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b acq_rel ret i8 %1 } @@ -2712,6 +3594,36 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB44_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB44_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB44_3: # in Loop: Header=BB44_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB44_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b seq_cst ret i8 %1 } @@ -2823,6 +3735,31 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB45_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB45_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB45_3: # in Loop: Header=BB45_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB45_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b monotonic ret i8 %1 } @@ -2940,6 +3877,31 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB46_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB46_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB46_3: # in Loop: Header=BB46_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB46_1 +; RV64IA-NEXT: # %bb.4: 
+; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b acquire ret i8 %1 } @@ -3057,6 +4019,31 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB47_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB47_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB47_3: # in Loop: Header=BB47_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB47_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b release ret i8 %1 } @@ -3180,6 +4167,31 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB48_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB48_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB48_3: # in Loop: Header=BB48_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB48_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b acq_rel ret i8 %1 } @@ -3297,6 +4309,31 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB49_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB49_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB49_3: # in Loop: Header=BB49_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB49_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b seq_cst ret i8 %1 } @@ -3408,6 +4445,31 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; 
RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB50_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB50_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB50_3: # in Loop: Header=BB50_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB50_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b monotonic ret i8 %1 } @@ -3525,6 +4587,31 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB51_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB51_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB51_3: # in Loop: Header=BB51_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB51_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b acquire ret i8 %1 } @@ -3642,6 +4729,31 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB52_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB52_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB52_3: # in Loop: Header=BB52_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB52_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b release ret i8 %1 } @@ -3765,6 +4877,31 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB53_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB53_3 +; 
RV64IA-NEXT: # %bb.2: # in Loop: Header=BB53_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB53_3: # in Loop: Header=BB53_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB53_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b acq_rel ret i8 %1 } @@ -3882,6 +5019,31 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB54_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB54_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB54_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB54_3: # in Loop: Header=BB54_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB54_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b seq_cst ret i8 %1 } @@ -3928,6 +5090,28 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB55_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b monotonic ret i16 %1 } @@ -3974,6 +5158,28 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB56_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b acquire ret i16 %1 } @@ -4020,6 +5226,28 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; 
RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB57_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b release ret i16 %1 } @@ -4066,6 +5294,28 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB58_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b acq_rel ret i16 %1 } @@ -4112,6 +5362,28 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB59_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b seq_cst ret i16 %1 } @@ -4158,6 +5430,28 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB60_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b monotonic ret i16 %1 } @@ -4204,6 +5498,28 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_acquire: +; RV64IA: # %bb.0: +; 
RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB61_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b acquire ret i16 %1 } @@ -4250,6 +5566,28 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB62_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b release ret i16 %1 } @@ -4296,6 +5634,28 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB63_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b acq_rel ret i16 %1 } @@ -4342,6 +5702,28 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB64_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b seq_cst ret i16 %1 } @@ -4388,6 +5770,28 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: 
atomicrmw_sub_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB65_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB65_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b monotonic ret i16 %1 } @@ -4434,6 +5838,28 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB66_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB66_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b acquire ret i16 %1 } @@ -4480,6 +5906,28 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB67_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB67_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b release ret i16 %1 } @@ -4526,6 +5974,28 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB68_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB68_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b acq_rel ret i16 %1 } @@ -4572,6 +6042,28 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; 
RV64IA-LABEL: atomicrmw_sub_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB69_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB69_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b seq_cst ret i16 %1 } @@ -4612,6 +6104,22 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b monotonic ret i16 %1 } @@ -4652,6 +6160,22 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b acquire ret i16 %1 } @@ -4692,6 +6216,22 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b release ret i16 %1 } @@ -4732,6 +6272,22 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b acq_rel ret i16 %1 } @@ -4772,6 +6328,22 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 
%b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b seq_cst ret i16 %1 } @@ -4819,6 +6391,29 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB75_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB75_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b monotonic ret i16 %1 } @@ -4866,6 +6461,29 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB76_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB76_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b acquire ret i16 %1 } @@ -4913,6 +6531,29 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB77_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB77_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b release ret i16 %1 } @@ -4960,6 +6601,29 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: 
atomicrmw_nand_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB78_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB78_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b acq_rel ret i16 %1 } @@ -5007,6 +6671,29 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB79_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB79_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b seq_cst ret i16 %1 } @@ -5044,6 +6731,19 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b monotonic ret i16 %1 } @@ -5081,6 +6781,19 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b acquire ret i16 %1 } @@ -5118,6 +6831,19 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b release ret i16 %1 } @@ -5155,6 +6881,19 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; 
RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b acq_rel ret i16 %1 } @@ -5192,6 +6931,19 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b seq_cst ret i16 %1 } @@ -5229,6 +6981,19 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b monotonic ret i16 %1 } @@ -5266,6 +7031,19 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b acquire ret i16 %1 } @@ -5303,6 +7081,19 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b release ret i16 %1 } @@ -5340,6 +7131,19 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b acq_rel ret i16 %1 } @@ -5377,6 +7181,19 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 
*%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b seq_cst ret i16 %1 } @@ -5498,6 +7315,37 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB90_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB90_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB90_3: # in Loop: Header=BB90_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB90_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b monotonic ret i16 %1 } @@ -5625,6 +7473,37 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB91_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB91_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB91_3: # in Loop: Header=BB91_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB91_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b acquire ret i16 %1 } @@ -5752,6 +7631,37 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: 
sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB92_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB92_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB92_3: # in Loop: Header=BB92_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB92_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b release ret i16 %1 } @@ -5885,6 +7795,37 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB93_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB93_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB93_3: # in Loop: Header=BB93_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB93_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b acq_rel ret i16 %1 } @@ -6012,6 +7953,37 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB94_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB94_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB94_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b seq_cst ret i16 %1 } @@ -6133,6 +8105,37 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: 
sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB95_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB95_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB95_3: # in Loop: Header=BB95_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB95_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b monotonic ret i16 %1 } @@ -6260,6 +8263,37 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB96_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB96_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB96_3: # in Loop: Header=BB96_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB96_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b acquire ret i16 %1 } @@ -6387,6 +8421,37 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB97_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB97_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB97_3: # in Loop: Header=BB97_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB97_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b release ret i16 %1 } @@ -6520,6 +8585,37 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; 
RV64IA-LABEL: atomicrmw_min_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB98_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB98_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB98_3: # in Loop: Header=BB98_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB98_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b acq_rel ret i16 %1 } @@ -6647,6 +8743,37 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB99_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB99_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB99_3: # in Loop: Header=BB99_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB99_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b seq_cst ret i16 %1 } @@ -6767,6 +8894,32 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB100_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB100_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB100_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB100_3: # in Loop: Header=BB100_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB100_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b monotonic ret i16 %1 } @@ -6893,6 +9046,32 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi 
sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB101_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB101_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB101_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB101_3: # in Loop: Header=BB101_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB101_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b acquire ret i16 %1 } @@ -7019,6 +9198,32 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB102_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB102_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB102_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB102_3: # in Loop: Header=BB102_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB102_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b release ret i16 %1 } @@ -7151,6 +9356,32 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 72(sp) ; RV64I-NEXT: addi sp, sp, 80 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB103_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB103_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB103_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB103_3: # in Loop: Header=BB103_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB103_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b acq_rel ret i16 %1 } @@ -7277,6 +9508,32 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, 
a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB104_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB104_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB104_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB104_3: # in Loop: Header=BB104_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB104_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b seq_cst ret i16 %1 } @@ -7397,6 +9654,32 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB105_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB105_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB105_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB105_3: # in Loop: Header=BB105_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB105_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b monotonic ret i16 %1 } @@ -7523,6 +9806,32 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB106_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB106_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB106_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB106_3: # in Loop: Header=BB106_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB106_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b acquire ret i16 %1 } @@ -7649,6 +9958,32 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB107_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB107_3 +; RV64IA-NEXT: 
# %bb.2: # in Loop: Header=BB107_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB107_3: # in Loop: Header=BB107_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB107_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b release ret i16 %1 } @@ -7781,6 +10116,32 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 72(sp) ; RV64I-NEXT: addi sp, sp, 80 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB108_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB108_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB108_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB108_3: # in Loop: Header=BB108_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB108_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b acq_rel ret i16 %1 } @@ -7907,6 +10268,32 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB109_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB109_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB109_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB109_3: # in Loop: Header=BB109_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB109_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b seq_cst ret i16 %1 } @@ -7936,6 +10323,11 @@ define i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b monotonic ret i32 %1 } @@ -7965,6 +10357,11 @@ define i32 @atomicrmw_xchg_i32_acquire(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b acquire ret i32 %1 } @@ -7994,6 +10391,11 @@ define i32 @atomicrmw_xchg_i32_release(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w.rl a0, a1, (a0) 
+; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b release ret i32 %1 } @@ -8023,6 +10425,11 @@ define i32 @atomicrmw_xchg_i32_acq_rel(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8052,6 +10459,11 @@ define i32 @atomicrmw_xchg_i32_seq_cst(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8081,6 +10493,11 @@ define i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b monotonic ret i32 %1 } @@ -8110,6 +10527,11 @@ define i32 @atomicrmw_add_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b acquire ret i32 %1 } @@ -8139,6 +10561,11 @@ define i32 @atomicrmw_add_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b release ret i32 %1 } @@ -8168,6 +10595,11 @@ define i32 @atomicrmw_add_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8197,6 +10629,11 @@ define i32 @atomicrmw_add_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8227,6 +10664,12 @@ define i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b monotonic ret i32 %1 } @@ -8257,6 +10700,12 @@ define i32 @atomicrmw_sub_i32_acquire(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b acquire ret i32 %1 } @@ -8287,6 +10736,12 @@ define i32 @atomicrmw_sub_i32_release(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w.rl a0, a1, (a0) +; 
RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b release ret i32 %1 } @@ -8317,6 +10772,12 @@ define i32 @atomicrmw_sub_i32_acq_rel(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8347,6 +10808,12 @@ define i32 @atomicrmw_sub_i32_seq_cst(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8376,6 +10843,11 @@ define i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b monotonic ret i32 %1 } @@ -8405,6 +10877,11 @@ define i32 @atomicrmw_and_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b acquire ret i32 %1 } @@ -8434,6 +10911,11 @@ define i32 @atomicrmw_and_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b release ret i32 %1 } @@ -8463,6 +10945,11 @@ define i32 @atomicrmw_and_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8492,6 +10979,11 @@ define i32 @atomicrmw_and_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8528,6 +11020,18 @@ define i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB130_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB130_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b monotonic ret i32 %1 } @@ -8564,6 +11068,18 @@ define i32 @atomicrmw_nand_i32_acquire(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB131_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w a3, a3, (a0) 
+; RV64IA-NEXT: bnez a3, .LBB131_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b acquire ret i32 %1 } @@ -8600,6 +11116,18 @@ define i32 @atomicrmw_nand_i32_release(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB132_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB132_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b release ret i32 %1 } @@ -8636,6 +11164,18 @@ define i32 @atomicrmw_nand_i32_acq_rel(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB133_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB133_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8672,6 +11212,18 @@ define i32 @atomicrmw_nand_i32_seq_cst(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB134_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB134_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8701,6 +11253,11 @@ define i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b monotonic ret i32 %1 } @@ -8730,6 +11287,11 @@ define i32 @atomicrmw_or_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b acquire ret i32 %1 } @@ -8759,6 +11321,11 @@ define i32 @atomicrmw_or_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b release ret i32 %1 } @@ -8788,6 +11355,11 @@ define i32 @atomicrmw_or_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8817,6 +11389,11 @@ define i32 @atomicrmw_or_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w.aqrl a0, a1, 
(a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8846,6 +11423,11 @@ define i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b monotonic ret i32 %1 } @@ -8875,6 +11457,11 @@ define i32 @atomicrmw_xor_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b acquire ret i32 %1 } @@ -8904,6 +11491,11 @@ define i32 @atomicrmw_xor_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b release ret i32 %1 } @@ -8933,6 +11525,11 @@ define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8962,6 +11559,11 @@ define i32 @atomicrmw_xor_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b seq_cst ret i32 %1 } @@ -9048,6 +11650,11 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i32* %a, i32 %b monotonic ret i32 %1 } @@ -9140,6 +11747,11 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i32* %a, i32 %b acquire ret i32 %1 } @@ -9232,6 +11844,11 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i32* %a, i32 %b release ret i32 %1 } @@ -9330,6 +11947,11 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i32* %a, i32 %b acq_rel ret i32 %1 } @@ -9422,6 +12044,11 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max 
i32* %a, i32 %b seq_cst ret i32 %1 } @@ -9508,6 +12135,11 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b monotonic ret i32 %1 } @@ -9600,6 +12232,11 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b acquire ret i32 %1 } @@ -9692,6 +12329,11 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b release ret i32 %1 } @@ -9790,6 +12432,11 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b acq_rel ret i32 %1 } @@ -9882,6 +12529,11 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b seq_cst ret i32 %1 } @@ -9968,6 +12620,11 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i32* %a, i32 %b monotonic ret i32 %1 } @@ -10060,6 +12717,11 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i32* %a, i32 %b acquire ret i32 %1 } @@ -10152,6 +12814,11 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i32* %a, i32 %b release ret i32 %1 } @@ -10250,6 +12917,11 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i32* %a, i32 %b acq_rel ret i32 %1 } @@ -10342,6 +13014,11 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i32* %a, i32 %b 
seq_cst ret i32 %1 } @@ -10428,6 +13105,11 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i32* %a, i32 %b monotonic ret i32 %1 } @@ -10520,6 +13202,11 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i32* %a, i32 %b acquire ret i32 %1 } @@ -10612,6 +13299,11 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i32* %a, i32 %b release ret i32 %1 } @@ -10710,6 +13402,11 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i32* %a, i32 %b acq_rel ret i32 %1 } @@ -10802,6 +13499,11 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i32* %a, i32 %b seq_cst ret i32 %1 } @@ -10836,6 +13538,11 @@ define i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b monotonic ret i64 %1 } @@ -10870,6 +13577,11 @@ define i64 @atomicrmw_xchg_i64_acquire(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b acquire ret i64 %1 } @@ -10904,6 +13616,11 @@ define i64 @atomicrmw_xchg_i64_release(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b release ret i64 %1 } @@ -10938,6 +13655,11 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b acq_rel ret i64 %1 } @@ -10972,6 +13694,11 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b seq_cst ret i64 %1 } @@ -11006,6 
+13733,11 @@ define i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i64* %a, i64 %b monotonic ret i64 %1 } @@ -11040,6 +13772,11 @@ define i64 @atomicrmw_add_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i64* %a, i64 %b acquire ret i64 %1 } @@ -11074,6 +13811,11 @@ define i64 @atomicrmw_add_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i64* %a, i64 %b release ret i64 %1 } @@ -11108,6 +13850,11 @@ define i64 @atomicrmw_add_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i64* %a, i64 %b acq_rel ret i64 %1 } @@ -11142,6 +13889,11 @@ define i64 @atomicrmw_add_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i64* %a, i64 %b seq_cst ret i64 %1 } @@ -11176,6 +13928,12 @@ define i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i64* %a, i64 %b monotonic ret i64 %1 } @@ -11210,6 +13968,12 @@ define i64 @atomicrmw_sub_i64_acquire(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i64* %a, i64 %b acquire ret i64 %1 } @@ -11244,6 +14008,12 @@ define i64 @atomicrmw_sub_i64_release(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i64* %a, i64 %b release ret i64 %1 } @@ -11278,6 +14048,12 @@ define i64 @atomicrmw_sub_i64_acq_rel(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i64* %a, i64 %b acq_rel ret i64 %1 } @@ -11312,6 +14088,12 @@ define i64 @atomicrmw_sub_i64_seq_cst(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub 
i64* %a, i64 %b seq_cst ret i64 %1 } @@ -11346,6 +14128,11 @@ define i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i64* %a, i64 %b monotonic ret i64 %1 } @@ -11380,6 +14167,11 @@ define i64 @atomicrmw_and_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i64* %a, i64 %b acquire ret i64 %1 } @@ -11414,6 +14206,11 @@ define i64 @atomicrmw_and_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i64* %a, i64 %b release ret i64 %1 } @@ -11448,6 +14245,11 @@ define i64 @atomicrmw_and_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i64* %a, i64 %b acq_rel ret i64 %1 } @@ -11482,6 +14284,11 @@ define i64 @atomicrmw_and_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i64* %a, i64 %b seq_cst ret i64 %1 } @@ -11516,6 +14323,18 @@ define i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB185_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.d a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB185_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b monotonic ret i64 %1 } @@ -11550,6 +14369,18 @@ define i64 @atomicrmw_nand_i64_acquire(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB186_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aq a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.d a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB186_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b acquire ret i64 %1 } @@ -11584,6 +14415,18 @@ define i64 @atomicrmw_nand_i64_release(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB187_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.d.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB187_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b release ret i64 %1 } @@ -11618,6 +14461,18 @@ 
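; A condensed sketch of the two expansion shapes exercised by these i64 tests
; (illustrative only, not autogenerated check lines; register names chosen to
; match the surrounding checks): the A extension provides AMO instructions for
; swap/add/and/or/xor/min[u]/max[u] but none for sub or nand, so sub is
; lowered by negating the operand and reusing amoadd.d, while nand is expanded
; after isel into an lr.d/sc.d retry loop:
;
;   neg      a1, a1              # atomicrmw sub
;   amoadd.d a0, a1, (a0)
;
; .loop:                         # atomicrmw nand
;   lr.d     a2, (a0)
;   and      a3, a2, a1
;   not      a3, a3
;   sc.d     a3, a3, (a0)
;   bnez     a3, .loop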
define i64 @atomicrmw_nand_i64_acq_rel(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB188_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aq a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.d.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB188_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b acq_rel ret i64 %1 } @@ -11652,6 +14507,18 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB189_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.d.aqrl a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.d.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB189_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b seq_cst ret i64 %1 } @@ -11686,6 +14553,11 @@ define i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i64* %a, i64 %b monotonic ret i64 %1 } @@ -11720,6 +14592,11 @@ define i64 @atomicrmw_or_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i64* %a, i64 %b acquire ret i64 %1 } @@ -11754,6 +14631,11 @@ define i64 @atomicrmw_or_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i64* %a, i64 %b release ret i64 %1 } @@ -11788,6 +14670,11 @@ define i64 @atomicrmw_or_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i64* %a, i64 %b acq_rel ret i64 %1 } @@ -11822,6 +14709,11 @@ define i64 @atomicrmw_or_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i64* %a, i64 %b seq_cst ret i64 %1 } @@ -11856,6 +14748,11 @@ define i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i64* %a, i64 %b monotonic ret i64 %1 } @@ -11890,6 +14787,11 @@ define i64 @atomicrmw_xor_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i64* %a, i64 
%b acquire ret i64 %1 } @@ -11924,6 +14826,11 @@ define i64 @atomicrmw_xor_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i64* %a, i64 %b release ret i64 %1 } @@ -11958,6 +14865,11 @@ define i64 @atomicrmw_xor_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i64* %a, i64 %b acq_rel ret i64 %1 } @@ -11992,6 +14904,11 @@ define i64 @atomicrmw_xor_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i64* %a, i64 %b seq_cst ret i64 %1 } @@ -12148,6 +15065,11 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i64* %a, i64 %b monotonic ret i64 %1 } @@ -12313,6 +15235,11 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i64* %a, i64 %b acquire ret i64 %1 } @@ -12478,6 +15405,11 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i64* %a, i64 %b release ret i64 %1 } @@ -12652,6 +15584,11 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i64* %a, i64 %b acq_rel ret i64 %1 } @@ -12817,6 +15754,11 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i64* %a, i64 %b seq_cst ret i64 %1 } @@ -12975,6 +15917,11 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i64* %a, i64 %b monotonic ret i64 %1 } @@ -13142,6 +16089,11 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i64* %a, i64 %b acquire ret i64 %1 } @@ 
-13309,6 +16261,11 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i64* %a, i64 %b release ret i64 %1 } @@ -13485,6 +16442,11 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i64* %a, i64 %b acq_rel ret i64 %1 } @@ -13652,6 +16614,11 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i64* %a, i64 %b seq_cst ret i64 %1 } @@ -13808,6 +16775,11 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i64* %a, i64 %b monotonic ret i64 %1 } @@ -13973,6 +16945,11 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i64* %a, i64 %b acquire ret i64 %1 } @@ -14138,6 +17115,11 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i64* %a, i64 %b release ret i64 %1 } @@ -14312,6 +17294,11 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i64* %a, i64 %b acq_rel ret i64 %1 } @@ -14477,6 +17464,11 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomaxu.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umax i64* %a, i64 %b seq_cst ret i64 %1 } @@ -14635,6 +17627,11 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i64_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.d a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i64* %a, i64 %b monotonic ret i64 %1 } @@ -14802,6 +17799,11 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i64_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.d.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i64* %a, i64 %b acquire ret i64 %1 } @@ 
-14969,6 +17971,11 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i64_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.d.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i64* %a, i64 %b release ret i64 %1 } @@ -15145,6 +18152,11 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i64_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i64* %a, i64 %b acq_rel ret i64 %1 } @@ -15312,6 +18324,11 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i64_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amominu.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw umin i64* %a, i64 %b seq_cst ret i64 %1 } From 421a6bc48bcb680774e996b18c15eed878e21ed1 Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Tue, 22 Jan 2019 12:11:53 +0000 Subject: [PATCH 02/11] [RISCV] Quick fix for PR40333 Avoid the infinite loop caused by the target DAG combine converting ANYEXT to SIGNEXT and the target-independent DAG combine logic converting back to ANYEXT. Do this by not adding the new node to the worklist. Committing directly as this definitely doesn't make the problem any worse, and I intend to follow-up with a patch that avoids this custom combiner logic altogether and just lowers the i32 operations to a target-specific SelectionDAG node. This should be easier to reason about and improve codegen quality in some cases (though may miss out on some later DAG combines). llvm-svn: 351806 --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 6 ++++- llvm/test/CodeGen/RISCV/pr40333.ll | 27 +++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 llvm/test/CodeGen/RISCV/pr40333.ll diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 5f55cea4f563a..a3a774604880e 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -576,7 +576,11 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, !(Subtarget.hasStdExtM() && isVariableSDivUDivURem(Src))) break; SDLoc DL(N); - return DCI.CombineTo(N, DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src)); + // Don't add the new node to the DAGCombiner worklist, in order to avoid + // an infinite cycle due to SimplifyDemandedBits converting the + // SIGN_EXTEND back to ANY_EXTEND. + return DCI.CombineTo(N, DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src), + false); } case RISCVISD::SplitF64: { // If the input to SplitF64 is just BuildPairF64 then the operation is diff --git a/llvm/test/CodeGen/RISCV/pr40333.ll b/llvm/test/CodeGen/RISCV/pr40333.ll new file mode 100644 index 0000000000000..3f7ae8da3a228 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/pr40333.ll @@ -0,0 +1,27 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64I %s + +; This test case is significantly simplified from the submitted .ll but +; demonstrates the same issue. At the time of this problem report, an infinite +; loop would be created in DAGCombine, converting ANY_EXTEND to SIGN_EXTEND +; and back again. 
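; Roughly (a sketch of the ping-pong, not verbatim -debug output), the target
; combine rewrote (any_extend (srl x, y)) into (sign_extend (srl x, y)), and
; generic SimplifyDemandedBits turned the sign_extend back into an any_extend.
; Passing false for CombineTo's AddTo flag keeps the replacement node off the
; combiner worklist, so the cycle cannot restart.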
+ +; TODO: This test case is also an example of where it would be cheaper to +; select SRLW, but the current lowering strategy fails to do so. + +define signext i8 @foo(i32 %a, i32 %b) nounwind { +; RV64I-LABEL: foo: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 32 +; RV64I-NEXT: srli a1, a1, 32 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: srl a0, a0, a1 +; RV64I-NEXT: slli a0, a0, 56 +; RV64I-NEXT: srai a0, a0, 56 +; RV64I-NEXT: ret + %1 = lshr i32 %a, %b + %2 = trunc i32 %1 to i8 + ret i8 %2 +} From c6daa4fdfa65dc84c60c8db70e637bf26688d4ba Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Tue, 22 Jan 2019 14:05:11 +0000 Subject: [PATCH 03/11] [RISCV][NFC] Change naming scheme for RISC-V specific DAG nodes Previously we had names like 'Call' or 'Tail'. This potentially clashes with the naming scheme used elsewhere in RISCVInstrInfo.td. Many other backends would use names like AArch64call or PPCtail. I prefer the SystemZ approach, which uses prefixed all-lowercase names. This matches the naming scheme used for target-independent SelectionDAG nodes. llvm-svn: 351823 --- llvm/lib/Target/RISCV/RISCVInstrInfo.td | 93 +++++++++++++------------ 1 file changed, 50 insertions(+), 43 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td index d7cc13d4fabd7..d3bd82e81e9f5 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -17,36 +17,41 @@ include "RISCVInstrFormats.td" // RISC-V specific DAG Nodes. //===----------------------------------------------------------------------===// -def SDT_RISCVCall : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>; -def SDT_RISCVCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>, - SDTCisVT<1, i32>]>; -def SDT_RISCVCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, - SDTCisVT<1, i32>]>; -def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>, - SDTCisSameAs<0, 4>, - SDTCisSameAs<4, 5>]>; - - -def Call : SDNode<"RISCVISD::CALL", SDT_RISCVCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; -def CallSeqStart : SDNode<"ISD::CALLSEQ_START", SDT_RISCVCallSeqStart, - [SDNPHasChain, SDNPOutGlue]>; -def CallSeqEnd : SDNode<"ISD::CALLSEQ_END", SDT_RISCVCallSeqEnd, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; -def RetFlag : SDNode<"RISCVISD::RET_FLAG", SDTNone, - [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; -def URetFlag : SDNode<"RISCVISD::URET_FLAG", SDTNone, - [SDNPHasChain, SDNPOptInGlue]>; -def SRetFlag : SDNode<"RISCVISD::SRET_FLAG", SDTNone, - [SDNPHasChain, SDNPOptInGlue]>; -def MRetFlag : SDNode<"RISCVISD::MRET_FLAG", SDTNone, - [SDNPHasChain, SDNPOptInGlue]>; -def SelectCC : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC, - [SDNPInGlue]>; -def Tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, - SDNPVariadic]>; +// Target-independent type requirements, but with target-specific formats. +def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>, + SDTCisVT<1, i32>]>; +def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, + SDTCisVT<1, i32>]>; + +// Target-dependent type requirements. +def SDT_RISCVCall : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>; +def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>, + SDTCisSameAs<0, 4>, + SDTCisSameAs<4, 5>]>; + +// Target-independent nodes, but with target-specific formats. 
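// These two wrappers keep the generic lower-case callseq_* names; the
// RISC-V-specific nodes below use a riscv_ prefix and all-lowercase spelling,
// matching the target-independent SelectionDAG node naming.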
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart, + [SDNPHasChain, SDNPOutGlue]>; +def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; + +// Target-dependent nodes. +def riscv_call : SDNode<"RISCVISD::CALL", SDT_RISCVCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; +def riscv_ret_flag : SDNode<"RISCVISD::RET_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; +def riscv_uret_flag : SDNode<"RISCVISD::URET_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue]>; +def riscv_sret_flag : SDNode<"RISCVISD::SRET_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue]>; +def riscv_mret_flag : SDNode<"RISCVISD::MRET_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue]>; +def riscv_selectcc : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC, + [SDNPInGlue]>; +def riscv_tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; //===----------------------------------------------------------------------===// // Operand and SDNode transformation definitions. @@ -746,7 +751,7 @@ class SelectCC_rrirr : Pseudo<(outs valty:$dst), (ins cmpty:$lhs, cmpty:$rhs, ixlenimm:$imm, valty:$truev, valty:$falsev), - [(set valty:$dst, (SelectCC cmpty:$lhs, cmpty:$rhs, + [(set valty:$dst, (riscv_selectcc cmpty:$lhs, cmpty:$rhs, (XLenVT imm:$imm), valty:$truev, valty:$falsev))]>; def Select_GPR_Using_CC_GPR : SelectCC_rrirr; @@ -802,22 +807,23 @@ def : Pat<(brind (add GPR:$rs1, simm12:$imm12)), // Define isCodeGenOnly = 0 to support parsing assembly "call" instruction. let isCall = 1, Defs = [X1], isCodeGenOnly = 0 in def PseudoCALL : Pseudo<(outs), (ins bare_symbol:$func), - [(Call tglobaladdr:$func)]> { + [(riscv_call tglobaladdr:$func)]> { let AsmString = "call\t$func"; } -def : Pat<(Call texternalsym:$func), (PseudoCALL texternalsym:$func)>; +def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>; -def : Pat<(URetFlag), (URET X0, X0)>; -def : Pat<(SRetFlag), (SRET X0, X0)>; -def : Pat<(MRetFlag), (MRET X0, X0)>; +def : Pat<(riscv_uret_flag), (URET X0, X0)>; +def : Pat<(riscv_sret_flag), (SRET X0, X0)>; +def : Pat<(riscv_mret_flag), (MRET X0, X0)>; let isCall = 1, Defs = [X1] in -def PseudoCALLIndirect : Pseudo<(outs), (ins GPR:$rs1), [(Call GPR:$rs1)]>, +def PseudoCALLIndirect : Pseudo<(outs), (ins GPR:$rs1), + [(riscv_call GPR:$rs1)]>, PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>; let isBarrier = 1, isReturn = 1, isTerminator = 1 in -def PseudoRET : Pseudo<(outs), (ins), [(RetFlag)]>, +def PseudoRET : Pseudo<(outs), (ins), [(riscv_ret_flag)]>, PseudoInstExpansion<(JALR X0, X1, 0)>; // PseudoTAIL is a pseudo instruction similar to PseudoCALL and will eventually @@ -830,12 +836,13 @@ def PseudoTAIL : Pseudo<(outs), (ins bare_symbol:$dst), []> { } let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2] in -def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1), [(Tail GPRTC:$rs1)]>, +def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1), + [(riscv_tail GPRTC:$rs1)]>, PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>; -def : Pat<(Tail (iPTR tglobaladdr:$dst)), +def : Pat<(riscv_tail (iPTR tglobaladdr:$dst)), (PseudoTAIL texternalsym:$dst)>; -def : Pat<(Tail (iPTR texternalsym:$dst)), +def : Pat<(riscv_tail (iPTR texternalsym:$dst)), (PseudoTAIL texternalsym:$dst)>; let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0, @@ -906,9 +913,9 @@ def : Pat<(atomic_fence (XLenVT 7), (imm)), (FENCE 0b11, 0b11)>; // Pessimistically assume the 
stack pointer will be clobbered let Defs = [X2], Uses = [X2] in { def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2), - [(CallSeqStart timm:$amt1, timm:$amt2)]>; + [(callseq_start timm:$amt1, timm:$amt2)]>; def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2), - [(CallSeqEnd timm:$amt1, timm:$amt2)]>; + [(callseq_end timm:$amt1, timm:$amt2)]>; } // Defs = [X2], Uses = [X2] /// RV64 patterns From 0a77c0e8830eb9d9b6fa5429be958f5025988a53 Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Fri, 25 Jan 2019 05:04:00 +0000 Subject: [PATCH 04/11] [RISCV] Custom-legalise 32-bit variable shifts on RV64 The previous DAG combiner-based approach had an issue with infinite loops between the target-dependent and target-independent combiner logic (see PR40333). Although this was worked around in rL351806, the combiner-based approach is still potentially brittle and can fail to select the 32-bit shift variant when profitable to do so, as demonstrated in the pr40333.ll test case. This patch instead introduces target-specific SelectionDAG nodes for SHLW/SRLW/SRAW and custom-lowers variable i32 shifts to them. pr40333.ll is a good example of how this approach can improve codegen. This adds DAG combine that does SimplifyDemandedBits on the operands (only lower 32-bits of first operand and lower 5 bits of second operand are read). This seems better than implementing SimplifyDemandedBitsForTargetNode as there is no guarantee that would be called (and it's not for e.g. the anyext return test cases). Also implements ComputeNumSignBitsForTargetNode. There are codegen changes in atomic-rmw.ll and atomic-cmpxchg.ll but the new instruction sequences are semantically equivalent. Differential Revision: https://reviews.llvm.org/D57085 llvm-svn: 352169 --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 118 ++++-- llvm/lib/Target/RISCV/RISCVISelLowering.h | 14 +- llvm/lib/Target/RISCV/RISCVInstrInfo.td | 40 +- llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll | 80 ++-- llvm/test/CodeGen/RISCV/atomic-rmw.ll | 440 ++++++++++---------- llvm/test/CodeGen/RISCV/pr40333.ll | 9 +- 6 files changed, 366 insertions(+), 335 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index a3a774604880e..e684f57d8e01f 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -81,10 +81,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); if (Subtarget.is64Bit()) { - setTargetDAGCombine(ISD::SHL); - setTargetDAGCombine(ISD::SRL); - setTargetDAGCombine(ISD::SRA); setTargetDAGCombine(ISD::ANY_EXTEND); + setOperationAction(ISD::SHL, MVT::i32, Custom); + setOperationAction(ISD::SRA, MVT::i32, Custom); + setOperationAction(ISD::SRL, MVT::i32, Custom); } if (!Subtarget.hasStdExtM()) { @@ -513,15 +513,52 @@ SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op, return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT); } -// Return true if the given node is a shift with a non-constant shift amount. -static bool isVariableShift(SDValue Val) { - switch (Val.getOpcode()) { +// Returns the opcode of the target-specific SDNode that implements the 32-bit +// form of the given Opcode. 
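// For a variable-amount i32 shift the intended flow is roughly (a sketch, not
// actual legalizer output):
//   (i32 (srl %x, %y))
//     --ReplaceNodeResults/customLegalizeToWOp-->
//   (i32 (trunc (RISCVISD::SRLW (any_extend %x), (any_extend %y))))
//     --instruction selection-->  srlw
// ISD::SHL maps to RISCVISD::SLLW, ISD::SRA to RISCVISD::SRAW, and ISD::SRL
// to RISCVISD::SRLW; constant shift amounts return early and keep using the
// immediate-form patterns.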
+static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) { + switch (Opcode) { default: - return false; + llvm_unreachable("Unexpected opcode"); case ISD::SHL: + return RISCVISD::SLLW; case ISD::SRA: + return RISCVISD::SRAW; case ISD::SRL: - return Val.getOperand(1).getOpcode() != ISD::Constant; + return RISCVISD::SRLW; + } +} + +// Converts the given 32-bit operation to a target-specific SelectionDAG node. +// Because i32 isn't a legal type for RV64, these operations would otherwise +// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W +// later one because the fact the operation was originally of type i32 is +// lost. +static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) { + SDLoc DL(N); + RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); + SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); + SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); + SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); + // ReplaceNodeResults requires we maintain the same type for the return value. + return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes); +} + +void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, + SmallVectorImpl &Results, + SelectionDAG &DAG) const { + SDLoc DL(N); + switch (N->getOpcode()) { + default: + llvm_unreachable("Don't know how to custom type legalize this operation!"); + case ISD::SHL: + case ISD::SRA: + case ISD::SRL: + assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && + "Unexpected custom legalisation"); + if (N->getOperand(1).getOpcode() == ISD::Constant) + return; + Results.push_back(customLegalizeToWOp(N, DAG)); + break; } } @@ -546,34 +583,14 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, switch (N->getOpcode()) { default: break; - case ISD::SHL: - case ISD::SRL: - case ISD::SRA: { - assert(Subtarget.getXLen() == 64 && "Combine should be 64-bit only"); - if (!DCI.isBeforeLegalize()) - break; - SDValue RHS = N->getOperand(1); - if (N->getValueType(0) != MVT::i32 || RHS->getOpcode() == ISD::Constant || - (RHS->getOpcode() == ISD::AssertZext && - cast(RHS->getOperand(1))->getVT().getSizeInBits() <= 5)) - break; - SDValue LHS = N->getOperand(0); - SDLoc DL(N); - SDValue NewRHS = - DAG.getNode(ISD::AssertZext, DL, RHS.getValueType(), RHS, - DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), 5))); - return DCI.CombineTo( - N, DAG.getNode(N->getOpcode(), DL, LHS.getValueType(), LHS, NewRHS)); - } case ISD::ANY_EXTEND: { - // If any-extending an i32 variable-length shift or sdiv/udiv/urem to i64, - // then instead sign-extend in order to increase the chance of being able - // to select the sllw/srlw/sraw/divw/divuw/remuw instructions. + // If any-extending an i32 sdiv/udiv/urem to i64, then instead sign-extend + // in order to increase the chance of being able to select the + // divw/divuw/remuw instructions. 
SDValue Src = N->getOperand(0); if (N->getValueType(0) != MVT::i64 || Src.getValueType() != MVT::i32) break; - if (!isVariableShift(Src) && - !(Subtarget.hasStdExtM() && isVariableSDivUDivURem(Src))) + if (!(Subtarget.hasStdExtM() && isVariableSDivUDivURem(Src))) break; SDLoc DL(N); // Don't add the new node to the DAGCombiner worklist, in order to avoid @@ -590,11 +607,42 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, break; return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); } + case RISCVISD::SLLW: + case RISCVISD::SRAW: + case RISCVISD::SRLW: { + // Only the lower 32 bits of LHS and lower 5 bits of RHS are read. + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); + APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32); + APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5); + if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) || + (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI))) + return SDValue(); + break; + } } return SDValue(); } +unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( + SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, + unsigned Depth) const { + switch (Op.getOpcode()) { + default: + break; + case RISCVISD::SLLW: + case RISCVISD::SRAW: + case RISCVISD::SRLW: + // TODO: As the result is sign-extended, this is conservatively correct. A + // more precise answer could be calculated for SRAW depending on known + // bits in the shift amount. + return 33; + } + + return 1; +} + static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB) { assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); @@ -1683,6 +1731,12 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { return "RISCVISD::SplitF64"; case RISCVISD::TAIL: return "RISCVISD::TAIL"; + case RISCVISD::SLLW: + return "RISCVISD::SLLW"; + case RISCVISD::SRAW: + return "RISCVISD::SRAW"; + case RISCVISD::SRLW: + return "RISCVISD::SRLW"; } return nullptr; } diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 6970900bb0622..944ae12dc9511 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -32,7 +32,12 @@ enum NodeType : unsigned { SELECT_CC, BuildPairF64, SplitF64, - TAIL + TAIL, + // RV64I shifts, directly matching the semantics of the named RISC-V + // instructions. + SLLW, + SRAW, + SRLW }; } @@ -58,9 +63,16 @@ class RISCVTargetLowering : public TargetLowering { // Provide custom lowering hooks for some operations. SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; + void ReplaceNodeResults(SDNode *N, SmallVectorImpl &Results, + SelectionDAG &DAG) const override; SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override; + unsigned ComputeNumSignBitsForTargetNode(SDValue Op, + const APInt &DemandedElts, + const SelectionDAG &DAG, + unsigned Depth) const override; + // This method returns the name of a target specific DAG node. 
const char *getTargetNodeName(unsigned Opcode) const override; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td index d3bd82e81e9f5..4db8c9bd5b5f7 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -52,6 +52,9 @@ def riscv_selectcc : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC, def riscv_tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall, [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>; +def riscv_sllw : SDNode<"RISCVISD::SLLW", SDTIntShiftOp>; +def riscv_sraw : SDNode<"RISCVISD::SRAW", SDTIntShiftOp>; +def riscv_srlw : SDNode<"RISCVISD::SRLW", SDTIntShiftOp>; //===----------------------------------------------------------------------===// // Operand and SDNode transformation definitions. @@ -668,21 +671,9 @@ def sexti32 : PatFrags<(ops node:$src), def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{ return cast(N->getOperand(1))->getVT() == MVT::i32; }]>; -def assertzexti5 : PatFrag<(ops node:$src), (assertzext node:$src), [{ - return cast(N->getOperand(1))->getVT().getSizeInBits() <= 5; -}]>; def zexti32 : PatFrags<(ops node:$src), [(and node:$src, 0xffffffff), (assertzexti32 node:$src)]>; -// Defines a legal mask for (assertzexti5 (and src, mask)) to be combinable -// with a shiftw operation. The mask mustn't modify the lower 5 bits or the -// upper 32 bits. -def shiftwamt_mask : ImmLeaf(Imm) >= 5 && isUInt<32>(Imm); -}]>; -def shiftwamt : PatFrags<(ops node:$src), - [(assertzexti5 (and node:$src, shiftwamt_mask)), - (assertzexti5 node:$src)]>; /// Immediates @@ -942,28 +933,9 @@ def : Pat<(sext_inreg (shl GPR:$rs1, uimm5:$shamt), i32), def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt), (SRAIW GPR:$rs1, uimm5:$shamt)>; -// For variable-length shifts, we rely on assertzexti5 being inserted during -// lowering (see RISCVTargetLowering::PerformDAGCombine). This enables us to -// guarantee that selecting a 32-bit variable shift is legal (as the variable -// shift is known to be <= 32). We must also be careful not to create -// semantically incorrect patterns. For instance, selecting SRLW for -// (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2)), -// is not guaranteed to be safe, as we don't know whether the upper 32-bits of -// the result are used or not (in the case where rs2=0, this is a -// sign-extension operation). 
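// The riscv_sllw/riscv_srlw/riscv_sraw nodes carry these semantics directly
// (only the low 5 bits of the shift amount are read and the result is
// sign-extended), so the guarded patterns that follow are dropped in favour
// of plain two-register patterns, expected to be of the form
//   def : PatGprGpr<riscv_sllw, SLLW>;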
- -def : Pat<(sext_inreg (shl GPR:$rs1, (shiftwamt GPR:$rs2)), i32), - (SLLW GPR:$rs1, GPR:$rs2)>; -def : Pat<(zexti32 (shl GPR:$rs1, (shiftwamt GPR:$rs2))), - (SRLI (SLLI (SLLW GPR:$rs1, GPR:$rs2), 32), 32)>; - -def : Pat<(sext_inreg (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2)), i32), - (SRLW GPR:$rs1, GPR:$rs2)>; -def : Pat<(zexti32 (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2))), - (SRLI (SLLI (SRLW GPR:$rs1, GPR:$rs2), 32), 32)>; - -def : Pat<(sra (sexti32 GPR:$rs1), (shiftwamt GPR:$rs2)), - (SRAW GPR:$rs1, GPR:$rs2)>; +def : PatGprGpr; +def : PatGprGpr; +def : PatGprGpr; /// Loads diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll index 8818645687ce5..e31b45e8990e6 100644 --- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll +++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll @@ -61,8 +61,8 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -139,8 +139,8 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_acquire_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -217,8 +217,8 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_acquire_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -295,8 +295,8 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_release_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -373,8 +373,8 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_release_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -451,8 +451,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_acq_rel_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -529,8 +529,8 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_acq_rel_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -607,8 +607,8 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* 
%ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_seq_cst_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -685,8 +685,8 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_seq_cst_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -763,8 +763,8 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) { ; ; RV64IA-LABEL: cmpxchg_i8_seq_cst_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a3 ; RV64IA-NEXT: andi a2, a2, 255 @@ -846,8 +846,8 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 @@ -926,8 +926,8 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 @@ -1006,8 +1006,8 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 @@ -1086,8 +1086,8 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 @@ -1166,8 +1166,8 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 @@ -1246,8 +1246,8 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 @@ -1326,8 +1326,8 @@ define void 
@cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 @@ -1406,8 +1406,8 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 @@ -1486,8 +1486,8 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 @@ -1566,8 +1566,8 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) { ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 ; RV64IA-NEXT: and a2, a2, a3 -; RV64IA-NEXT: andi a4, a0, 3 -; RV64IA-NEXT: slli a4, a4, 3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 ; RV64IA-NEXT: sllw a3, a3, a4 ; RV64IA-NEXT: sllw a2, a2, a4 ; RV64IA-NEXT: sllw a1, a1, a4 diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll index 1bd5e9e7f5f47..fa874e6fd2c56 100644 --- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll @@ -52,8 +52,8 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -118,8 +118,8 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -184,8 +184,8 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -250,8 +250,8 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -316,8 +316,8 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; 
RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -382,8 +382,8 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -448,8 +448,8 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -514,8 +514,8 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -580,8 +580,8 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -646,8 +646,8 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -712,8 +712,8 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_sub_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -778,8 +778,8 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_sub_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -844,8 +844,8 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_sub_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -910,8 +910,8 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_sub_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -976,8 +976,8 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_sub_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli 
a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1037,11 +1037,11 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_and_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: or a1, a3, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -1091,11 +1091,11 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_and_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: or a1, a3, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -1145,11 +1145,11 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_and_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: or a1, a3, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -1199,11 +1199,11 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_and_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: or a1, a3, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -1253,11 +1253,11 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_and_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 ; RV64IA-NEXT: or a1, a3, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -1313,8 +1313,8 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_nand_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1381,8 +1381,8 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_nand_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 
255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1449,8 +1449,8 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_nand_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1517,8 +1517,8 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_nand_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1585,8 +1585,8 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) { ; ; RV64IA-LABEL: atomicrmw_nand_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1643,9 +1643,9 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_or_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -1689,9 +1689,9 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_or_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -1735,9 +1735,9 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_or_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -1781,9 +1781,9 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_or_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -1827,9 +1827,9 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_or_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -1873,9 +1873,9 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) 
nounwind { ; RV64IA-LABEL: atomicrmw_xor_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -1919,9 +1919,9 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_xor_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -1965,9 +1965,9 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_xor_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -2011,9 +2011,9 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_xor_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -2057,9 +2057,9 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64IA-LABEL: atomicrmw_xor_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -3738,8 +3738,8 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -3880,8 +3880,8 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4022,8 +4022,8 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4170,8 +4170,8 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi 
a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4312,8 +4312,8 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4448,8 +4448,8 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4590,8 +4590,8 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4732,8 +4732,8 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4880,8 +4880,8 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -5022,8 +5022,8 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 @@ -5096,8 +5096,8 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5164,8 +5164,8 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5232,8 +5232,8 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, 
a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5300,8 +5300,8 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5368,8 +5368,8 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5436,8 +5436,8 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5504,8 +5504,8 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5572,8 +5572,8 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5640,8 +5640,8 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5708,8 +5708,8 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5776,8 +5776,8 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5844,8 +5844,8 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 
@@ -5912,8 +5912,8 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -5980,8 +5980,8 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6048,8 +6048,8 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6110,10 +6110,10 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 -; RV64IA-NEXT: sll a1, a1, a3 -; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: not a2, a2 ; RV64IA-NEXT: or a1, a2, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6166,10 +6166,10 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 -; RV64IA-NEXT: sll a1, a1, a3 -; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: not a2, a2 ; RV64IA-NEXT: or a1, a2, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6222,10 +6222,10 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 -; RV64IA-NEXT: sll a1, a1, a3 -; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: not a2, a2 ; RV64IA-NEXT: or a1, a2, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6278,10 +6278,10 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 -; RV64IA-NEXT: sll a1, a1, a3 -; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: not a2, a2 ; RV64IA-NEXT: or a1, a2, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6334,10 +6334,10 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 -; RV64IA-NEXT: sll a1, a1, a3 -; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: slli a3, a0, 3 +; 
RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: not a2, a2 ; RV64IA-NEXT: or a1, a2, a1 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6397,8 +6397,8 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6467,8 +6467,8 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6537,8 +6537,8 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6607,8 +6607,8 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6677,8 +6677,8 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a2, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -6737,9 +6737,9 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -6787,9 +6787,9 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -6837,9 +6837,9 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -6887,9 +6887,9 @@ define i16 
@atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -6937,9 +6937,9 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -6987,9 +6987,9 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -7037,9 +7037,9 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -7087,9 +7087,9 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -7137,9 +7137,9 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -7187,9 +7187,9 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a2, a0, 3 -; RV64IA-NEXT: slli a2, a2, 3 -; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) ; RV64IA-NEXT: srlw a0, a0, a2 @@ -8900,8 +8900,8 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: 
sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -9052,8 +9052,8 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -9204,8 +9204,8 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -9362,8 +9362,8 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -9514,8 +9514,8 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -9660,8 +9660,8 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -9812,8 +9812,8 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -9964,8 +9964,8 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -10122,8 +10122,8 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; RV64IA-NEXT: sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 @@ -10274,8 +10274,8 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64IA-NEXT: lui a2, 16 ; RV64IA-NEXT: addiw a2, a2, -1 ; RV64IA-NEXT: and a1, a1, a2 -; RV64IA-NEXT: andi a3, a0, 3 -; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 ; 
RV64IA-NEXT: sllw a6, a2, a3 ; RV64IA-NEXT: sllw a1, a1, a3 ; RV64IA-NEXT: andi a0, a0, -4 diff --git a/llvm/test/CodeGen/RISCV/pr40333.ll b/llvm/test/CodeGen/RISCV/pr40333.ll index 3f7ae8da3a228..79e24e3296b1f 100644 --- a/llvm/test/CodeGen/RISCV/pr40333.ll +++ b/llvm/test/CodeGen/RISCV/pr40333.ll @@ -7,17 +7,10 @@ ; loop would be created in DAGCombine, converting ANY_EXTEND to SIGN_EXTEND ; and back again. -; TODO: This test case is also an example of where it would be cheaper to -; select SRLW, but the current lowering strategy fails to do so. - define signext i8 @foo(i32 %a, i32 %b) nounwind { ; RV64I-LABEL: foo: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a1, 32 -; RV64I-NEXT: srli a1, a1, 32 -; RV64I-NEXT: slli a0, a0, 32 -; RV64I-NEXT: srli a0, a0, 32 -; RV64I-NEXT: srl a0, a0, a1 +; RV64I-NEXT: srlw a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: ret From 83a98f45798becb53a2add1bca87b7231b36f716 Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Fri, 25 Jan 2019 05:11:34 +0000 Subject: [PATCH 05/11] [RISCV] Custom-legalise i32 SDIV/UDIV/UREM on RV64M Follow the same custom legalisation strategy as used in D57085 for variable-length shifts (see that patch summary for more discussion). Although we may lose out on some late-stage DAG combines, I think this custom legalisation strategy is ultimately easier to reason about. There are some codegen changes in rv64m-exhaustive-w-insts.ll but they are all neutral in terms of the number of instructions. Differential Revision: https://reviews.llvm.org/D57096 llvm-svn: 352171 --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 58 +++++++++---------- llvm/lib/Target/RISCV/RISCVISelLowering.h | 7 ++- llvm/lib/Target/RISCV/RISCVInstrInfoM.td | 39 +++++++------ .../CodeGen/RISCV/rv64m-exhaustive-w-insts.ll | 32 +++++----- 4 files changed, 71 insertions(+), 65 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index e684f57d8e01f..cd0e871fa80d3 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -81,7 +81,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); if (Subtarget.is64Bit()) { - setTargetDAGCombine(ISD::ANY_EXTEND); setOperationAction(ISD::SHL, MVT::i32, Custom); setOperationAction(ISD::SRA, MVT::i32, Custom); setOperationAction(ISD::SRL, MVT::i32, Custom); @@ -97,6 +96,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(ISD::UREM, XLenVT, Expand); } + if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) { + setOperationAction(ISD::SDIV, MVT::i32, Custom); + setOperationAction(ISD::UDIV, MVT::i32, Custom); + setOperationAction(ISD::UREM, MVT::i32, Custom); + } + setOperationAction(ISD::SDIVREM, XLenVT, Expand); setOperationAction(ISD::UDIVREM, XLenVT, Expand); setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand); @@ -525,6 +530,12 @@ static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) { return RISCVISD::SRAW; case ISD::SRL: return RISCVISD::SRLW; + case ISD::SDIV: + return RISCVISD::DIVW; + case ISD::UDIV: + return RISCVISD::DIVUW; + case ISD::UREM: + return RISCVISD::REMUW; } } @@ -559,46 +570,24 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, return; Results.push_back(customLegalizeToWOp(N, DAG)); break; - } -} - -// Returns true if the given node is an sdiv, udiv, or urem with non-constant -// operands. 
-static bool isVariableSDivUDivURem(SDValue Val) { - switch (Val.getOpcode()) { - default: - return false; case ISD::SDIV: case ISD::UDIV: case ISD::UREM: - return Val.getOperand(0).getOpcode() != ISD::Constant && - Val.getOperand(1).getOpcode() != ISD::Constant; + assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && + Subtarget.hasStdExtM() && "Unexpected custom legalisation"); + if (N->getOperand(0).getOpcode() == ISD::Constant || + N->getOperand(1).getOpcode() == ISD::Constant) + return; + Results.push_back(customLegalizeToWOp(N, DAG)); + break; } } SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { - SelectionDAG &DAG = DCI.DAG; - switch (N->getOpcode()) { default: break; - case ISD::ANY_EXTEND: { - // If any-extending an i32 sdiv/udiv/urem to i64, then instead sign-extend - // in order to increase the chance of being able to select the - // divw/divuw/remuw instructions. - SDValue Src = N->getOperand(0); - if (N->getValueType(0) != MVT::i64 || Src.getValueType() != MVT::i32) - break; - if (!(Subtarget.hasStdExtM() && isVariableSDivUDivURem(Src))) - break; - SDLoc DL(N); - // Don't add the new node to the DAGCombiner worklist, in order to avoid - // an infinite cycle due to SimplifyDemandedBits converting the - // SIGN_EXTEND back to ANY_EXTEND. - return DCI.CombineTo(N, DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src), - false); - } case RISCVISD::SplitF64: { // If the input to SplitF64 is just BuildPairF64 then the operation is // redundant. Instead, use BuildPairF64's operands directly. @@ -634,6 +623,9 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( case RISCVISD::SLLW: case RISCVISD::SRAW: case RISCVISD::SRLW: + case RISCVISD::DIVW: + case RISCVISD::DIVUW: + case RISCVISD::REMUW: // TODO: As the result is sign-extended, this is conservatively correct. A // more precise answer could be calculated for SRAW depending on known // bits in the shift amount. @@ -1737,6 +1729,12 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { return "RISCVISD::SRAW"; case RISCVISD::SRLW: return "RISCVISD::SRLW"; + case RISCVISD::DIVW: + return "RISCVISD::DIVW"; + case RISCVISD::DIVUW: + return "RISCVISD::DIVUW"; + case RISCVISD::REMUW: + return "RISCVISD::REMUW"; } return nullptr; } diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 944ae12dc9511..ddc622a1c9da0 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -37,7 +37,12 @@ enum NodeType : unsigned { // instructions. SLLW, SRAW, - SRLW + SRLW, + // 32-bit operations from RV64M that can't be simply matched with a pattern + // at instruction selection time. + DIVW, + DIVUW, + REMUW }; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td index 05dd3311ad54c..7001632c4cafe 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td @@ -12,6 +12,14 @@ // //===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// RISC-V specific DAG Nodes. 
+//===----------------------------------------------------------------------===// + +def riscv_divw : SDNode<"RISCVISD::DIVW", SDTIntBinOp>; +def riscv_divuw : SDNode<"RISCVISD::DIVUW", SDTIntBinOp>; +def riscv_remuw : SDNode<"RISCVISD::REMUW", SDTIntBinOp>; + //===----------------------------------------------------------------------===// // Instructions //===----------------------------------------------------------------------===// @@ -53,18 +61,19 @@ def : PatGprGpr; let Predicates = [HasStdExtM, IsRV64] in { def : Pat<(sext_inreg (mul GPR:$rs1, GPR:$rs2), i32), (MULW GPR:$rs1, GPR:$rs2)>; -def : Pat<(sext_inreg (sdiv (sexti32 GPR:$rs1), - (sexti32 GPR:$rs2)), i32), - (DIVW GPR:$rs1, GPR:$rs2)>; -def : Pat<(zexti32 (sdiv (sexti32 GPR:$rs1), - (sexti32 GPR:$rs2))), - (SRLI (SLLI (DIVW GPR:$rs1, GPR:$rs2), 32), 32)>; -def : Pat<(sext_inreg (udiv (zexti32 GPR:$rs1), (zexti32 GPR:$rs2)), i32), - (DIVUW GPR:$rs1, GPR:$rs2)>; -// It's cheaper to perform a divuw and zero-extend the result than to -// zero-extend both inputs to a udiv. -def : Pat<(udiv (and GPR:$rs1, 0xffffffff), (and GPR:$rs2, 0xffffffff)), - (SRLI (SLLI (DIVUW GPR:$rs1, GPR:$rs2), 32), 32)>; + +def : PatGprGpr; +def : PatGprGpr; +def : PatGprGpr; + +// Handle the specific cases where using DIVU/REMU would be correct and result +// in fewer instructions than emitting DIVUW/REMUW then zero-extending the +// result. +def : Pat<(zexti32 (riscv_divuw (zexti32 GPR:$rs1), (zexti32 GPR:$rs2))), + (DIVU GPR:$rs1, GPR:$rs2)>; +def : Pat<(zexti32 (riscv_remuw (zexti32 GPR:$rs1), (zexti32 GPR:$rs2))), + (REMU GPR:$rs1, GPR:$rs2)>; + // Although the sexti32 operands may not have originated from an i32 srem, // this pattern is safe as it is impossible for two sign extended inputs to // produce a result where res[63:32]=0 and res[31]=1. @@ -73,10 +82,4 @@ def : Pat<(srem (sexti32 GPR:$rs1), (sexti32 GPR:$rs2)), def : Pat<(sext_inreg (srem (sexti32 GPR:$rs1), (sexti32 GPR:$rs2)), i32), (REMW GPR:$rs1, GPR:$rs2)>; -def : Pat<(sext_inreg (urem (zexti32 GPR:$rs1), (zexti32 GPR:$rs2)), i32), - (REMUW GPR:$rs1, GPR:$rs2)>; -// It's cheaper to perform a remuw and zero-extend the result than to -// zero-extend both inputs to a urem. 
-def : Pat<(urem (and GPR:$rs1, 0xffffffff), (and GPR:$rs2, 0xffffffff)), - (SRLI (SLLI (REMUW GPR:$rs1, GPR:$rs2), 32), 32)>; } // Predicates = [HasStdExtM, IsRV64] diff --git a/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll b/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll index f3e877ae9e7cb..980c5813944e9 100644 --- a/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll +++ b/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll @@ -454,9 +454,9 @@ define zeroext i32 @zext_divuw_aext_sext(i32 %a, i32 signext %b) nounwind { define zeroext i32 @zext_divuw_aext_zext(i32 %a, i32 zeroext %b) nounwind { ; RV64IM-LABEL: zext_divuw_aext_zext: ; RV64IM: # %bb.0: +; RV64IM-NEXT: divuw a0, a0, a1 ; RV64IM-NEXT: slli a0, a0, 32 ; RV64IM-NEXT: srli a0, a0, 32 -; RV64IM-NEXT: divu a0, a0, a1 ; RV64IM-NEXT: ret %1 = udiv i32 %a, %b ret i32 %1 @@ -487,9 +487,9 @@ define zeroext i32 @zext_divuw_sext_sext(i32 signext %a, i32 signext %b) nounwin define zeroext i32 @zext_divuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind { ; RV64IM-LABEL: zext_divuw_sext_zext: ; RV64IM: # %bb.0: +; RV64IM-NEXT: divuw a0, a0, a1 ; RV64IM-NEXT: slli a0, a0, 32 ; RV64IM-NEXT: srli a0, a0, 32 -; RV64IM-NEXT: divu a0, a0, a1 ; RV64IM-NEXT: ret %1 = udiv i32 %a, %b ret i32 %1 @@ -498,9 +498,9 @@ define zeroext i32 @zext_divuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwin define zeroext i32 @zext_divuw_zext_aext(i32 zeroext %a, i32 %b) nounwind { ; RV64IM-LABEL: zext_divuw_zext_aext: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a1, 32 -; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: divu a0, a0, a1 +; RV64IM-NEXT: divuw a0, a0, a1 +; RV64IM-NEXT: slli a0, a0, 32 +; RV64IM-NEXT: srli a0, a0, 32 ; RV64IM-NEXT: ret %1 = udiv i32 %a, %b ret i32 %1 @@ -509,9 +509,9 @@ define zeroext i32 @zext_divuw_zext_aext(i32 zeroext %a, i32 %b) nounwind { define zeroext i32 @zext_divuw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind { ; RV64IM-LABEL: zext_divuw_zext_sext: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a1, 32 -; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: divu a0, a0, a1 +; RV64IM-NEXT: divuw a0, a0, a1 +; RV64IM-NEXT: slli a0, a0, 32 +; RV64IM-NEXT: srli a0, a0, 32 ; RV64IM-NEXT: ret %1 = udiv i32 %a, %b ret i32 %1 @@ -1235,9 +1235,9 @@ define zeroext i32 @zext_remuw_aext_sext(i32 %a, i32 signext %b) nounwind { define zeroext i32 @zext_remuw_aext_zext(i32 %a, i32 zeroext %b) nounwind { ; RV64IM-LABEL: zext_remuw_aext_zext: ; RV64IM: # %bb.0: +; RV64IM-NEXT: remuw a0, a0, a1 ; RV64IM-NEXT: slli a0, a0, 32 ; RV64IM-NEXT: srli a0, a0, 32 -; RV64IM-NEXT: remu a0, a0, a1 ; RV64IM-NEXT: ret %1 = urem i32 %a, %b ret i32 %1 @@ -1268,9 +1268,9 @@ define zeroext i32 @zext_remuw_sext_sext(i32 signext %a, i32 signext %b) nounwin define zeroext i32 @zext_remuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind { ; RV64IM-LABEL: zext_remuw_sext_zext: ; RV64IM: # %bb.0: +; RV64IM-NEXT: remuw a0, a0, a1 ; RV64IM-NEXT: slli a0, a0, 32 ; RV64IM-NEXT: srli a0, a0, 32 -; RV64IM-NEXT: remu a0, a0, a1 ; RV64IM-NEXT: ret %1 = urem i32 %a, %b ret i32 %1 @@ -1279,9 +1279,9 @@ define zeroext i32 @zext_remuw_sext_zext(i32 signext %a, i32 zeroext %b) nounwin define zeroext i32 @zext_remuw_zext_aext(i32 zeroext %a, i32 %b) nounwind { ; RV64IM-LABEL: zext_remuw_zext_aext: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a1, 32 -; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: remu a0, a0, a1 +; RV64IM-NEXT: remuw a0, a0, a1 +; RV64IM-NEXT: slli a0, a0, 32 +; RV64IM-NEXT: srli a0, a0, 32 ; RV64IM-NEXT: ret %1 = urem i32 %a, %b ret i32 %1 @@ -1290,9 
+1290,9 @@ define zeroext i32 @zext_remuw_zext_aext(i32 zeroext %a, i32 %b) nounwind { define zeroext i32 @zext_remuw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind { ; RV64IM-LABEL: zext_remuw_zext_sext: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a1, 32 -; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: remu a0, a0, a1 +; RV64IM-NEXT: remuw a0, a0, a1 +; RV64IM-NEXT: slli a0, a0, 32 +; RV64IM-NEXT: srli a0, a0, 32 ; RV64IM-NEXT: ret %1 = urem i32 %a, %b ret i32 %1 From badf21818392dd0869e024d254d307e17eb766d4 Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Fri, 25 Jan 2019 14:33:08 +0000 Subject: [PATCH 06/11] [RISCV] Add tests to demonstrate bitcasted fneg/fabs dagcombines This target-independent code won't trigger for cases such as RV32FD where custom SelectionDAG nodes are generated. These new tests demonstrate such cases. Additionally, float-arith.ll was updated so that fneg.s, fsgnjn.s, and fabs.s selection patterns are actually exercised. llvm-svn: 352199 --- .../RISCV/double-bitmanip-dagcombines.ll | 80 +++++++++++++++++++ llvm/test/CodeGen/RISCV/float-arith.ll | 58 ++++++++------ .../RISCV/float-bitmanip-dagcombines.ll | 49 ++++++++++++ 3 files changed, 162 insertions(+), 25 deletions(-) create mode 100644 llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll create mode 100644 llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll new file mode 100644 index 0000000000000..7ad72cca60c43 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll @@ -0,0 +1,80 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32I %s +; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64I %s +; +; This file tests cases where simple floating point operations can be +; profitably handled though bit manipulation if a soft-float ABI is being used +; (e.g. fneg implemented by XORing the sign bit). This is typically handled in +; DAGCombiner::visitBITCAST, but this target-independent code may not trigger +; in cases where we perform custom legalisation (e.g. RV32IFD). + +; TODO: Add an appropriate target-specific DAG combine that can handle +; RISCVISD::SplitF64/BuildPairF64 used for RV32IFD. 
+ +define double @fneg(double %a) nounwind { +; RV32I-LABEL: fneg: +; RV32I: # %bb.0: +; RV32I-NEXT: lui a2, 524288 +; RV32I-NEXT: xor a1, a1, a2 +; RV32I-NEXT: ret +; +; RV32IFD-LABEL: fneg: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: fneg.d ft0, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64I-LABEL: fneg: +; RV64I: # %bb.0: +; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: slli a1, a1, 63 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: ret + %1 = fneg double %a + ret double %1 +} + +declare double @llvm.fabs.f64(double) + +define double @fabs(double %a) nounwind { +; RV32I-LABEL: fabs: +; RV32I: # %bb.0: +; RV32I-NEXT: lui a2, 524288 +; RV32I-NEXT: addi a2, a2, -1 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: ret +; +; RV32IFD-LABEL: fabs: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: fabs.d ft0, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64I-LABEL: fabs: +; RV64I: # %bb.0: +; RV64I-NEXT: addi a1, zero, -1 +; RV64I-NEXT: slli a1, a1, 63 +; RV64I-NEXT: addi a1, a1, -1 +; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: ret + %1 = call double @llvm.fabs.f64(double %a) + ret double %1 +} diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll index ab874476541ec..25e09471fad49 100644 --- a/llvm/test/CodeGen/RISCV/float-arith.ll +++ b/llvm/test/CodeGen/RISCV/float-arith.ll @@ -81,50 +81,58 @@ define float @fsgnj_s(float %a, float %b) nounwind { ret float %1 } -define float @fneg_s(float %a) nounwind { -; TODO: doesn't test the fneg selection pattern because -; DAGCombiner::visitBITCAST will generate a xor on the incoming integer -; argument +; This function performs extra work to ensure that +; DAGCombiner::visitBITCAST doesn't replace the fneg with an xor. +define i32 @fneg_s(float %a, float %b) nounwind { ; RV32IF-LABEL: fneg_s: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a1, 524288 -; RV32IF-NEXT: xor a0, a0, a1 +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: fadd.s ft0, ft0, ft0 +; RV32IF-NEXT: fneg.s ft1, ft0 +; RV32IF-NEXT: feq.s a0, ft0, ft1 ; RV32IF-NEXT: ret - %1 = fsub float -0.0, %a - ret float %1 + %1 = fadd float %a, %a + %2 = fneg float %1 + %3 = fcmp oeq float %1, %2 + %4 = zext i1 %3 to i32 + ret i32 %4 } +; This function performs extra work to ensure that +; DAGCombiner::visitBITCAST doesn't replace the fneg with an xor. 
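As an aside, the bit tricks the header comment above describes can be checked with a small, self-contained C++ sketch (illustrative only, not part of the patch; the helper names and test values are made up): negating a double by XORing the IEEE-754 sign bit and taking its absolute value by clearing that bit, which is what the soft-float RV32I/RV64I output in this file implements with lui/slli-built masks.

// Illustrative sketch only: fneg == flip the IEEE-754 sign bit,
// fabs == clear the sign bit, mirroring the soft-float lowering
// exercised by this test file.
#include <cassert>
#include <cstdint>
#include <cstring>

static uint64_t bitsOf(double d) { uint64_t u; std::memcpy(&u, &d, sizeof u); return u; }
static double doubleOf(uint64_t u) { double d; std::memcpy(&d, &u, sizeof d); return d; }

int main() {
  const uint64_t SignBit = UINT64_C(1) << 63; // RV64I materialises this with addi/slli
  double a = -1.5;
  assert(doubleOf(bitsOf(a) ^ SignBit) == 1.5);  // fneg: XOR the sign bit
  assert(doubleOf(bitsOf(a) & ~SignBit) == 1.5); // fabs: AND out the sign bit
  return 0;
}

Because the sign bit of an f64 lives in the upper 32 bits, the RV32I soft-float output above only ever touches the high word (a1) of the register pair.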
define float @fsgnjn_s(float %a, float %b) nounwind { -; TODO: fsgnjn.s isn't selected because DAGCombiner::visitBITCAST will convert -; (bitconvert (fneg x)) to a xor ; RV32IF-LABEL: fsgnjn_s: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a2, 524288 -; RV32IF-NEXT: xor a1, a1, a2 ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 -; RV32IF-NEXT: fsgnj.s ft0, ft1, ft0 +; RV32IF-NEXT: fadd.s ft0, ft1, ft0 +; RV32IF-NEXT: fsgnjn.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret - %1 = fsub float -0.0, %b - %2 = call float @llvm.copysign.f32(float %a, float %1) - ret float %2 + %1 = fadd float %a, %b + %2 = fneg float %1 + %3 = call float @llvm.copysign.f32(float %a, float %2) + ret float %3 } declare float @llvm.fabs.f32(float) -define float @fabs_s(float %a) nounwind { -; TODO: doesn't test the fabs selection pattern because -; DAGCombiner::visitBITCAST will generate an and on the incoming integer -; argument +; This function performs extra work to ensure that +; DAGCombiner::visitBITCAST doesn't replace the fabs with an and. +define float @fabs_s(float %a, float %b) nounwind { ; RV32IF-LABEL: fabs_s: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a1, 524288 -; RV32IF-NEXT: addi a1, a1, -1 -; RV32IF-NEXT: and a0, a0, a1 +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fadd.s ft0, ft1, ft0 +; RV32IF-NEXT: fabs.s ft1, ft0 +; RV32IF-NEXT: fadd.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret - %1 = call float @llvm.fabs.f32(float %a) - ret float %1 + %1 = fadd float %a, %b + %2 = call float @llvm.fabs.f32(float %1) + %3 = fadd float %2, %1 + ret float %3 } declare float @llvm.minnum.f32(float, float) diff --git a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll new file mode 100644 index 0000000000000..0911481e5c88b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32I %s +; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32I %s +; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64I %s + +; This file tests cases where simple floating point operations can be +; profitably handled though bit manipulation if a soft-float ABI is being used +; (e.g. fneg implemented by XORing the sign bit). This is typically handled in +; DAGCombiner::visitBITCAST, but this target-independent code may not trigger +; in cases where we perform custom legalisation (e.g. RV64F). 
+ +define float @fneg(float %a) nounwind { +; RV32I-LABEL: fneg: +; RV32I: # %bb.0: +; RV32I-NEXT: lui a1, 524288 +; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fneg: +; RV64I: # %bb.0: +; RV64I-NEXT: lui a1, 524288 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: ret + %1 = fneg float %a + ret float %1 +} + +declare float @llvm.fabs.f32(float) + +define float @fabs(float %a) nounwind { +; RV32I-LABEL: fabs: +; RV32I: # %bb.0: +; RV32I-NEXT: lui a1, 524288 +; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fabs: +; RV64I: # %bb.0: +; RV64I-NEXT: lui a1, 524288 +; RV64I-NEXT: addiw a1, a1, -1 +; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: ret + %1 = call float @llvm.fabs.f32(float %a) + ret float %1 +} From 16304fba00248fe1c57e83caa08a934051ef0670 Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Fri, 25 Jan 2019 16:04:04 +0000 Subject: [PATCH 07/11] [RISCV][NFC] s/f32/f64 in double-arith.ll The intrinsic names erroneously used the .f32 variant. As the return and argument types were still double the intrinsics calls worked properly. llvm-svn: 352211 --- llvm/test/CodeGen/RISCV/double-arith.ll | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll index cd3a1d9637832..7262eac404260 100644 --- a/llvm/test/CodeGen/RISCV/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/double-arith.ll @@ -86,7 +86,7 @@ define double @fdiv_d(double %a, double %b) nounwind { ret double %1 } -declare double @llvm.sqrt.f32(double) +declare double @llvm.sqrt.f64(double) define double @fsqrt_d(double %a) nounwind { ; RV32IFD-LABEL: fsqrt_d: @@ -101,11 +101,11 @@ define double @fsqrt_d(double %a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret - %1 = call double @llvm.sqrt.f32(double %a) + %1 = call double @llvm.sqrt.f64(double %a) ret double %1 } -declare double @llvm.copysign.f32(double, double) +declare double @llvm.copysign.f64(double, double) define double @fsgnj_d(double %a, double %b) nounwind { ; RV32IFD-LABEL: fsgnj_d: @@ -123,7 +123,7 @@ define double @fsgnj_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret - %1 = call double @llvm.copysign.f32(double %a, double %b) + %1 = call double @llvm.copysign.f64(double %a, double %b) ret double %1 } @@ -161,11 +161,11 @@ define double @fsgnjn_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret %1 = fsub double -0.0, %b - %2 = call double @llvm.copysign.f32(double %a, double %1) + %2 = call double @llvm.copysign.f64(double %a, double %1) ret double %2 } -declare double @llvm.fabs.f32(double) +declare double @llvm.fabs.f64(double) define double @fabs_d(double %a) nounwind { ; RV32IFD-LABEL: fabs_d: @@ -180,11 +180,11 @@ define double @fabs_d(double %a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret - %1 = call double @llvm.fabs.f32(double %a) + %1 = call double @llvm.fabs.f64(double %a) ret double %1 } -declare double @llvm.minnum.f32(double, double) +declare double @llvm.minnum.f64(double, double) define double @fmin_d(double %a, double %b) nounwind { ; RV32IFD-LABEL: fmin_d: @@ -202,11 +202,11 @@ define double @fmin_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret - %1 = call double @llvm.minnum.f32(double %a, double %b) + %1 = call 
double @llvm.minnum.f64(double %a, double %b) ret double %1 } -declare double @llvm.maxnum.f32(double, double) +declare double @llvm.maxnum.f64(double, double) define double @fmax_d(double %a, double %b) nounwind { ; RV32IFD-LABEL: fmax_d: @@ -224,7 +224,7 @@ define double @fmax_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret - %1 = call double @llvm.maxnum.f32(double %a, double %b) + %1 = call double @llvm.maxnum.f64(double %a, double %b) ret double %1 } From 750989788c6db3263c162d56d6f9ca8914b46862 Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Fri, 25 Jan 2019 21:06:47 +0000 Subject: [PATCH 08/11] [RISCV] Add another potential combine to {double,float}-bitmanip-dagcombines.ll (fcopysign a, (fneg b)) will be expanded to bitwise operations by DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN if the floating point type isn't legal. Arguably it might be worth doing a combine even if it is legal. llvm-svn: 352240 --- .../RISCV/double-bitmanip-dagcombines.ll | 48 ++++++++++++++++ .../RISCV/float-bitmanip-dagcombines.ll | 56 ++++++++++++++++++- 2 files changed, 103 insertions(+), 1 deletion(-) diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll index 7ad72cca60c43..4e0fbbd35d674 100644 --- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll @@ -78,3 +78,51 @@ define double @fabs(double %a) nounwind { %1 = call double @llvm.fabs.f64(double %a) ret double %1 } + +declare double @llvm.copysign.f64(double, double) + +; DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN will convert to bitwise +; operations if floating point isn't supported. A combine could be written to +; do the same even when f64 is legal. 
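The expansion the comment above refers to boils down to a simple bit-level identity: keep the magnitude bits of a and take the inverted sign bit of b. A standalone C++ sketch of that identity follows (copysignOfNeg is an illustrative name, not an LLVM API, and this is not code the patch adds). On RV32, where a soft-float double lives in a lo/hi register pair, only the high word carries the sign, which is why the RV32I sequence below touches just a1 and a3.

  #include <cstdint>
  #include <cstring>

  // Sketch of copysign(a, -b) on doubles using only integer operations.
  static double copysignOfNeg(double A, double B) {
    uint64_t ABits, BBits;
    std::memcpy(&ABits, &A, sizeof(ABits));
    std::memcpy(&BBits, &B, sizeof(BBits));
    const uint64_t SignBit = UINT64_C(1) << 63;
    // Magnitude of A combined with the inverted sign of B.
    uint64_t ResBits = (ABits & ~SignBit) | (~BBits & SignBit);
    double Res;
    std::memcpy(&Res, &ResBits, sizeof(Res));
    return Res;
  }

  int main() {
    // copysign(1.0, -(-2.0)) == 1.0 and copysign(1.0, -(2.0)) == -1.0.
    return (copysignOfNeg(1.0, -2.0) == 1.0 &&
            copysignOfNeg(1.0, 2.0) == -1.0) ? 0 : 1;
  }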
+ +define double @fcopysign_fneg(double %a, double %b) nounwind { +; RV32I-LABEL: fcopysign_fneg: +; RV32I: # %bb.0: +; RV32I-NEXT: not a2, a3 +; RV32I-NEXT: lui a3, 524288 +; RV32I-NEXT: and a2, a2, a3 +; RV32I-NEXT: addi a3, a3, -1 +; RV32I-NEXT: and a1, a1, a3 +; RV32I-NEXT: or a1, a1, a2 +; RV32I-NEXT: ret +; +; RV32IFD-LABEL: fcopysign_fneg: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: fsgnjn.d ft0, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64I-LABEL: fcopysign_fneg: +; RV64I: # %bb.0: +; RV64I-NEXT: addi a2, zero, -1 +; RV64I-NEXT: slli a2, a2, 63 +; RV64I-NEXT: not a1, a1 +; RV64I-NEXT: and a1, a1, a2 +; RV64I-NEXT: addi a2, a2, -1 +; RV64I-NEXT: and a0, a0, a2 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: ret + %1 = fneg double %b + %2 = call double @llvm.copysign.f64(double %a, double %1) + ret double %2 +} diff --git a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll index 0911481e5c88b..01f3152c49769 100644 --- a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32I %s ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ -; RUN: | FileCheck -check-prefix=RV32I %s +; RUN: | FileCheck -check-prefix=RV32IF %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s @@ -19,6 +19,12 @@ define float @fneg(float %a) nounwind { ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: ret ; +; RV32IF-LABEL: fneg: +; RV32IF: # %bb.0: +; RV32IF-NEXT: lui a1, 524288 +; RV32IF-NEXT: xor a0, a0, a1 +; RV32IF-NEXT: ret +; ; RV64I-LABEL: fneg: ; RV64I: # %bb.0: ; RV64I-NEXT: lui a1, 524288 @@ -38,6 +44,13 @@ define float @fabs(float %a) nounwind { ; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: ret ; +; RV32IF-LABEL: fabs: +; RV32IF: # %bb.0: +; RV32IF-NEXT: lui a1, 524288 +; RV32IF-NEXT: addi a1, a1, -1 +; RV32IF-NEXT: and a0, a0, a1 +; RV32IF-NEXT: ret +; ; RV64I-LABEL: fabs: ; RV64I: # %bb.0: ; RV64I-NEXT: lui a1, 524288 @@ -47,3 +60,44 @@ define float @fabs(float %a) nounwind { %1 = call float @llvm.fabs.f32(float %a) ret float %1 } + +declare float @llvm.copysign.f32(float, float) + +; DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN will convert to bitwise +; operations if floating point isn't supported. A combine could be written to +; do the same even when f32 is legal. 
+ +define float @fcopysign_fneg(float %a, float %b) nounwind { +; RV32I-LABEL: fcopysign_fneg: +; RV32I: # %bb.0: +; RV32I-NEXT: not a1, a1 +; RV32I-NEXT: lui a2, 524288 +; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: addi a2, a2, -1 +; RV32I-NEXT: and a0, a0, a2 +; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: ret +; +; RV32IF-LABEL: fcopysign_fneg: +; RV32IF: # %bb.0: +; RV32IF-NEXT: lui a2, 524288 +; RV32IF-NEXT: xor a1, a1, a2 +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fsgnj.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64I-LABEL: fcopysign_fneg: +; RV64I: # %bb.0: +; RV64I-NEXT: not a1, a1 +; RV64I-NEXT: lui a2, 524288 +; RV64I-NEXT: and a1, a1, a2 +; RV64I-NEXT: addiw a2, a2, -1 +; RV64I-NEXT: and a0, a0, a2 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: ret + %1 = fneg float %b + %2 = call float @llvm.copysign.f32(float %a, float %1) + ret float %2 +} From 15481505b1ab4b0288ccb08b324c0e56d669f7cf Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Fri, 25 Jan 2019 21:55:48 +0000 Subject: [PATCH 09/11] [RISCV] Add target DAG combine for bitcast fabs/fneg on RV32FD DAGCombiner::visitBITCAST will perform: fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) As shown in double-bitmanip-dagcombines.ll, this can be advantageous. But RV32FD doesn't use bitcast directly (as i64 isn't a legal type), and instead uses RISCVISD::SplitF64. This patch adds an equivalent DAG combine for SplitF64. llvm-svn: 352247 --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 31 ++++++++++++++-- llvm/test/CodeGen/RISCV/double-arith.ll | 37 +++++++++++++------ .../RISCV/double-bitmanip-dagcombines.ll | 23 +++--------- llvm/test/CodeGen/RISCV/double-intrinsics.ll | 12 ++---- 4 files changed, 61 insertions(+), 42 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index cd0e871fa80d3..6c507c9a0475f 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -585,16 +585,41 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { + SelectionDAG &DAG = DCI.DAG; + switch (N->getOpcode()) { default: break; case RISCVISD::SplitF64: { + SDValue Op0 = N->getOperand(0); // If the input to SplitF64 is just BuildPairF64 then the operation is // redundant. Instead, use BuildPairF64's operands directly. - SDValue Op0 = N->getOperand(0); - if (Op0->getOpcode() != RISCVISD::BuildPairF64) + if (Op0->getOpcode() == RISCVISD::BuildPairF64) + return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); + + SDLoc DL(N); + // This is a target-specific version of a DAGCombine performed in + // DAGCombiner::visitBITCAST. 
It performs the equivalent of: + // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) + // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) + if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || + !Op0.getNode()->hasOneUse()) break; - return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); + SDValue NewSplitF64 = + DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), + Op0.getOperand(0)); + SDValue Lo = NewSplitF64.getValue(0); + SDValue Hi = NewSplitF64.getValue(1); + APInt SignBit = APInt::getSignMask(32); + if (Op0.getOpcode() == ISD::FNEG) { + SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi, + DAG.getConstant(SignBit, DL, MVT::i32)); + return DCI.CombineTo(N, Lo, NewHi); + } + assert(Op0.getOpcode() == ISD::FABS); + SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi, + DAG.getConstant(~SignBit, DL, MVT::i32)); + return DCI.CombineTo(N, Lo, NewHi); } case RISCVISD::SLLW: case RISCVISD::SRAW: diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll index 7262eac404260..a5243ea18ab14 100644 --- a/llvm/test/CodeGen/RISCV/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/double-arith.ll @@ -127,21 +127,25 @@ define double @fsgnj_d(double %a, double %b) nounwind { ret double %1 } -define double @fneg_d(double %a) nounwind { +; This function performs extra work to ensure that +; DAGCombiner::visitBITCAST doesn't replace the fneg with an xor. +define i32 @fneg_d(double %a, double %b) nounwind { ; RV32IFD-LABEL: fneg_d: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: addi sp, sp, -16 ; RV32IFD-NEXT: sw a0, 8(sp) ; RV32IFD-NEXT: sw a1, 12(sp) ; RV32IFD-NEXT: fld ft0, 8(sp) -; RV32IFD-NEXT: fneg.d ft0, ft0 -; RV32IFD-NEXT: fsd ft0, 8(sp) -; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: fadd.d ft0, ft0, ft0 +; RV32IFD-NEXT: fneg.d ft1, ft0 +; RV32IFD-NEXT: feq.d a0, ft0, ft1 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret - %1 = fsub double -0.0, %a - ret double %1 + %1 = fadd double %a, %a + %2 = fneg double %1 + %3 = fcmp oeq double %1, %2 + %4 = zext i1 %3 to i32 + ret i32 %4 } define double @fsgnjn_d(double %a, double %b) nounwind { @@ -167,21 +171,30 @@ define double @fsgnjn_d(double %a, double %b) nounwind { declare double @llvm.fabs.f64(double) -define double @fabs_d(double %a) nounwind { +; This function performs extra work to ensure that +; DAGCombiner::visitBITCAST doesn't replace the fabs with an and. 
+define double @fabs_d(double %a, double %b) nounwind { ; RV32IFD-LABEL: fabs_d: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) ; RV32IFD-NEXT: sw a0, 8(sp) ; RV32IFD-NEXT: sw a1, 12(sp) -; RV32IFD-NEXT: fld ft0, 8(sp) -; RV32IFD-NEXT: fabs.d ft0, ft0 +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV32IFD-NEXT: fabs.d ft1, ft0 +; RV32IFD-NEXT: fadd.d ft0, ft1, ft0 ; RV32IFD-NEXT: fsd ft0, 8(sp) ; RV32IFD-NEXT: lw a0, 8(sp) ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret - %1 = call double @llvm.fabs.f64(double %a) - ret double %1 + %1 = fadd double %a, %b + %2 = call double @llvm.fabs.f64(double %1) + %3 = fadd double %2, %1 + ret double %3 } declare double @llvm.minnum.f64(double, double) diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll index 4e0fbbd35d674..25364cf4e48d3 100644 --- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll @@ -24,15 +24,8 @@ define double @fneg(double %a) nounwind { ; ; RV32IFD-LABEL: fneg: ; RV32IFD: # %bb.0: -; RV32IFD-NEXT: addi sp, sp, -16 -; RV32IFD-NEXT: sw a0, 8(sp) -; RV32IFD-NEXT: sw a1, 12(sp) -; RV32IFD-NEXT: fld ft0, 8(sp) -; RV32IFD-NEXT: fneg.d ft0, ft0 -; RV32IFD-NEXT: fsd ft0, 8(sp) -; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: lw a1, 12(sp) -; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: lui a2, 524288 +; RV32IFD-NEXT: xor a1, a1, a2 ; RV32IFD-NEXT: ret ; ; RV64I-LABEL: fneg: @@ -57,15 +50,9 @@ define double @fabs(double %a) nounwind { ; ; RV32IFD-LABEL: fabs: ; RV32IFD: # %bb.0: -; RV32IFD-NEXT: addi sp, sp, -16 -; RV32IFD-NEXT: sw a0, 8(sp) -; RV32IFD-NEXT: sw a1, 12(sp) -; RV32IFD-NEXT: fld ft0, 8(sp) -; RV32IFD-NEXT: fabs.d ft0, ft0 -; RV32IFD-NEXT: fsd ft0, 8(sp) -; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: lw a1, 12(sp) -; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: lui a2, 524288 +; RV32IFD-NEXT: addi a2, a2, -1 +; RV32IFD-NEXT: and a1, a1, a2 ; RV32IFD-NEXT: ret ; ; RV64I-LABEL: fabs: diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll index cd14c1932d471..7e69bb1743a4f 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll @@ -254,15 +254,9 @@ declare double @llvm.fabs.f64(double) define double @fabs_f64(double %a) nounwind { ; RV32IFD-LABEL: fabs_f64: ; RV32IFD: # %bb.0: -; RV32IFD-NEXT: addi sp, sp, -16 -; RV32IFD-NEXT: sw a0, 8(sp) -; RV32IFD-NEXT: sw a1, 12(sp) -; RV32IFD-NEXT: fld ft0, 8(sp) -; RV32IFD-NEXT: fabs.d ft0, ft0 -; RV32IFD-NEXT: fsd ft0, 8(sp) -; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: lw a1, 12(sp) -; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: lui a2, 524288 +; RV32IFD-NEXT: addi a2, a2, -1 +; RV32IFD-NEXT: and a1, a1, a2 ; RV32IFD-NEXT: ret %1 = call double @llvm.fabs.f64(double %a) ret double %1 From c6aca6d423abf72e9149b6e4ba22ec6ebe4b7d10 Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Thu, 31 Jan 2019 22:48:38 +0000 Subject: [PATCH 10/11] [RISCV] Add RV64F codegen support This requires a little extra work due to the fact that i32 is not a legal type: when call lowering happens post-legalisation (e.g. when an intrinsic was inserted during legalisation), a bitcast from f32 to i32 can't be introduced. This is similar to the challenges with RV32D.
To handle this, we introduce target-specific DAG nodes that perform bitcast+anyext for f32->i64 and trunc+bitcast for i64->f32. Differential Revision: https://reviews.llvm.org/D53235 llvm-svn: 352807 --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 75 ++++- llvm/lib/Target/RISCV/RISCVISelLowering.h | 9 +- llvm/lib/Target/RISCV/RISCVInstrInfoF.td | 48 +++ llvm/test/CodeGen/RISCV/float-arith.ll | 162 +++++++++ .../RISCV/float-bitmanip-dagcombines.ll | 23 ++ llvm/test/CodeGen/RISCV/float-br-fcmp.ll | 310 ++++++++++++++++++ llvm/test/CodeGen/RISCV/float-convert.ll | 122 +++++++ llvm/test/CodeGen/RISCV/float-fcmp.ll | 134 ++++++++ llvm/test/CodeGen/RISCV/float-imm.ll | 21 ++ llvm/test/CodeGen/RISCV/float-mem.ll | 76 +++++ llvm/test/CodeGen/RISCV/float-select-fcmp.ll | 215 ++++++++++++ .../CodeGen/RISCV/rv32i-rv64i-float-double.ll | 86 +++++ .../test/CodeGen/RISCV/rv64f-float-convert.ll | 187 +++++++++++ 13 files changed, 1466 insertions(+), 2 deletions(-) create mode 100644 llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll create mode 100644 llvm/test/CodeGen/RISCV/rv64f-float-convert.ll diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 6c507c9a0475f..7f880d28e7635 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -138,6 +138,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(Op, MVT::f32, Expand); } + if (Subtarget.hasStdExtF() && Subtarget.is64Bit()) + setOperationAction(ISD::BITCAST, MVT::i32, Custom); + if (Subtarget.hasStdExtD()) { setOperationAction(ISD::FMINNUM, MVT::f64, Legal); setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); @@ -339,6 +342,17 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op, return lowerFRAMEADDR(Op, DAG); case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG); + case ISD::BITCAST: { + assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() && + "Unexpected custom legalisation"); + SDLoc DL(Op); + SDValue Op0 = Op.getOperand(0); + if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32) + return SDValue(); + SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); + SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0); + return FPConv; + } } } @@ -580,6 +594,18 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, return; Results.push_back(customLegalizeToWOp(N, DAG)); break; + case ISD::BITCAST: { + assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && + Subtarget.hasStdExtF() && "Unexpected custom legalisation"); + SDLoc DL(N); + SDValue Op0 = N->getOperand(0); + if (Op0.getValueType() != MVT::f32) + return; + SDValue FPConv = + DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0); + Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv)); + break; + } } } @@ -634,6 +660,38 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, return SDValue(); break; } + case RISCVISD::FMV_X_ANYEXTW_RV64: { + SDLoc DL(N); + SDValue Op0 = N->getOperand(0); + // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the + // conversion is unnecessary and can be replaced with an ANY_EXTEND + // of the FMV_W_X_RV64 operand. + if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) { + SDValue AExtOp = + DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0)); + return DCI.CombineTo(N, AExtOp); + } + + // This is a target-specific version of a DAGCombine performed in + // DAGCombiner::visitBITCAST. 
It performs the equivalent of: + // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) + // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) + if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || + !Op0.getNode()->hasOneUse()) + break; + SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, + Op0.getOperand(0)); + APInt SignBit = APInt::getSignMask(32).sext(64); + if (Op0.getOpcode() == ISD::FNEG) { + return DCI.CombineTo(N, + DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV, + DAG.getConstant(SignBit, DL, MVT::i64))); + } + assert(Op0.getOpcode() == ISD::FABS); + return DCI.CombineTo(N, + DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV, + DAG.getConstant(~SignBit, DL, MVT::i64))); + } } return SDValue(); @@ -875,7 +933,7 @@ static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT, assert(XLen == 32 || XLen == 64); MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; if (ValVT == MVT::f32) { - LocVT = MVT::i32; + LocVT = XLenVT; LocInfo = CCValAssign::BCvt; } @@ -1048,6 +1106,10 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, case CCValAssign::Full: break; case CCValAssign::BCvt: + if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) { + Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); + break; + } Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); break; } @@ -1083,6 +1145,10 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, case CCValAssign::Full: break; case CCValAssign::BCvt: + if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) { + Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); + break; + } Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); break; } @@ -1109,9 +1175,12 @@ static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, llvm_unreachable("Unexpected CCValAssign::LocInfo"); case CCValAssign::Full: case CCValAssign::Indirect: + case CCValAssign::BCvt: ExtType = ISD::NON_EXTLOAD; break; } + if (ValVT == MVT::f32) + LocVT = MVT::f32; Val = DAG.getExtLoad( ExtType, DL, LocVT, Chain, FIN, MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); @@ -1760,6 +1829,10 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { return "RISCVISD::DIVUW"; case RISCVISD::REMUW: return "RISCVISD::REMUW"; + case RISCVISD::FMV_W_X_RV64: + return "RISCVISD::FMV_W_X_RV64"; + case RISCVISD::FMV_X_ANYEXTW_RV64: + return "RISCVISD::FMV_X_ANYEXTW_RV64"; } return nullptr; } diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index ddc622a1c9da0..3be255644b659 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -42,7 +42,14 @@ enum NodeType : unsigned { // at instruction selection time. DIVW, DIVUW, - REMUW + REMUW, + // FPR32<->GPR transfer operations for RV64. Needed as an i32<->f32 bitcast + // is not legal on RV64. FMV_W_X_RV64 matches the semantics of the FMV.W.X. + // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result. + // This is a more convenient semantic for producing dagcombines that remove + // unnecessary GPR->FPR->GPR moves. 
+ FMV_W_X_RV64, + FMV_X_ANYEXTW_RV64 }; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td index 03bdac45873d0..ed05afe7e593a 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td @@ -12,6 +12,20 @@ // //===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// RISC-V specific DAG Nodes. +//===----------------------------------------------------------------------===// + +def SDT_RISCVFMV_W_X_RV64 + : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i64>]>; +def SDT_RISCVFMV_X_ANYEXTW_RV64 + : SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, f32>]>; + +def riscv_fmv_w_x_rv64 + : SDNode<"RISCVISD::FMV_W_X_RV64", SDT_RISCVFMV_W_X_RV64>; +def riscv_fmv_x_anyextw_rv64 + : SDNode<"RISCVISD::FMV_X_ANYEXTW_RV64", SDT_RISCVFMV_X_ANYEXTW_RV64>; + //===----------------------------------------------------------------------===// // Operand and SDNode transformation definitions. //===----------------------------------------------------------------------===// @@ -334,3 +348,37 @@ def : Pat<(fp_to_uint FPR32:$rs1), (FCVT_WU_S $rs1, 0b001)>; def : Pat<(sint_to_fp GPR:$rs1), (FCVT_S_W $rs1, 0b111)>; def : Pat<(uint_to_fp GPR:$rs1), (FCVT_S_WU $rs1, 0b111)>; } // Predicates = [HasStdExtF, IsRV32] + +let Predicates = [HasStdExtF, IsRV32] in { +// FP->[u]int. Round-to-zero must be used +def : Pat<(fp_to_sint FPR32:$rs1), (FCVT_W_S $rs1, 0b001)>; +def : Pat<(fp_to_uint FPR32:$rs1), (FCVT_WU_S $rs1, 0b001)>; + +// [u]int->fp. Match GCC and default to using dynamic rounding mode. +def : Pat<(sint_to_fp GPR:$rs1), (FCVT_S_W $rs1, 0b111)>; +def : Pat<(uint_to_fp GPR:$rs1), (FCVT_S_WU $rs1, 0b111)>; +} // Predicates = [HasStdExtF, IsRV32] + +let Predicates = [HasStdExtF, IsRV64] in { +def : Pat<(riscv_fmv_w_x_rv64 GPR:$src), (FMV_W_X GPR:$src)>; +def : Pat<(riscv_fmv_x_anyextw_rv64 FPR32:$src), (FMV_X_W FPR32:$src)>; +def : Pat<(sexti32 (riscv_fmv_x_anyextw_rv64 FPR32:$src)), + (FMV_X_W FPR32:$src)>; + +// FP->[u]int32 is mostly handled by the FP->[u]int64 patterns. This is safe +// because fpto[u|s]i produces poison if the value can't fit into the target. +// We match the single case below because fcvt.wu.s sign-extends its result so +// is cheaper than fcvt.lu.s+sext.w. +def : Pat<(sext_inreg (assertzexti32 (fp_to_uint FPR32:$rs1)), i32), + (FCVT_WU_S $rs1, 0b001)>; + +// FP->[u]int64 +def : Pat<(fp_to_sint FPR32:$rs1), (FCVT_L_S $rs1, 0b001)>; +def : Pat<(fp_to_uint FPR32:$rs1), (FCVT_LU_S $rs1, 0b001)>; + +// [u]int->fp. Match GCC and default to using dynamic rounding mode. 
+def : Pat<(sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_S_W $rs1, 0b111)>; +def : Pat<(uint_to_fp (zexti32 GPR:$rs1)), (FCVT_S_WU $rs1, 0b111)>; +def : Pat<(sint_to_fp GPR:$rs1), (FCVT_S_L $rs1, 0b111)>; +def : Pat<(uint_to_fp GPR:$rs1), (FCVT_S_LU $rs1, 0b111)>; +} // Predicates = [HasStdExtF, IsRV64] diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll index 25e09471fad49..a668b7e494274 100644 --- a/llvm/test/CodeGen/RISCV/float-arith.ll +++ b/llvm/test/CodeGen/RISCV/float-arith.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s ; These tests are each targeted at a particular RISC-V FPU instruction. Most ; other files in this folder exercise LLVM IR instructions that don't directly @@ -14,6 +16,14 @@ define float @fadd_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fadd_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fadd float %a, %b ret float %1 } @@ -26,6 +36,14 @@ define float @fsub_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fsub.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fsub_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fsub.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fsub float %a, %b ret float %1 } @@ -38,6 +56,14 @@ define float @fmul_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fmul.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmul_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fmul.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fmul float %a, %b ret float %1 } @@ -50,6 +76,14 @@ define float @fdiv_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fdiv.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fdiv_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fdiv.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fdiv float %a, %b ret float %1 } @@ -63,6 +97,13 @@ define float @fsqrt_s(float %a) nounwind { ; RV32IF-NEXT: fsqrt.s ft0, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fsqrt_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fsqrt.s ft0, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = call float @llvm.sqrt.f32(float %a) ret float %1 } @@ -77,6 +118,14 @@ define float @fsgnj_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fsgnj_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fsgnj.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = call float @llvm.copysign.f32(float %a, float %b) ret float %1 } @@ -91,6 +140,14 @@ define i32 @fneg_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fneg.s ft1, ft0 ; RV32IF-NEXT: feq.s a0, ft0, ft1 ; RV32IF-NEXT: ret +; +; 
RV64IF-LABEL: fneg_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fadd.s ft0, ft0, ft0 +; RV64IF-NEXT: fneg.s ft1, ft0 +; RV64IF-NEXT: feq.s a0, ft0, ft1 +; RV64IF-NEXT: ret %1 = fadd float %a, %a %2 = fneg float %1 %3 = fcmp oeq float %1, %2 @@ -109,6 +166,15 @@ define float @fsgnjn_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fsgnjn.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fsgnjn_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fsgnjn.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fadd float %a, %b %2 = fneg float %1 %3 = call float @llvm.copysign.f32(float %a, float %2) @@ -129,6 +195,16 @@ define float @fabs_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fabs_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fabs.s ft1, ft0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fadd float %a, %b %2 = call float @llvm.fabs.f32(float %1) %3 = fadd float %2, %1 @@ -145,6 +221,14 @@ define float @fmin_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fmin.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmin_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fmin.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = call float @llvm.minnum.f32(float %a, float %b) ret float %1 } @@ -159,6 +243,14 @@ define float @fmax_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fmax.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmax_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fmax.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = call float @llvm.maxnum.f32(float %a, float %b) ret float %1 } @@ -170,6 +262,13 @@ define i32 @feq_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: feq.s a0, ft1, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: feq_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: ret %1 = fcmp oeq float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -182,6 +281,13 @@ define i32 @flt_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: flt_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: ret %1 = fcmp olt float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -194,6 +300,13 @@ define i32 @fle_s(float %a, float %b) nounwind { ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: fle.s a0, ft1, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fle_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: ret %1 = fcmp ole float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -210,6 +323,15 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind { ; RV32IF-NEXT: fmadd.s ft0, ft2, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmadd_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a2 +; 
RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft2, a0 +; RV64IF-NEXT: fmadd.s ft0, ft2, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = call float @llvm.fma.f32(float %a, float %b, float %c) ret float %1 } @@ -227,6 +349,19 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind { ; RV32IF-NEXT: fmsub.s ft0, ft2, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmsub_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a2 +; RV64IF-NEXT: lui a2, %hi(.LCPI15_0) +; RV64IF-NEXT: addi a2, a2, %lo(.LCPI15_0) +; RV64IF-NEXT: flw ft1, 0(a2) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft2, a0 +; RV64IF-NEXT: fmsub.s ft0, ft2, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %c_ = fadd float 0.0, %c ; avoid negation using xor %negc = fsub float -0.0, %c_ %1 = call float @llvm.fma.f32(float %a, float %b, float %negc) @@ -247,6 +382,20 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind { ; RV32IF-NEXT: fnmadd.s ft0, ft1, ft2, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fnmadd_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a2 +; RV64IF-NEXT: lui a2, %hi(.LCPI16_0) +; RV64IF-NEXT: addi a2, a2, %lo(.LCPI16_0) +; RV64IF-NEXT: flw ft1, 0(a2) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fmv.w.x ft2, a0 +; RV64IF-NEXT: fadd.s ft1, ft2, ft1 +; RV64IF-NEXT: fmv.w.x ft2, a1 +; RV64IF-NEXT: fnmadd.s ft0, ft1, ft2, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %a_ = fadd float 0.0, %a %c_ = fadd float 0.0, %c %nega = fsub float -0.0, %a_ @@ -268,6 +417,19 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind { ; RV32IF-NEXT: fnmsub.s ft0, ft0, ft2, ft1 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fnmsub_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: lui a0, %hi(.LCPI17_0) +; RV64IF-NEXT: addi a0, a0, %lo(.LCPI17_0) +; RV64IF-NEXT: flw ft1, 0(a0) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fmv.w.x ft1, a2 +; RV64IF-NEXT: fmv.w.x ft2, a1 +; RV64IF-NEXT: fnmsub.s ft0, ft0, ft2, ft1 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %a_ = fadd float 0.0, %a %nega = fsub float -0.0, %a_ %1 = call float @llvm.fma.f32(float %nega, float %b, float %c) diff --git a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll index 01f3152c49769..215fe5839ef7e 100644 --- a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll @@ -5,6 +5,8 @@ ; RUN: | FileCheck -check-prefix=RV32IF %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s ; This file tests cases where simple floating point operations can be ; profitably handled though bit manipulation if a soft-float ABI is being used @@ -30,6 +32,12 @@ define float @fneg(float %a) nounwind { ; RV64I-NEXT: lui a1, 524288 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: ret +; +; RV64IF-LABEL: fneg: +; RV64IF: # %bb.0: +; RV64IF-NEXT: lui a1, 524288 +; RV64IF-NEXT: xor a0, a0, a1 +; RV64IF-NEXT: ret %1 = fneg float %a ret float %1 } @@ -57,6 +65,13 @@ define float @fabs(float %a) nounwind { ; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret +; +; RV64IF-LABEL: fabs: +; RV64IF: # %bb.0: +; RV64IF-NEXT: lui a1, 524288 +; RV64IF-NEXT: addiw 
a1, a1, -1 +; RV64IF-NEXT: and a0, a0, a1 +; RV64IF-NEXT: ret %1 = call float @llvm.fabs.f32(float %a) ret float %1 } @@ -97,6 +112,14 @@ define float @fcopysign_fneg(float %a, float %b) nounwind { ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret +; +; RV64IF-LABEL: fcopysign_fneg: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fsgnjn.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fneg float %b %2 = call float @llvm.copysign.f32(float %a, float %1) ret float %2 diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll index 5804bd0fd1f82..0e78c91bedf73 100644 --- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s declare void @abort() declare void @exit(i32) @@ -19,6 +21,19 @@ define void @br_fcmp_false(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB0_2: # %if.else ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_false: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: addi a0, zero, 1 +; RV64IF-NEXT: bnez a0, .LBB0_2 +; RV64IF-NEXT: # %bb.1: # %if.then +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB0_2: # %if.else +; RV64IF-NEXT: call abort %1 = fcmp false float %a, %b br i1 %1, label %if.then, label %if.else if.then: @@ -43,6 +58,21 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB1_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_oeq: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: bnez a0, .LBB1_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB1_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp oeq float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -71,6 +101,22 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB2_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_oeq_alt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: beqz a0, .LBB2_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB2_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp oeq float %a, %b br i1 %1, label %if.then, label %if.else if.then: @@ -95,6 +141,21 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB3_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_ogt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: bnez a0, .LBB3_2 +; RV64IF-NEXT: # %bb.1: # 
%if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB3_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp ogt float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -119,6 +180,21 @@ define void @br_fcmp_oge(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB4_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_oge: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: bnez a0, .LBB4_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB4_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp oge float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -143,6 +219,21 @@ define void @br_fcmp_olt(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB5_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_olt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: bnez a0, .LBB5_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB5_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp olt float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -167,6 +258,21 @@ define void @br_fcmp_ole(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB6_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_ole: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: bnez a0, .LBB6_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB6_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp ole float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -199,6 +305,28 @@ define void @br_fcmp_one(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB7_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_one: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: feq.s a1, ft0, ft1 +; RV64IF-NEXT: not a1, a1 +; RV64IF-NEXT: seqz a0, a0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: bnez a0, .LBB7_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB7_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp one float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -227,6 +355,25 @@ define void @br_fcmp_ord(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB8_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_ord: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft0 +; RV64IF-NEXT: and a0, a0, a1 +; RV64IF-NEXT: seqz a0, 
a0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB8_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB8_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp ord float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -256,6 +403,26 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB9_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_ueq: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: feq.s a2, ft1, ft1 +; RV64IF-NEXT: and a1, a2, a1 +; RV64IF-NEXT: seqz a1, a1 +; RV64IF-NEXT: or a0, a0, a1 +; RV64IF-NEXT: bnez a0, .LBB9_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB9_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp ueq float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -281,6 +448,22 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB10_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_ugt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB10_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB10_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp ugt float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -306,6 +489,22 @@ define void @br_fcmp_uge(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB11_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_uge: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB11_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB11_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp uge float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -331,6 +530,22 @@ define void @br_fcmp_ult(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB12_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_ult: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB12_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB12_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp ult float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -356,6 +571,22 @@ define void @br_fcmp_ule(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB13_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_ule: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: 
fmv.w.x ft1, a1 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB13_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB13_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp ule float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -381,6 +612,22 @@ define void @br_fcmp_une(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB14_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_une: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB14_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB14_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp une float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -409,6 +656,24 @@ define void @br_fcmp_uno(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB15_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_uno: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft0 +; RV64IF-NEXT: and a0, a0, a1 +; RV64IF-NEXT: seqz a0, a0 +; RV64IF-NEXT: bnez a0, .LBB15_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB15_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp uno float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -431,6 +696,19 @@ define void @br_fcmp_true(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB16_2: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_true: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: addi a0, zero, 1 +; RV64IF-NEXT: bnez a0, .LBB16_2 +; RV64IF-NEXT: # %bb.1: # %if.else +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; RV64IF-NEXT: .LBB16_2: # %if.then +; RV64IF-NEXT: call abort %1 = fcmp true float %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -471,6 +749,38 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind { ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB17_3: # %if.then ; RV32IF-NEXT: call abort +; +; RV64IF-LABEL: br_fcmp_store_load_stack_slot: +; RV64IF: # %bb.0: # %entry +; RV64IF-NEXT: addi sp, sp, -32 +; RV64IF-NEXT: sd ra, 24(sp) +; RV64IF-NEXT: sd s1, 16(sp) +; RV64IF-NEXT: lui a0, %hi(.LCPI17_0) +; RV64IF-NEXT: addi a0, a0, %lo(.LCPI17_0) +; RV64IF-NEXT: flw ft0, 0(a0) +; RV64IF-NEXT: fsw ft0, 12(sp) +; RV64IF-NEXT: fmv.x.w s1, ft0 +; RV64IF-NEXT: mv a0, s1 +; RV64IF-NEXT: call dummy +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: flw ft1, 12(sp) +; RV64IF-NEXT: feq.s a0, ft0, ft1 +; RV64IF-NEXT: beqz a0, .LBB17_3 +; RV64IF-NEXT: # %bb.1: # %if.end +; RV64IF-NEXT: mv a0, s1 +; RV64IF-NEXT: call dummy +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: flw ft1, 12(sp) +; RV64IF-NEXT: feq.s a0, ft0, ft1 +; RV64IF-NEXT: beqz a0, .LBB17_3 +; RV64IF-NEXT: # %bb.2: # %if.end4 +; RV64IF-NEXT: mv a0, zero +; RV64IF-NEXT: ld s1, 16(sp) +; RV64IF-NEXT: ld ra, 24(sp) +; RV64IF-NEXT: addi sp, sp, 32 +; RV64IF-NEXT: ret +; RV64IF-NEXT: 
.LBB17_3: # %if.then +; RV64IF-NEXT: call abort entry: %call = call float @dummy(float 0.000000e+00) %cmp = fcmp une float %call, 0.000000e+00 diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll index d6e67cc6a8e1b..296e9d468fe7e 100644 --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -1,23 +1,41 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s +; For RV64F, fcvt.l.s is semantically equivalent to fcvt.w.s in this case +; because fptosi will produce poison if the result doesn't fit into an i32. define i32 @fcvt_w_s(float %a) nounwind { ; RV32IF-LABEL: fcvt_w_s: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcvt_w_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz +; RV64IF-NEXT: ret %1 = fptosi float %a to i32 ret i32 %1 } +; For RV64F, fcvt.lu.s is semantically equivalent to fcvt.wu.s in this case +; because fptoui will produce poison if the result doesn't fit into an i32. define i32 @fcvt_wu_s(float %a) nounwind { ; RV32IF-LABEL: fcvt_wu_s: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fcvt.wu.s a0, ft0, rtz ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcvt_wu_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.lu.s a0, ft0, rtz +; RV64IF-NEXT: ret %1 = fptoui float %a to i32 ret i32 %1 } @@ -30,6 +48,14 @@ define i32 @fmv_x_w(float %a, float %b) nounwind { ; RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmv_x_w: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret ; Ensure fmv.x.w is generated even for a soft float calling convention %1 = fadd float %a, %b %2 = bitcast float %1 to i32 @@ -42,6 +68,12 @@ define float @fcvt_s_w(i32 %a) nounwind { ; RV32IF-NEXT: fcvt.s.w ft0, a0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcvt_s_w: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.w ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = sitofp i32 %a to float ret float %1 } @@ -52,6 +84,12 @@ define float @fcvt_s_wu(i32 %a) nounwind { ; RV32IF-NEXT: fcvt.s.wu ft0, a0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcvt_s_wu: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.wu ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = uitofp i32 %a to float ret float %1 } @@ -64,9 +102,93 @@ define float @fmv_w_x(i32 %a, i32 %b) nounwind { ; RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmv_w_x: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret ; Ensure fmv.w.x is generated even for a soft float calling convention %1 = bitcast i32 %a to float %2 = bitcast i32 %b to float %3 = fadd float %1, %2 ret float %3 } + +define i64 @fcvt_l_s(float %a) nounwind { +; RV32IF-LABEL: fcvt_l_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) +; RV32IF-NEXT: call __fixsfdi +; 
RV32IF-NEXT: lw ra, 12(sp) +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcvt_l_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz +; RV64IF-NEXT: ret + %1 = fptosi float %a to i64 + ret i64 %1 +} + +define i64 @fcvt_lu_s(float %a) nounwind { +; RV32IF-LABEL: fcvt_lu_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) +; RV32IF-NEXT: call __fixunssfdi +; RV32IF-NEXT: lw ra, 12(sp) +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcvt_lu_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.lu.s a0, ft0, rtz +; RV64IF-NEXT: ret + %1 = fptoui float %a to i64 + ret i64 %1 +} + +define float @fcvt_s_l(i64 %a) nounwind { +; RV32IF-LABEL: fcvt_s_l: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) +; RV32IF-NEXT: call __floatdisf +; RV32IF-NEXT: lw ra, 12(sp) +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcvt_s_l: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.l ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = sitofp i64 %a to float + ret float %1 +} + +define float @fcvt_s_lu(i64 %a) nounwind { +; RV32IF-LABEL: fcvt_s_lu: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) +; RV32IF-NEXT: call __floatundisf +; RV32IF-NEXT: lw ra, 12(sp) +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcvt_s_lu: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.lu ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = uitofp i64 %a to float + ret float %1 +} diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll index c8942a90eb762..16f441847911b 100644 --- a/llvm/test/CodeGen/RISCV/float-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll @@ -1,12 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s define i32 @fcmp_false(float %a, float %b) nounwind { ; RV32IF-LABEL: fcmp_false: ; RV32IF: # %bb.0: ; RV32IF-NEXT: mv a0, zero ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_false: +; RV64IF: # %bb.0: +; RV64IF-NEXT: mv a0, zero +; RV64IF-NEXT: ret %1 = fcmp false float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -19,6 +26,13 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind { ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: feq.s a0, ft1, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_oeq: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: ret %1 = fcmp oeq float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -31,6 +45,13 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind { ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_ogt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: ret %1 = fcmp ogt float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -43,6 +64,13 @@ define i32 @fcmp_oge(float %a, float %b) nounwind { ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: fle.s a0, ft1, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_oge: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fle.s a0, 
ft1, ft0 +; RV64IF-NEXT: ret %1 = fcmp oge float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -55,6 +83,13 @@ define i32 @fcmp_olt(float %a, float %b) nounwind { ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_olt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: ret %1 = fcmp olt float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -67,6 +102,13 @@ define i32 @fcmp_ole(float %a, float %b) nounwind { ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: fle.s a0, ft1, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_ole: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: ret %1 = fcmp ole float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -86,6 +128,20 @@ define i32 @fcmp_one(float %a, float %b) nounwind { ; RV32IF-NEXT: xori a0, a0, 1 ; RV32IF-NEXT: and a0, a1, a0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_one: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: feq.s a1, ft0, ft1 +; RV64IF-NEXT: not a1, a1 +; RV64IF-NEXT: seqz a0, a0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: ret %1 = fcmp one float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -102,6 +158,17 @@ define i32 @fcmp_ord(float %a, float %b) nounwind { ; RV32IF-NEXT: seqz a0, a0 ; RV32IF-NEXT: xori a0, a0, 1 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_ord: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft0 +; RV64IF-NEXT: and a0, a0, a1 +; RV64IF-NEXT: seqz a0, a0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: ret %1 = fcmp ord float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -119,6 +186,18 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind { ; RV32IF-NEXT: seqz a1, a1 ; RV32IF-NEXT: or a0, a0, a1 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_ueq: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: feq.s a2, ft1, ft1 +; RV64IF-NEXT: and a1, a2, a1 +; RV64IF-NEXT: seqz a1, a1 +; RV64IF-NEXT: or a0, a0, a1 +; RV64IF-NEXT: ret %1 = fcmp ueq float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -132,6 +211,14 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind { ; RV32IF-NEXT: fle.s a0, ft1, ft0 ; RV32IF-NEXT: xori a0, a0, 1 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_ugt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: ret %1 = fcmp ugt float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -145,6 +232,14 @@ define i32 @fcmp_uge(float %a, float %b) nounwind { ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: xori a0, a0, 1 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_uge: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: ret %1 = fcmp uge float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -158,6 +253,14 @@ define i32 @fcmp_ult(float %a, float %b) nounwind { ; RV32IF-NEXT: fle.s a0, ft1, ft0 ; RV32IF-NEXT: xori a0, a0, 1 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_ult: +; RV64IF: # %bb.0: +; 
RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: ret %1 = fcmp ult float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -171,6 +274,14 @@ define i32 @fcmp_ule(float %a, float %b) nounwind { ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: xori a0, a0, 1 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_ule: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: ret %1 = fcmp ule float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -184,6 +295,14 @@ define i32 @fcmp_une(float %a, float %b) nounwind { ; RV32IF-NEXT: feq.s a0, ft1, ft0 ; RV32IF-NEXT: xori a0, a0, 1 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_une: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: ret %1 = fcmp une float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -199,6 +318,16 @@ define i32 @fcmp_uno(float %a, float %b) nounwind { ; RV32IF-NEXT: and a0, a0, a1 ; RV32IF-NEXT: seqz a0, a0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_uno: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft0 +; RV64IF-NEXT: and a0, a0, a1 +; RV64IF-NEXT: seqz a0, a0 +; RV64IF-NEXT: ret %1 = fcmp uno float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -209,6 +338,11 @@ define i32 @fcmp_true(float %a, float %b) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi a0, zero, 1 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fcmp_true: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi a0, zero, 1 +; RV64IF-NEXT: ret %1 = fcmp true float %a, %b %2 = zext i1 %1 to i32 ret i32 %2 diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll index a8d032571e128..09d9c3c933a12 100644 --- a/llvm/test/CodeGen/RISCV/float-imm.ll +++ b/llvm/test/CodeGen/RISCV/float-imm.ll @@ -1,13 +1,24 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s +; TODO: constant pool shouldn't be necessary for RV64IF. 
define float @float_imm() nounwind { ; RV32IF-LABEL: float_imm: ; RV32IF: # %bb.0: ; RV32IF-NEXT: lui a0, 263313 ; RV32IF-NEXT: addi a0, a0, -37 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: float_imm: +; RV64IF: # %bb.0: +; RV64IF-NEXT: lui a0, %hi(.LCPI0_0) +; RV64IF-NEXT: addi a0, a0, %lo(.LCPI0_0) +; RV64IF-NEXT: flw ft0, 0(a0) +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret ret float 3.14159274101257324218750 } @@ -22,6 +33,16 @@ define float @float_imm_op(float %a) nounwind { ; RV32IF-NEXT: fadd.s ft0, ft0, ft1 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: float_imm_op: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: lui a0, %hi(.LCPI1_0) +; RV64IF-NEXT: addi a0, a0, %lo(.LCPI1_0) +; RV64IF-NEXT: flw ft1, 0(a0) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fadd float %a, 1.0 ret float %1 } diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll index 95a9d73d5f753..b90ee601b74a3 100644 --- a/llvm/test/CodeGen/RISCV/float-mem.ll +++ b/llvm/test/CodeGen/RISCV/float-mem.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s define float @flw(float *%a) nounwind { ; RV32IF-LABEL: flw: @@ -10,6 +12,14 @@ define float @flw(float *%a) nounwind { ; RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: flw: +; RV64IF: # %bb.0: +; RV64IF-NEXT: flw ft0, 12(a0) +; RV64IF-NEXT: flw ft1, 0(a0) +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = load float, float* %a %2 = getelementptr float, float* %a, i32 3 %3 = load float, float* %2 @@ -30,6 +40,15 @@ define void @fsw(float *%a, float %b, float %c) nounwind { ; RV32IF-NEXT: fsw ft0, 32(a0) ; RV32IF-NEXT: fsw ft0, 0(a0) ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fsw: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a2 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fsw ft0, 32(a0) +; RV64IF-NEXT: fsw ft0, 0(a0) +; RV64IF-NEXT: ret %1 = fadd float %b, %c store float %1, float* %a %2 = getelementptr float, float* %a, i32 8 @@ -56,6 +75,20 @@ define float @flw_fsw_global(float %a, float %b) nounwind { ; RV32IF-NEXT: fsw ft0, 36(a0) ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: flw_fsw_global: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: lui a0, %hi(G) +; RV64IF-NEXT: flw ft1, %lo(G)(a0) +; RV64IF-NEXT: fsw ft0, %lo(G)(a0) +; RV64IF-NEXT: addi a0, a0, %lo(G) +; RV64IF-NEXT: flw ft1, 36(a0) +; RV64IF-NEXT: fsw ft0, 36(a0) +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fadd float %a, %b %2 = load volatile float, float* @G store float %1, float* @G @@ -76,6 +109,18 @@ define float @flw_fsw_constant(float %a) nounwind { ; RV32IF-NEXT: fsw ft0, -273(a0) ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: flw_fsw_constant: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: lui a0, 56 +; RV64IF-NEXT: addiw a0, a0, -1353 +; RV64IF-NEXT: slli a0, a0, 14 +; RV64IF-NEXT: flw ft1, -273(a0) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fsw ft0, -273(a0) +; RV64IF-NEXT: fmv.x.w a0, ft0 +; 
RV64IF-NEXT: ret %1 = inttoptr i32 3735928559 to float* %2 = load volatile float, float* %1 %3 = fadd float %a, %2 @@ -102,6 +147,23 @@ define float @flw_stack(float %a) nounwind { ; RV32IF-NEXT: lw ra, 12(sp) ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: flw_stack: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -32 +; RV64IF-NEXT: sd ra, 24(sp) +; RV64IF-NEXT: sd s1, 16(sp) +; RV64IF-NEXT: mv s1, a0 +; RV64IF-NEXT: addi a0, sp, 12 +; RV64IF-NEXT: call notdead +; RV64IF-NEXT: fmv.w.x ft0, s1 +; RV64IF-NEXT: flw ft1, 12(sp) +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ld s1, 16(sp) +; RV64IF-NEXT: ld ra, 24(sp) +; RV64IF-NEXT: addi sp, sp, 32 +; RV64IF-NEXT: ret %1 = alloca float, align 4 %2 = bitcast float* %1 to i8* call void @notdead(i8* %2) @@ -124,6 +186,20 @@ define void @fsw_stack(float %a, float %b) nounwind { ; RV32IF-NEXT: lw ra, 12(sp) ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fsw_stack: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fsw ft0, 4(sp) +; RV64IF-NEXT: addi a0, sp, 4 +; RV64IF-NEXT: call notdead +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret %1 = fadd float %a, %b ; force store from FPR32 %2 = alloca float, align 4 store float %1, float* %2 diff --git a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll index 59d4a3f078b03..3659ed2d17504 100644 --- a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll @@ -1,12 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s define float @select_fcmp_false(float %a, float %b) nounwind { ; RV32IF-LABEL: select_fcmp_false: ; RV32IF: # %bb.0: ; RV32IF-NEXT: mv a0, a1 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_false: +; RV64IF: # %bb.0: +; RV64IF-NEXT: mv a0, a1 +; RV64IF-NEXT: ret %1 = fcmp false float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -24,6 +31,18 @@ define float @select_fcmp_oeq(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB1_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_oeq: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft1 +; RV64IF-NEXT: bnez a0, .LBB1_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB1_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp oeq float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -41,6 +60,18 @@ define float @select_fcmp_ogt(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB2_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_ogt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: bnez a0, .LBB2_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB2_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp ogt float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -58,6 +89,18 @@ define float @select_fcmp_oge(float %a, float %b) 
nounwind { ; RV32IF-NEXT: .LBB3_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_oge: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: bnez a0, .LBB3_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB3_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp oge float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -75,6 +118,18 @@ define float @select_fcmp_olt(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB4_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_olt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: flt.s a0, ft0, ft1 +; RV64IF-NEXT: bnez a0, .LBB4_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB4_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp olt float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -92,6 +147,18 @@ define float @select_fcmp_ole(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB5_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_ole: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fle.s a0, ft0, ft1 +; RV64IF-NEXT: bnez a0, .LBB5_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB5_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp ole float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -117,6 +184,25 @@ define float @select_fcmp_one(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB6_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_one: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: feq.s a1, ft0, ft1 +; RV64IF-NEXT: not a1, a1 +; RV64IF-NEXT: seqz a0, a0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: bnez a0, .LBB6_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB6_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp one float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -138,6 +224,22 @@ define float @select_fcmp_ord(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB7_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_ord: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: seqz a0, a0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB7_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB7_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp ord float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -160,6 +262,23 @@ define float @select_fcmp_ueq(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB8_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_ueq: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: seqz a0, a0 +; RV64IF-NEXT: feq.s a1, ft0, ft1 +; RV64IF-NEXT: or a0, a1, a0 +; RV64IF-NEXT: bnez a0, .LBB8_2 
+; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB8_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp ueq float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -178,6 +297,19 @@ define float @select_fcmp_ugt(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB9_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_ugt: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fle.s a0, ft0, ft1 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB9_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB9_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp ugt float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -196,6 +328,19 @@ define float @select_fcmp_uge(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB10_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_uge: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: flt.s a0, ft0, ft1 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB10_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB10_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp uge float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -214,6 +359,19 @@ define float @select_fcmp_ult(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB11_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_ult: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fle.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB11_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB11_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp ult float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -232,6 +390,19 @@ define float @select_fcmp_ule(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB12_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_ule: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: flt.s a0, ft1, ft0 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB12_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB12_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp ule float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -250,6 +421,19 @@ define float @select_fcmp_une(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB13_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_une: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft1 +; RV64IF-NEXT: xori a0, a0, 1 +; RV64IF-NEXT: bnez a0, .LBB13_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB13_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp une float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -271,6 +455,21 @@ define float @select_fcmp_uno(float %a, float %b) nounwind { ; RV32IF-NEXT: .LBB14_2: ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_uno: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: 
seqz a0, a0 +; RV64IF-NEXT: bnez a0, .LBB14_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: fmv.s ft0, ft1 +; RV64IF-NEXT: .LBB14_2: +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret %1 = fcmp uno float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -280,6 +479,10 @@ define float @select_fcmp_true(float %a, float %b) nounwind { ; RV32IF-LABEL: select_fcmp_true: ; RV32IF: # %bb.0: ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: select_fcmp_true: +; RV64IF: # %bb.0: +; RV64IF-NEXT: ret %1 = fcmp true float %a, %b %2 = select i1 %1, float %a, float %b ret float %2 @@ -298,6 +501,18 @@ define i32 @i32_select_fcmp_oeq(float %a, float %b, i32 %c, i32 %d) nounwind { ; RV32IF-NEXT: .LBB16_2: ; RV32IF-NEXT: mv a0, a2 ; RV32IF-NEXT: ret +; +; RV64IF-LABEL: i32_select_fcmp_oeq: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: bnez a0, .LBB16_2 +; RV64IF-NEXT: # %bb.1: +; RV64IF-NEXT: mv a2, a3 +; RV64IF-NEXT: .LBB16_2: +; RV64IF-NEXT: mv a0, a2 +; RV64IF-NEXT: ret %1 = fcmp oeq float %a, %b %2 = select i1 %1, i32 %c, i32 %d ret i32 %2 diff --git a/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll b/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll new file mode 100644 index 0000000000000..8d5b6dd56d766 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll @@ -0,0 +1,86 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s + +; This file provides a simple sanity check of float and double operations for +; RV32I and RV64I. This is primarily intended to ensure that custom +; legalisation or DAG combines aren't incorrectly triggered when the F +; extension isn't enabled. + +; TODO: f32 parameters on RV64 with a soft-float ABI are anyext. 
+ +define float @float_test(float %a, float %b) nounwind { +; RV32IF-LABEL: float_test: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) +; RV32IF-NEXT: sw s1, 8(sp) +; RV32IF-NEXT: mv s1, a1 +; RV32IF-NEXT: call __addsf3 +; RV32IF-NEXT: mv a1, s1 +; RV32IF-NEXT: call __divsf3 +; RV32IF-NEXT: lw s1, 8(sp) +; RV32IF-NEXT: lw ra, 12(sp) +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: float_test: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: sd s1, 0(sp) +; RV64IF-NEXT: slli a0, a0, 32 +; RV64IF-NEXT: srli a0, a0, 32 +; RV64IF-NEXT: slli a1, a1, 32 +; RV64IF-NEXT: srli s1, a1, 32 +; RV64IF-NEXT: mv a1, s1 +; RV64IF-NEXT: call __addsf3 +; RV64IF-NEXT: mv a1, s1 +; RV64IF-NEXT: call __divsf3 +; RV64IF-NEXT: ld s1, 0(sp) +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret + %1 = fadd float %a, %b + %2 = fdiv float %1, %b + ret float %2 +} + +define double @double_test(double %a, double %b) nounwind { +; RV32IF-LABEL: double_test: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) +; RV32IF-NEXT: sw s1, 8(sp) +; RV32IF-NEXT: sw s2, 4(sp) +; RV32IF-NEXT: mv s2, a3 +; RV32IF-NEXT: mv s1, a2 +; RV32IF-NEXT: call __adddf3 +; RV32IF-NEXT: mv a2, s1 +; RV32IF-NEXT: mv a3, s2 +; RV32IF-NEXT: call __divdf3 +; RV32IF-NEXT: lw s2, 4(sp) +; RV32IF-NEXT: lw s1, 8(sp) +; RV32IF-NEXT: lw ra, 12(sp) +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: double_test: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) +; RV64IF-NEXT: sd s1, 0(sp) +; RV64IF-NEXT: mv s1, a1 +; RV64IF-NEXT: call __adddf3 +; RV64IF-NEXT: mv a1, s1 +; RV64IF-NEXT: call __divdf3 +; RV64IF-NEXT: ld s1, 0(sp) +; RV64IF-NEXT: ld ra, 8(sp) +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret + %1 = fadd double %a, %b + %2 = fdiv double %1, %b + ret double %2 +} diff --git a/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll new file mode 100644 index 0000000000000..5f8952085ae89 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll @@ -0,0 +1,187 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=RV64IF + +; This file exhaustively checks float<->i32 conversions. In general, +; fcvt.l[u].s can be selected instead of fcvt.w[u].s because poison is +; generated for an fpto[s|u]i conversion if the result doesn't fit in the +; target type. 
+ +define i32 @aext_fptosi(float %a) nounwind { +; RV64IF-LABEL: aext_fptosi: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz +; RV64IF-NEXT: ret + %1 = fptosi float %a to i32 + ret i32 %1 +} + +define signext i32 @sext_fptosi(float %a) nounwind { +; RV64IF-LABEL: sext_fptosi: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz +; RV64IF-NEXT: ret + %1 = fptosi float %a to i32 + ret i32 %1 +} + +define zeroext i32 @zext_fptosi(float %a) nounwind { +; RV64IF-LABEL: zext_fptosi: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz +; RV64IF-NEXT: slli a0, a0, 32 +; RV64IF-NEXT: srli a0, a0, 32 +; RV64IF-NEXT: ret + %1 = fptosi float %a to i32 + ret i32 %1 +} + +define i32 @aext_fptoui(float %a) nounwind { +; RV64IF-LABEL: aext_fptoui: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.lu.s a0, ft0, rtz +; RV64IF-NEXT: ret + %1 = fptoui float %a to i32 + ret i32 %1 +} + +define signext i32 @sext_fptoui(float %a) nounwind { +; RV64IF-LABEL: sext_fptoui: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.wu.s a0, ft0, rtz +; RV64IF-NEXT: ret + %1 = fptoui float %a to i32 + ret i32 %1 +} + +define zeroext i32 @zext_fptoui(float %a) nounwind { +; RV64IF-LABEL: zext_fptoui: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: fcvt.lu.s a0, ft0, rtz +; RV64IF-NEXT: ret + %1 = fptoui float %a to i32 + ret i32 %1 +} + +define i32 @bcvt_f32_to_aext_i32(float %a, float %b) nounwind { +; RV64IF-LABEL: bcvt_f32_to_aext_i32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = fadd float %a, %b + %2 = bitcast float %1 to i32 + ret i32 %2 +} + +define signext i32 @bcvt_f32_to_sext_i32(float %a, float %b) nounwind { +; RV64IF-LABEL: bcvt_f32_to_sext_i32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = fadd float %a, %b + %2 = bitcast float %1 to i32 + ret i32 %2 +} + +define zeroext i32 @bcvt_f32_to_zext_i32(float %a, float %b) nounwind { +; RV64IF-LABEL: bcvt_f32_to_zext_i32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: slli a0, a0, 32 +; RV64IF-NEXT: srli a0, a0, 32 +; RV64IF-NEXT: ret + %1 = fadd float %a, %b + %2 = bitcast float %1 to i32 + ret i32 %2 +} + +define float @bcvt_i64_to_f32_via_i32(i64 %a, i64 %b) nounwind { +; RV64IF-LABEL: bcvt_i64_to_f32_via_i32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = trunc i64 %a to i32 + %2 = trunc i64 %b to i32 + %3 = bitcast i32 %1 to float + %4 = bitcast i32 %2 to float + %5 = fadd float %3, %4 + ret float %5 +} + +define float @uitofp_aext_i32_to_f32(i32 %a) nounwind { +; RV64IF-LABEL: uitofp_aext_i32_to_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.wu ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = uitofp i32 %a to float + ret float %1 +} + +define float @uitofp_sext_i32_to_f32(i32 signext %a) nounwind { +; RV64IF-LABEL: uitofp_sext_i32_to_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.wu ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = 
uitofp i32 %a to float + ret float %1 +} + +define float @uitofp_zext_i32_to_f32(i32 zeroext %a) nounwind { +; RV64IF-LABEL: uitofp_zext_i32_to_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.wu ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = uitofp i32 %a to float + ret float %1 +} + +define float @sitofp_aext_i32_to_f32(i32 %a) nounwind { +; RV64IF-LABEL: sitofp_aext_i32_to_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.w ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = sitofp i32 %a to float + ret float %1 +} + +define float @sitofp_sext_i32_to_f32(i32 signext %a) nounwind { +; RV64IF-LABEL: sitofp_sext_i32_to_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.l ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = sitofp i32 %a to float + ret float %1 +} + +define float @sitofp_zext_i32_to_f32(i32 zeroext %a) nounwind { +; RV64IF-LABEL: sitofp_zext_i32_to_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fcvt.s.w ft0, a0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret + %1 = sitofp i32 %a to float + ret float %1 +} From 70b9bc4d72a02a2d2164ce807075e55ea27ff466 Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Fri, 1 Feb 2019 03:53:30 +0000 Subject: [PATCH 11/11] [RISCV] Implement RV64D codegen This patch: * Adds necessary RV64D codegen patterns * Modifies CC_RISCV so it will properly handle f64 types (with soft float ABI) Note that in general there is no reason to try to select fcvt.w[u].d rather than fcvt.l[u].d for i32 conversions because fptosi/fptoui produce poison if the input won't fit into the target type. Differential Revision: https://reviews.llvm.org/D53237 llvm-svn: 352833 --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 11 +- llvm/lib/Target/RISCV/RISCVInstrInfoD.td | 23 ++ llvm/test/CodeGen/RISCV/double-arith.ll | 167 +++++++++++ .../RISCV/double-bitmanip-dagcombines.ll | 28 ++ llvm/test/CodeGen/RISCV/double-br-fcmp.ll | 278 ++++++++++++++++++ llvm/test/CodeGen/RISCV/double-convert.ll | 180 ++++++++++++ llvm/test/CodeGen/RISCV/double-fcmp.ll | 134 +++++++++ llvm/test/CodeGen/RISCV/double-imm.ll | 27 ++ llvm/test/CodeGen/RISCV/double-intrinsics.ll | 217 ++++++++++++++ llvm/test/CodeGen/RISCV/double-mem.ll | 83 ++++++ llvm/test/CodeGen/RISCV/double-select-fcmp.ll | 215 ++++++++++++++ .../RISCV/double-stack-spill-restore.ll | 24 ++ .../CodeGen/RISCV/rv64d-double-convert.ll | 130 ++++++++ 13 files changed, 1513 insertions(+), 4 deletions(-) create mode 100644 llvm/test/CodeGen/RISCV/rv64d-double-convert.ll diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 7f880d28e7635..dbbe046ae2f8c 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -936,6 +936,10 @@ static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT, LocVT = XLenVT; LocInfo = CCValAssign::BCvt; } + if (XLen == 64 && ValVT == MVT::f64) { + LocVT = MVT::i64; + LocInfo = CCValAssign::BCvt; + } // Any return value split in to more than two values can't be returned // directly. @@ -1043,8 +1047,9 @@ static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT, return false; } - if (ValVT == MVT::f32) { - LocVT = MVT::f32; + // When an f32 or f64 is passed on the stack, no bit-conversion is needed. 
+ if (ValVT == MVT::f32 || ValVT == MVT::f64) { + LocVT = ValVT; LocInfo = CCValAssign::Full; } State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); @@ -1179,8 +1184,6 @@ static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, ExtType = ISD::NON_EXTLOAD; break; } - if (ValVT == MVT::f32) - LocVT = MVT::f32; Val = DAG.getExtLoad( ExtType, DL, LocVT, Chain, FIN, MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td index 9f1cd50de595d..56fb1a414d478 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td @@ -308,3 +308,26 @@ def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_WU_D FPR64:$rs1, 0b001)>; def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_W GPR:$rs1)>; def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_WU GPR:$rs1)>; } // Predicates = [HasStdExtD, IsRV32] + +let Predicates = [HasStdExtD, IsRV64] in { +def : Pat<(bitconvert GPR:$rs1), (FMV_D_X GPR:$rs1)>; +def : Pat<(bitconvert FPR64:$rs1), (FMV_X_D FPR64:$rs1)>; + +// FP->[u]int32 is mostly handled by the FP->[u]int64 patterns. This is safe +// because fpto[u|s]i produce poison if the value can't fit into the target. +// We match the single case below because fcvt.wu.d sign-extends its result so +// is cheaper than fcvt.lu.d+sext.w. +def : Pat<(sext_inreg (zexti32 (fp_to_uint FPR64:$rs1)), i32), + (FCVT_WU_D $rs1, 0b001)>; + +// [u]int32->fp +def : Pat<(sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_D_W $rs1)>; +def : Pat<(uint_to_fp (zexti32 GPR:$rs1)), (FCVT_D_WU $rs1)>; + +def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_L_D FPR64:$rs1, 0b001)>; +def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_LU_D FPR64:$rs1, 0b001)>; + +// [u]int64->fp. Match GCC and default to using dynamic rounding mode. +def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_L GPR:$rs1, 0b111)>; +def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_LU GPR:$rs1, 0b111)>; +} // Predicates = [HasStdExtD, IsRV64] diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll index a5243ea18ab14..2b696210ae1df 100644 --- a/llvm/test/CodeGen/RISCV/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/double-arith.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IFD %s ; These tests are each targeted at a particular RISC-V FPU instruction. 
Most ; other files in this folder exercise LLVM IR instructions that don't directly @@ -22,6 +24,14 @@ define double @fadd_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fadd_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fadd double %a, %b ret double %1 } @@ -42,6 +52,14 @@ define double @fsub_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fsub_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fsub.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fsub double %a, %b ret double %1 } @@ -62,6 +80,14 @@ define double @fmul_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmul_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fmul.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fmul double %a, %b ret double %1 } @@ -82,6 +108,14 @@ define double @fdiv_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fdiv_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fdiv.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fdiv double %a, %b ret double %1 } @@ -101,6 +135,13 @@ define double @fsqrt_d(double %a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fsqrt_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fsqrt.d ft0, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.sqrt.f64(double %a) ret double %1 } @@ -123,6 +164,14 @@ define double @fsgnj_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fsgnj_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fsgnj.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.copysign.f64(double %a, double %b) ret double %1 } @@ -141,6 +190,14 @@ define i32 @fneg_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: feq.d a0, ft0, ft1 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fneg_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fadd.d ft0, ft0, ft0 +; RV64IFD-NEXT: fneg.d ft1, ft0 +; RV64IFD-NEXT: feq.d a0, ft0, ft1 +; RV64IFD-NEXT: ret %1 = fadd double %a, %a %2 = fneg double %1 %3 = fcmp oeq double %1, %2 @@ -149,6 +206,9 @@ define i32 @fneg_d(double %a, double %b) nounwind { } define double @fsgnjn_d(double %a, double %b) nounwind { +; TODO: fsgnjn.s isn't selected on RV64 because DAGCombiner::visitBITCAST will +; convert (bitconvert (fneg x)) to a xor. 
+; ; RV32IFD-LABEL: fsgnjn_d: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: addi sp, sp, -16 @@ -164,6 +224,17 @@ define double @fsgnjn_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fsgnjn_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi a2, zero, -1 +; RV64IFD-NEXT: slli a2, a2, 63 +; RV64IFD-NEXT: xor a1, a1, a2 +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fsgnj.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fsub double -0.0, %b %2 = call double @llvm.copysign.f64(double %a, double %1) ret double %2 @@ -191,6 +262,16 @@ define double @fabs_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fabs_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fabs.d ft1, ft0 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fadd double %a, %b %2 = call double @llvm.fabs.f64(double %1) %3 = fadd double %2, %1 @@ -215,6 +296,14 @@ define double @fmin_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmin_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fmin.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.minnum.f64(double %a, double %b) ret double %1 } @@ -237,6 +326,14 @@ define double @fmax_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmax_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fmax.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.maxnum.f64(double %a, double %b) ret double %1 } @@ -254,6 +351,13 @@ define i32 @feq_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: feq.d a0, ft1, ft0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: feq_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: ret %1 = fcmp oeq double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -272,6 +376,13 @@ define i32 @flt_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: flt.d a0, ft1, ft0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: flt_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: ret %1 = fcmp olt double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -290,6 +401,13 @@ define i32 @fle_d(double %a, double %b) nounwind { ; RV32IFD-NEXT: fle.d a0, ft1, ft0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fle_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ole double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -316,6 +434,15 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmadd_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a2 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft2, a0 
+; RV64IFD-NEXT: fmadd.d ft0, ft2, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.fma.f64(double %a, double %b, double %c) ret double %1 } @@ -343,6 +470,19 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmsub_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a2 +; RV64IFD-NEXT: lui a2, %hi(.LCPI15_0) +; RV64IFD-NEXT: addi a2, a2, %lo(.LCPI15_0) +; RV64IFD-NEXT: fld ft1, 0(a2) +; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft2, a0 +; RV64IFD-NEXT: fmsub.d ft0, ft2, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %c_ = fadd double 0.0, %c ; avoid negation using xor %negc = fsub double -0.0, %c_ %1 = call double @llvm.fma.f64(double %a, double %b, double %negc) @@ -373,6 +513,20 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fnmadd_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a2 +; RV64IFD-NEXT: lui a2, %hi(.LCPI16_0) +; RV64IFD-NEXT: addi a2, a2, %lo(.LCPI16_0) +; RV64IFD-NEXT: fld ft1, 0(a2) +; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: fmv.d.x ft2, a0 +; RV64IFD-NEXT: fadd.d ft1, ft2, ft1 +; RV64IFD-NEXT: fmv.d.x ft2, a1 +; RV64IFD-NEXT: fnmadd.d ft0, ft1, ft2, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %a_ = fadd double 0.0, %a %c_ = fadd double 0.0, %c %nega = fsub double -0.0, %a_ @@ -404,6 +558,19 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fnmsub_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: lui a0, %hi(.LCPI17_0) +; RV64IFD-NEXT: addi a0, a0, %lo(.LCPI17_0) +; RV64IFD-NEXT: fld ft1, 0(a0) +; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: fmv.d.x ft1, a2 +; RV64IFD-NEXT: fmv.d.x ft2, a1 +; RV64IFD-NEXT: fnmsub.d ft0, ft0, ft2, ft1 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %a_ = fadd double 0.0, %a %nega = fsub double -0.0, %a_ %1 = call double @llvm.fma.f64(double %nega, double %b, double %c) diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll index 25364cf4e48d3..626a239e298a0 100644 --- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll @@ -5,6 +5,8 @@ ; RUN: | FileCheck -check-prefix=RV32IFD %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IFD %s ; ; This file tests cases where simple floating point operations can be ; profitably handled though bit manipulation if a soft-float ABI is being used @@ -34,6 +36,13 @@ define double @fneg(double %a) nounwind { ; RV64I-NEXT: slli a1, a1, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: ret +; +; RV64IFD-LABEL: fneg: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi a1, zero, -1 +; RV64IFD-NEXT: slli a1, a1, 63 +; RV64IFD-NEXT: xor a0, a0, a1 +; RV64IFD-NEXT: ret %1 = fneg double %a ret double %1 } @@ -62,6 +71,14 @@ define double @fabs(double %a) nounwind { ; RV64I-NEXT: addi a1, a1, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret +; +; RV64IFD-LABEL: fabs: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: 
addi a1, zero, -1 +; RV64IFD-NEXT: slli a1, a1, 63 +; RV64IFD-NEXT: addi a1, a1, -1 +; RV64IFD-NEXT: and a0, a0, a1 +; RV64IFD-NEXT: ret %1 = call double @llvm.fabs.f64(double %a) ret double %1 } @@ -109,6 +126,17 @@ define double @fcopysign_fneg(double %a, double %b) nounwind { ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret +; +; RV64IFD-LABEL: fcopysign_fneg: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi a2, zero, -1 +; RV64IFD-NEXT: slli a2, a2, 63 +; RV64IFD-NEXT: xor a1, a1, a2 +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fsgnj.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fneg double %b %2 = call double @llvm.copysign.f64(double %a, double %1) ret double %2 diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll index d1055c665864f..275c4aa05540a 100644 --- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IFD %s declare void @abort() declare void @exit(i32) @@ -18,6 +20,19 @@ define void @br_fcmp_false(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB0_2: # %if.else ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_false: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: addi a0, zero, 1 +; RV64IFD-NEXT: bnez a0, .LBB0_2 +; RV64IFD-NEXT: # %bb.1: # %if.then +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB0_2: # %if.else +; RV64IFD-NEXT: call abort %1 = fcmp false double %a, %b br i1 %1, label %if.then, label %if.else if.then: @@ -46,6 +61,21 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB1_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_oeq: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: bnez a0, .LBB1_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB1_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp oeq double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -78,6 +108,22 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB2_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_oeq_alt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: beqz a0, .LBB2_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB2_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp oeq double %a, %b br i1 %1, label %if.then, label %if.else if.then: @@ -106,6 +152,21 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB3_2: # %if.then ; RV32IFD-NEXT: call abort +; +; 
RV64IFD-LABEL: br_fcmp_ogt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: bnez a0, .LBB3_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB3_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp ogt double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -134,6 +195,21 @@ define void @br_fcmp_oge(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB4_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_oge: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: bnez a0, .LBB4_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB4_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp oge double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -162,6 +238,21 @@ define void @br_fcmp_olt(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB5_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_olt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: bnez a0, .LBB5_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB5_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp olt double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -190,6 +281,21 @@ define void @br_fcmp_ole(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB6_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_ole: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: bnez a0, .LBB6_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB6_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp ole double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -226,6 +332,28 @@ define void @br_fcmp_one(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB7_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_one: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: feq.d a0, ft1, ft1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: feq.d a1, ft0, ft1 +; RV64IFD-NEXT: not a1, a1 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: bnez a0, .LBB7_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB7_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp one double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -258,6 +386,25 @@ define void @br_fcmp_ord(double %a, double %b) nounwind { ; 
RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB8_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_ord: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: feq.d a0, ft0, ft0 +; RV64IFD-NEXT: and a0, a0, a1 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB8_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB8_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp ord double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -291,6 +438,26 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB9_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_ueq: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: feq.d a2, ft1, ft1 +; RV64IFD-NEXT: and a1, a2, a1 +; RV64IFD-NEXT: seqz a1, a1 +; RV64IFD-NEXT: or a0, a0, a1 +; RV64IFD-NEXT: bnez a0, .LBB9_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB9_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp ueq double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -320,6 +487,22 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB10_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_ugt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB10_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB10_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp ugt double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -349,6 +532,22 @@ define void @br_fcmp_uge(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB11_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_uge: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB11_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB11_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp uge double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -378,6 +577,22 @@ define void @br_fcmp_ult(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB12_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_ult: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB12_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 
16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB12_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp ult double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -407,6 +622,22 @@ define void @br_fcmp_ule(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB13_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_ule: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB13_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB13_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp ule double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -436,6 +667,22 @@ define void @br_fcmp_une(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB14_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_une: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB14_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB14_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp une double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -468,6 +715,24 @@ define void @br_fcmp_uno(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB15_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_uno: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: feq.d a0, ft0, ft0 +; RV64IFD-NEXT: and a0, a0, a1 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: bnez a0, .LBB15_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB15_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp uno double %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -490,6 +755,19 @@ define void @br_fcmp_true(double %a, double %b) nounwind { ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB16_2: # %if.then ; RV32IFD-NEXT: call abort +; +; RV64IFD-LABEL: br_fcmp_true: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: addi a0, zero, 1 +; RV64IFD-NEXT: bnez a0, .LBB16_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB16_2: # %if.then +; RV64IFD-NEXT: call abort %1 = fcmp true double %a, %b br i1 %1, label %if.then, label %if.else if.else: diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll index 44de78dc74025..1d30fdadf4830 100644 --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IFD %s 
define float @fcvt_s_d(double %a) nounwind { ; RV32IFD-LABEL: fcvt_s_d: @@ -13,6 +15,13 @@ define float @fcvt_s_d(double %a) nounwind { ; RV32IFD-NEXT: fmv.x.w a0, ft0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_s_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fcvt.s.d ft0, ft0 +; RV64IFD-NEXT: fmv.x.w a0, ft0 +; RV64IFD-NEXT: ret %1 = fptrunc double %a to float ret float %1 } @@ -28,10 +37,19 @@ define double @fcvt_d_s(float %a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_d_s: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.w.x ft0, a0 +; RV64IFD-NEXT: fcvt.d.s ft0, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fpext float %a to double ret double %1 } +; For RV64D, fcvt.l.d is semantically equivalent to fcvt.w.d in this case +; because fptosi will produce poison if the result doesn't fit into an i32. define i32 @fcvt_w_d(double %a) nounwind { ; RV32IFD-LABEL: fcvt_w_d: ; RV32IFD: # %bb.0: @@ -42,10 +60,18 @@ define i32 @fcvt_w_d(double %a) nounwind { ; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_w_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz +; RV64IFD-NEXT: ret %1 = fptosi double %a to i32 ret i32 %1 } +; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case +; because fptosi will produce poison if the result doesn't fit into an i32. define i32 @fcvt_wu_d(double %a) nounwind { ; RV32IFD-LABEL: fcvt_wu_d: ; RV32IFD: # %bb.0: @@ -56,6 +82,12 @@ define i32 @fcvt_wu_d(double %a) nounwind { ; RV32IFD-NEXT: fcvt.wu.d a0, ft0, rtz ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_wu_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fcvt.lu.d a0, ft0, rtz +; RV64IFD-NEXT: ret %1 = fptoui double %a to i32 ret i32 %1 } @@ -70,6 +102,12 @@ define double @fcvt_d_w(i32 %a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_d_w: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.d.w ft0, a0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = sitofp i32 %a to double ret double %1 } @@ -84,6 +122,148 @@ define double @fcvt_d_wu(i32 %a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_d_wu: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.d.wu ft0, a0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = uitofp i32 %a to double ret double %1 } + +define i64 @fcvt_l_d(double %a) nounwind { +; RV32IFD-LABEL: fcvt_l_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) +; RV32IFD-NEXT: call __fixdfdi +; RV32IFD-NEXT: lw ra, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_l_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz +; RV64IFD-NEXT: ret + %1 = fptosi double %a to i64 + ret i64 %1 +} + +define i64 @fcvt_lu_d(double %a) nounwind { +; RV32IFD-LABEL: fcvt_lu_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) +; RV32IFD-NEXT: call __fixunsdfdi +; RV32IFD-NEXT: lw ra, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_lu_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fcvt.lu.d a0, ft0, rtz +; RV64IFD-NEXT: ret + %1 = fptoui 
double %a to i64 + ret i64 %1 +} + +define i64 @fmv_x_d(double %a, double %b) nounwind { +; RV32IFD-LABEL: fmv_x_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a2, 0(sp) +; RV32IFD-NEXT: sw a3, 4(sp) +; RV32IFD-NEXT: fld ft0, 0(sp) +; RV32IFD-NEXT: sw a0, 0(sp) +; RV32IFD-NEXT: sw a1, 4(sp) +; RV32IFD-NEXT: fld ft1, 0(sp) +; RV32IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmv_x_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret + %1 = fadd double %a, %b + %2 = bitcast double %1 to i64 + ret i64 %2 +} + +define double @fcvt_d_l(i64 %a) nounwind { +; RV32IFD-LABEL: fcvt_d_l: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) +; RV32IFD-NEXT: call __floatdidf +; RV32IFD-NEXT: lw ra, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_d_l: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.d.l ft0, a0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret + %1 = sitofp i64 %a to double + ret double %1 +} + +define double @fcvt_d_lu(i64 %a) nounwind { +; RV32IFD-LABEL: fcvt_d_lu: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) +; RV32IFD-NEXT: call __floatundidf +; RV32IFD-NEXT: lw ra, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcvt_d_lu: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.d.lu ft0, a0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret + %1 = uitofp i64 %a to double + ret double %1 +} + +define double @fmv_d_x(i64 %a, i64 %b) nounwind { +; Ensure fmv.d.x is generated even for a soft double calling convention +; RV32IFD-LABEL: fmv_d_x: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -32 +; RV32IFD-NEXT: sw a3, 20(sp) +; RV32IFD-NEXT: sw a2, 16(sp) +; RV32IFD-NEXT: sw a1, 28(sp) +; RV32IFD-NEXT: sw a0, 24(sp) +; RV32IFD-NEXT: fld ft0, 16(sp) +; RV32IFD-NEXT: fld ft1, 24(sp) +; RV32IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 32 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmv_d_x: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret + %1 = bitcast i64 %a to double + %2 = bitcast i64 %b to double + %3 = fadd double %1, %2 + ret double %3 +} diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll index adcd804122b82..25c8f6d96fb80 100644 --- a/llvm/test/CodeGen/RISCV/double-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll @@ -1,12 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IFD %s define i32 @fcmp_false(double %a, double %b) nounwind { ; RV32IFD-LABEL: fcmp_false: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: mv a0, zero ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_false: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: mv a0, zero +; RV64IFD-NEXT: ret %1 = fcmp false double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -25,6 +32,13 @@ 
define i32 @fcmp_oeq(double %a, double %b) nounwind { ; RV32IFD-NEXT: feq.d a0, ft1, ft0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_oeq: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: ret %1 = fcmp oeq double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -43,6 +57,13 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind { ; RV32IFD-NEXT: flt.d a0, ft1, ft0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_ogt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ogt double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -61,6 +82,13 @@ define i32 @fcmp_oge(double %a, double %b) nounwind { ; RV32IFD-NEXT: fle.d a0, ft1, ft0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_oge: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: ret %1 = fcmp oge double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -79,6 +107,13 @@ define i32 @fcmp_olt(double %a, double %b) nounwind { ; RV32IFD-NEXT: flt.d a0, ft1, ft0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_olt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: ret %1 = fcmp olt double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -97,6 +132,13 @@ define i32 @fcmp_ole(double %a, double %b) nounwind { ; RV32IFD-NEXT: fle.d a0, ft1, ft0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_ole: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ole double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -122,6 +164,20 @@ define i32 @fcmp_one(double %a, double %b) nounwind { ; RV32IFD-NEXT: and a0, a1, a0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_one: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: feq.d a0, ft1, ft1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: feq.d a1, ft0, ft1 +; RV64IFD-NEXT: not a1, a1 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: ret %1 = fcmp one double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -144,6 +200,17 @@ define i32 @fcmp_ord(double %a, double %b) nounwind { ; RV32IFD-NEXT: xori a0, a0, 1 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_ord: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: feq.d a0, ft0, ft0 +; RV64IFD-NEXT: and a0, a0, a1 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: ret %1 = fcmp ord double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -167,6 +234,18 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind { ; RV32IFD-NEXT: or a0, a0, a1 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_ueq: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: feq.d a2, ft1, ft1 +; RV64IFD-NEXT: and a1, a2, a1 +; RV64IFD-NEXT: seqz a1, 
a1 +; RV64IFD-NEXT: or a0, a0, a1 +; RV64IFD-NEXT: ret %1 = fcmp ueq double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -186,6 +265,14 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind { ; RV32IFD-NEXT: xori a0, a0, 1 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_ugt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: ret %1 = fcmp ugt double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -205,6 +292,14 @@ define i32 @fcmp_uge(double %a, double %b) nounwind { ; RV32IFD-NEXT: xori a0, a0, 1 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_uge: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: ret %1 = fcmp uge double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -224,6 +319,14 @@ define i32 @fcmp_ult(double %a, double %b) nounwind { ; RV32IFD-NEXT: xori a0, a0, 1 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_ult: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: ret %1 = fcmp ult double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -243,6 +346,14 @@ define i32 @fcmp_ule(double %a, double %b) nounwind { ; RV32IFD-NEXT: xori a0, a0, 1 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_ule: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: ret %1 = fcmp ule double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -262,6 +373,14 @@ define i32 @fcmp_une(double %a, double %b) nounwind { ; RV32IFD-NEXT: xori a0, a0, 1 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_une: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: ret %1 = fcmp une double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -283,6 +402,16 @@ define i32 @fcmp_uno(double %a, double %b) nounwind { ; RV32IFD-NEXT: seqz a0, a0 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_uno: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: feq.d a0, ft0, ft0 +; RV64IFD-NEXT: and a0, a0, a1 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: ret %1 = fcmp uno double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -293,6 +422,11 @@ define i32 @fcmp_true(double %a, double %b) nounwind { ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: addi a0, zero, 1 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fcmp_true: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi a0, zero, 1 +; RV64IFD-NEXT: ret %1 = fcmp true double %a, %b %2 = zext i1 %1 to i32 ret i32 %2 diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll index a652d862175ae..8a763b82202a4 100644 --- a/llvm/test/CodeGen/RISCV/double-imm.ll +++ b/llvm/test/CodeGen/RISCV/double-imm.ll @@ -1,8 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s 
\ +; RUN: | FileCheck -check-prefix=RV64IFD %s define double @double_imm() nounwind { +; TODO: Should probably prefer fld or ld on RV64 rather than materialising an +; expensive constant. +; ; RV32IFD-LABEL: double_imm: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: addi sp, sp, -16 @@ -14,6 +19,18 @@ define double @double_imm() nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: double_imm: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 512 +; RV64IFD-NEXT: addiw a0, a0, 1169 +; RV64IFD-NEXT: slli a0, a0, 15 +; RV64IFD-NEXT: addi a0, a0, -299 +; RV64IFD-NEXT: slli a0, a0, 14 +; RV64IFD-NEXT: addi a0, a0, 1091 +; RV64IFD-NEXT: slli a0, a0, 12 +; RV64IFD-NEXT: addi a0, a0, -744 +; RV64IFD-NEXT: ret ret double 3.1415926535897931159979634685441851615905761718750 } @@ -33,6 +50,16 @@ define double @double_imm_op(double %a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: double_imm_op: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: lui a0, %hi(.LCPI1_0) +; RV64IFD-NEXT: addi a0, a0, %lo(.LCPI1_0) +; RV64IFD-NEXT: fld ft1, 0(a0) +; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fadd double %a, 1.0 ret double %1 } diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll index 7e69bb1743a4f..354601b91a8ce 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IFD %s declare double @llvm.sqrt.f64(double) @@ -17,6 +19,13 @@ define double @sqrt_f64(double %a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: sqrt_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fsqrt.d ft0, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.sqrt.f64(double %a) ret double %1 } @@ -32,6 +41,16 @@ define double @powi_f64(double %a, i32 %b) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: powi_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: sext.w a1, a1 +; RV64IFD-NEXT: call __powidf2 +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.powi.f64(double %a, i32 %b) ret double %1 } @@ -47,6 +66,15 @@ define double @sin_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: sin_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call sin +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.sin.f64(double %a) ret double %1 } @@ -62,6 +90,15 @@ define double @cos_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: cos_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call cos +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, 
sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.cos.f64(double %a) ret double %1 } @@ -101,6 +138,27 @@ define double @sincos_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 28(sp) ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: sincos_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -32 +; RV64IFD-NEXT: sd ra, 24(sp) +; RV64IFD-NEXT: sd s1, 16(sp) +; RV64IFD-NEXT: sd s2, 8(sp) +; RV64IFD-NEXT: mv s1, a0 +; RV64IFD-NEXT: call sin +; RV64IFD-NEXT: mv s2, a0 +; RV64IFD-NEXT: mv a0, s1 +; RV64IFD-NEXT: call cos +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, s2 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ld s2, 8(sp) +; RV64IFD-NEXT: ld s1, 16(sp) +; RV64IFD-NEXT: ld ra, 24(sp) +; RV64IFD-NEXT: addi sp, sp, 32 +; RV64IFD-NEXT: ret %1 = call double @llvm.sin.f64(double %a) %2 = call double @llvm.cos.f64(double %a) %3 = fadd double %1, %2 @@ -118,6 +176,15 @@ define double @pow_f64(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: pow_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call pow +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.pow.f64(double %a, double %b) ret double %1 } @@ -133,6 +200,15 @@ define double @exp_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: exp_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call exp +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.exp.f64(double %a) ret double %1 } @@ -148,6 +224,15 @@ define double @exp2_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: exp2_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call exp2 +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.exp2.f64(double %a) ret double %1 } @@ -163,6 +248,15 @@ define double @log_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: log_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call log +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.log.f64(double %a) ret double %1 } @@ -178,6 +272,15 @@ define double @log10_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: log10_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call log10 +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.log10.f64(double %a) ret double %1 } @@ -193,6 +296,15 @@ define double @log2_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: log2_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call log2 +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.log2.f64(double %a) ret 
double %1 } @@ -218,6 +330,15 @@ define double @fma_f64(double %a, double %b, double %c) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fma_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a2 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft2, a0 +; RV64IFD-NEXT: fmadd.d ft0, ft2, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.fma.f64(double %a, double %b, double %c) ret double %1 } @@ -245,6 +366,16 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmuladd_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fmul.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.d.x ft1, a2 +; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c) ret double %1 } @@ -258,6 +389,14 @@ define double @fabs_f64(double %a) nounwind { ; RV32IFD-NEXT: addi a2, a2, -1 ; RV32IFD-NEXT: and a1, a1, a2 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fabs_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi a1, zero, -1 +; RV64IFD-NEXT: slli a1, a1, 63 +; RV64IFD-NEXT: addi a1, a1, -1 +; RV64IFD-NEXT: and a0, a0, a1 +; RV64IFD-NEXT: ret %1 = call double @llvm.fabs.f64(double %a) ret double %1 } @@ -280,6 +419,14 @@ define double @minnum_f64(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: minnum_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fmin.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.minnum.f64(double %a, double %b) ret double %1 } @@ -302,6 +449,14 @@ define double @maxnum_f64(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: maxnum_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fmax.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.maxnum.f64(double %a, double %b) ret double %1 } @@ -341,6 +496,14 @@ define double @copysign_f64(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: copysign_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fsgnj.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = call double @llvm.copysign.f64(double %a, double %b) ret double %1 } @@ -356,6 +519,15 @@ define double @floor_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: floor_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call floor +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.floor.f64(double %a) ret double %1 } @@ -371,6 +543,15 @@ define double @ceil_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: ceil_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call ceil +; 
RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.ceil.f64(double %a) ret double %1 } @@ -386,6 +567,15 @@ define double @trunc_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: trunc_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call trunc +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.trunc.f64(double %a) ret double %1 } @@ -401,6 +591,15 @@ define double @rint_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: rint_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call rint +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.rint.f64(double %a) ret double %1 } @@ -416,6 +615,15 @@ define double @nearbyint_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: nearbyint_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call nearbyint +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.nearbyint.f64(double %a) ret double %1 } @@ -431,6 +639,15 @@ define double @round_f64(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: round_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: call round +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = call double @llvm.round.f64(double %a) ret double %1 } diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll index 5b20447ed73ea..a7a93de8a50e0 100644 --- a/llvm/test/CodeGen/RISCV/double-mem.ll +++ b/llvm/test/CodeGen/RISCV/double-mem.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IFD %s define double @fld(double *%a) nounwind { ; RV32IFD-LABEL: fld: @@ -14,6 +16,14 @@ define double @fld(double *%a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fld: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fld ft0, 24(a0) +; RV64IFD-NEXT: fld ft1, 0(a0) +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = load double, double* %a %2 = getelementptr double, double* %a, i32 3 %3 = load double, double* %2 @@ -38,6 +48,15 @@ define void @fsd(double *%a, double %b, double %c) nounwind { ; RV32IFD-NEXT: fsd ft0, 0(a0) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fsd: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a2 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fsd ft0, 64(a0) +; RV64IFD-NEXT: fsd ft0, 0(a0) +; RV64IFD-NEXT: ret ; Use %b and %c in an FP op to ensure floating point registers are used, even ; for the soft float ABI %1 = fadd double %b, %c @@ -72,6 +91,20 @@ define double @fld_fsd_global(double %a, 
double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fld_fsd_global: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: lui a0, %hi(G) +; RV64IFD-NEXT: fld ft1, %lo(G)(a0) +; RV64IFD-NEXT: fsd ft0, %lo(G)(a0) +; RV64IFD-NEXT: addi a0, a0, %lo(G) +; RV64IFD-NEXT: fld ft1, 72(a0) +; RV64IFD-NEXT: fsd ft0, 72(a0) +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret ; Use %a and %b in an FP op to ensure floating point registers are used, even ; for the soft float ABI %1 = fadd double %a, %b @@ -100,6 +133,18 @@ define double @fld_fsd_constant(double %a) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fld_fsd_constant: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: lui a0, 56 +; RV64IFD-NEXT: addiw a0, a0, -1353 +; RV64IFD-NEXT: slli a0, a0, 14 +; RV64IFD-NEXT: fld ft1, -273(a0) +; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: fsd ft0, -273(a0) +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = inttoptr i32 3735928559 to double* %2 = load volatile double, double* %1 %3 = fadd double %a, %2 @@ -133,6 +178,23 @@ define double @fld_stack(double %a) nounwind { ; RV32IFD-NEXT: lw ra, 28(sp) ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fld_stack: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -32 +; RV64IFD-NEXT: sd ra, 24(sp) +; RV64IFD-NEXT: sd s1, 16(sp) +; RV64IFD-NEXT: mv s1, a0 +; RV64IFD-NEXT: addi a0, sp, 8 +; RV64IFD-NEXT: call notdead +; RV64IFD-NEXT: fmv.d.x ft0, s1 +; RV64IFD-NEXT: fld ft1, 8(sp) +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ld s1, 16(sp) +; RV64IFD-NEXT: ld ra, 24(sp) +; RV64IFD-NEXT: addi sp, sp, 32 +; RV64IFD-NEXT: ret %1 = alloca double, align 8 %2 = bitcast double* %1 to i8* call void @notdead(i8* %2) @@ -159,6 +221,20 @@ define void @fsd_stack(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw ra, 28(sp) ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fsd_stack: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fsd ft0, 0(sp) +; RV64IFD-NEXT: mv a0, sp +; RV64IFD-NEXT: call notdead +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret %1 = fadd double %a, %b ; force store from FPR64 %2 = alloca double, align 8 store double %1, double* %2 @@ -179,6 +255,13 @@ define void @fsd_trunc(float* %a, double %b) nounwind noinline optnone { ; RV32IFD-NEXT: fsw ft0, 0(a0) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fsd_trunc: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fcvt.s.d ft0, ft0 +; RV64IFD-NEXT: fsw ft0, 0(a0) +; RV64IFD-NEXT: ret %1 = fptrunc double %b to float store float %1, float* %a, align 4 ret void diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll index fa10ee2572452..4503c2365c816 100644 --- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc 
-mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IFD %s define double @select_fcmp_false(double %a, double %b) nounwind { ; RV32IFD-LABEL: select_fcmp_false: @@ -8,6 +10,11 @@ define double @select_fcmp_false(double %a, double %b) nounwind { ; RV32IFD-NEXT: mv a1, a3 ; RV32IFD-NEXT: mv a0, a2 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_false: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: mv a0, a1 +; RV64IFD-NEXT: ret %1 = fcmp false double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -33,6 +40,18 @@ define double @select_fcmp_oeq(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_oeq: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: feq.d a0, ft0, ft1 +; RV64IFD-NEXT: bnez a0, .LBB1_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB1_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp oeq double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -58,6 +77,18 @@ define double @select_fcmp_ogt(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_ogt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: bnez a0, .LBB2_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB2_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ogt double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -83,6 +114,18 @@ define double @select_fcmp_oge(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_oge: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: bnez a0, .LBB3_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB3_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp oge double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -108,6 +151,18 @@ define double @select_fcmp_olt(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_olt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: flt.d a0, ft0, ft1 +; RV64IFD-NEXT: bnez a0, .LBB4_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB4_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp olt double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -133,6 +188,18 @@ define double @select_fcmp_ole(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_ole: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fle.d a0, ft0, ft1 +; RV64IFD-NEXT: bnez a0, .LBB5_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB5_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ole double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -166,6 +233,25 @@ define double @select_fcmp_one(double %a, double %b) nounwind { ; 
RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_one: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: feq.d a0, ft1, ft1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: feq.d a1, ft0, ft1 +; RV64IFD-NEXT: not a1, a1 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: bnez a0, .LBB6_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB6_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp one double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -195,6 +281,22 @@ define double @select_fcmp_ord(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_ord: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: feq.d a0, ft1, ft1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB7_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB7_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ord double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -225,6 +327,23 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_ueq: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: feq.d a0, ft1, ft1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: feq.d a1, ft0, ft1 +; RV64IFD-NEXT: or a0, a1, a0 +; RV64IFD-NEXT: bnez a0, .LBB8_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB8_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ueq double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -251,6 +370,19 @@ define double @select_fcmp_ugt(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_ugt: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fle.d a0, ft0, ft1 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB9_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB9_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ugt double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -277,6 +409,19 @@ define double @select_fcmp_uge(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_uge: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: flt.d a0, ft0, ft1 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB10_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB10_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp uge double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -303,6 +448,19 @@ define double @select_fcmp_ult(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 
; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_ult: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fle.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB11_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB11_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ult double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -329,6 +487,19 @@ define double @select_fcmp_ule(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_ule: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: flt.d a0, ft1, ft0 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB12_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB12_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp ule double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -355,6 +526,19 @@ define double @select_fcmp_une(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_une: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: feq.d a0, ft0, ft1 +; RV64IFD-NEXT: xori a0, a0, 1 +; RV64IFD-NEXT: bnez a0, .LBB13_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB13_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp une double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -384,6 +568,21 @@ define double @select_fcmp_uno(double %a, double %b) nounwind { ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_uno: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: feq.d a0, ft1, ft1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: bnez a0, .LBB14_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fmv.d ft0, ft1 +; RV64IFD-NEXT: .LBB14_2: +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret %1 = fcmp uno double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -393,6 +592,10 @@ define double @select_fcmp_true(double %a, double %b) nounwind { ; RV32IFD-LABEL: select_fcmp_true: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: select_fcmp_true: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: ret %1 = fcmp true double %a, %b %2 = select i1 %1, double %a, double %b ret double %2 @@ -417,6 +620,18 @@ define i32 @i32_select_fcmp_oeq(double %a, double %b, i32 %c, i32 %d) nounwind { ; RV32IFD-NEXT: mv a0, a4 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: i32_select_fcmp_oeq: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: bnez a0, .LBB16_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: mv a2, a3 +; RV64IFD-NEXT: .LBB16_2: +; RV64IFD-NEXT: mv a0, a2 +; RV64IFD-NEXT: ret %1 = fcmp oeq double %a, %b %2 = select i1 %1, i32 %c, i32 %d ret i32 %2 diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll index b5c0b991e6cc0..28a9e12dadd0e 100644 --- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll +++ 
b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IFD %s define double @func(double %d, i32 %n) nounwind { ; RV32IFD-LABEL: func: @@ -30,6 +32,28 @@ define double @func(double %d, i32 %n) nounwind { ; RV32IFD-NEXT: lw ra, 28(sp) ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: func: +; RV64IFD: # %bb.0: # %entry +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: slli a0, a1, 32 +; RV64IFD-NEXT: srli a0, a0, 32 +; RV64IFD-NEXT: beqz a0, .LBB0_2 +; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: addi a1, a1, -1 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: fsd ft0, 0(sp) +; RV64IFD-NEXT: call func +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fld ft1, 0(sp) +; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: .LBB0_2: # %return +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ld ra, 8(sp) +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret entry: %cmp = icmp eq i32 %n, 0 br i1 %cmp, label %return, label %if.else diff --git a/llvm/test/CodeGen/RISCV/rv64d-double-convert.ll b/llvm/test/CodeGen/RISCV/rv64d-double-convert.ll new file mode 100644 index 0000000000000..da3fcf678e364 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rv64d-double-convert.ll @@ -0,0 +1,130 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=RV64ID + +; This file exhaustively checks double<->i32 conversions. In general, +; fcvt.l[u].d can be selected instead of fcvt.w[u].d because poison is +; generated for an fpto[s|u]i conversion if the result doesn't fit in the +; target type. 
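As a concrete illustration of that rule, consider the following IR (a hypothetical function sketched here for explanation, not part of the generated test file): for any double whose truncated value is representable in i32, sign-extending the fptosi result to i64 matches what fcvt.l.d produces directly, and for any other input the i32 result is poison, so selecting the wider conversion remains correct.

define i64 @sext_of_fptosi_sketch(double %a) nounwind {
  ; Poison when the truncated value of %a does not fit in an i32.
  %1 = fptosi double %a to i32
  ; sext of poison is poison, so the raw fcvt.l.d result is always acceptable.
  %2 = sext i32 %1 to i64
  ret i64 %2
}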
+ +define i32 @aext_fptosi(double %a) nounwind { +; RV64ID-LABEL: aext_fptosi: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fmv.d.x ft0, a0 +; RV64ID-NEXT: fcvt.l.d a0, ft0, rtz +; RV64ID-NEXT: ret + %1 = fptosi double %a to i32 + ret i32 %1 +} + +define signext i32 @sext_fptosi(double %a) nounwind { +; RV64ID-LABEL: sext_fptosi: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fmv.d.x ft0, a0 +; RV64ID-NEXT: fcvt.l.d a0, ft0, rtz +; RV64ID-NEXT: ret + %1 = fptosi double %a to i32 + ret i32 %1 +} + +define zeroext i32 @zext_fptosi(double %a) nounwind { +; RV64ID-LABEL: zext_fptosi: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fmv.d.x ft0, a0 +; RV64ID-NEXT: fcvt.l.d a0, ft0, rtz +; RV64ID-NEXT: slli a0, a0, 32 +; RV64ID-NEXT: srli a0, a0, 32 +; RV64ID-NEXT: ret + %1 = fptosi double %a to i32 + ret i32 %1 +} + +define i32 @aext_fptoui(double %a) nounwind { +; RV64ID-LABEL: aext_fptoui: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fmv.d.x ft0, a0 +; RV64ID-NEXT: fcvt.lu.d a0, ft0, rtz +; RV64ID-NEXT: ret + %1 = fptoui double %a to i32 + ret i32 %1 +} + +define signext i32 @sext_fptoui(double %a) nounwind { +; RV64ID-LABEL: sext_fptoui: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fmv.d.x ft0, a0 +; RV64ID-NEXT: fcvt.wu.d a0, ft0, rtz +; RV64ID-NEXT: ret + %1 = fptoui double %a to i32 + ret i32 %1 +} + +define zeroext i32 @zext_fptoui(double %a) nounwind { +; RV64ID-LABEL: zext_fptoui: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fmv.d.x ft0, a0 +; RV64ID-NEXT: fcvt.lu.d a0, ft0, rtz +; RV64ID-NEXT: ret + %1 = fptoui double %a to i32 + ret i32 %1 +} + +define double @uitofp_aext_i32_to_f64(i32 %a) nounwind { +; RV64ID-LABEL: uitofp_aext_i32_to_f64: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fcvt.d.wu ft0, a0 +; RV64ID-NEXT: fmv.x.d a0, ft0 +; RV64ID-NEXT: ret + %1 = uitofp i32 %a to double + ret double %1 +} + +define double @uitofp_sext_i32_to_f64(i32 signext %a) nounwind { +; RV64ID-LABEL: uitofp_sext_i32_to_f64: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fcvt.d.wu ft0, a0 +; RV64ID-NEXT: fmv.x.d a0, ft0 +; RV64ID-NEXT: ret + %1 = uitofp i32 %a to double + ret double %1 +} + +define double @uitofp_zext_i32_to_f64(i32 zeroext %a) nounwind { +; RV64ID-LABEL: uitofp_zext_i32_to_f64: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fcvt.d.wu ft0, a0 +; RV64ID-NEXT: fmv.x.d a0, ft0 +; RV64ID-NEXT: ret + %1 = uitofp i32 %a to double + ret double %1 +} + +define double @sitofp_aext_i32_to_f64(i32 %a) nounwind { +; RV64ID-LABEL: sitofp_aext_i32_to_f64: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fcvt.d.w ft0, a0 +; RV64ID-NEXT: fmv.x.d a0, ft0 +; RV64ID-NEXT: ret + %1 = sitofp i32 %a to double + ret double %1 +} + +define double @sitofp_sext_i32_to_f64(i32 signext %a) nounwind { +; RV64ID-LABEL: sitofp_sext_i32_to_f64: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fcvt.d.l ft0, a0 +; RV64ID-NEXT: fmv.x.d a0, ft0 +; RV64ID-NEXT: ret + %1 = sitofp i32 %a to double + ret double %1 +} + +define double @sitofp_zext_i32_to_f64(i32 zeroext %a) nounwind { +; RV64ID-LABEL: sitofp_zext_i32_to_f64: +; RV64ID: # %bb.0: +; RV64ID-NEXT: fcvt.d.w ft0, a0 +; RV64ID-NEXT: fmv.x.d a0, ft0 +; RV64ID-NEXT: ret + %1 = sitofp i32 %a to double + ret double %1 +}
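A general note on the RV64IFD check lines in these tests: the RUN lines use the default RV64 soft-float (lp64) calling convention, so double arguments and return values are carried in integer registers. The selected code therefore moves incoming values into FPRs with fmv.d.x before any double arithmetic and moves the result back with fmv.x.d, which is the pattern the "soft float ABI" comments above are exercising. A minimal sketch of that pattern (hypothetical function, assuming the same llc invocation as the tests above):

define double @soft_float_abi_sketch(double %a, double %b) nounwind {
  ; %a and %b arrive in a0 and a1; expect fmv.d.x into ft registers,
  ; a single fadd.d, then fmv.x.d to return the result in a0.
  %1 = fadd double %a, %b
  ret double %1
}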