diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index e75b4026d5424..863b59454b478 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -3119,7 +3119,7 @@ Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
   Value *Op0, *Op1;
   Instruction *Ext0, *Ext1;
-  const CmpInst::Predicate Pred = Cmp.getPredicate();
+  const CmpPredicate Pred = Cmp.getCmpPredicate();
   if (match(Add,
             m_Add(m_CombineAnd(m_Instruction(Ext0), m_ZExtOrSExt(m_Value(Op0))),
                   m_CombineAnd(m_Instruction(Ext1),
@@ -3156,7 +3156,8 @@ Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
   // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
   // are canonicalized to SGT/SLT/UGT/ULT.
   if ((Add->hasNoSignedWrap() &&
-       (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
+       (Pred.getPreferredSignedPredicate() == ICmpInst::ICMP_SGT ||
+        Pred.getPreferredSignedPredicate() == ICmpInst::ICMP_SLT)) ||
       (Add->hasNoUnsignedWrap() &&
        (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
     bool Overflow;
@@ -3165,9 +3166,13 @@ Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
     // If there is overflow, the result must be true or false.
     // TODO: Can we assert there is no overflow because InstSimplify always
     // handles those cases?
-    if (!Overflow)
+    if (!Overflow) {
+      const CmpInst::Predicate EquivPredicate =
+          Add->hasNoSignedWrap() ? Pred.getPreferredSignedPredicate()
+                                 : Cmp.getPredicate();
       // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
-      return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
+      return new ICmpInst(EquivPredicate, X, ConstantInt::get(Ty, NewC));
+    }
   }
 
   if (ICmpInst::isUnsigned(Pred) && Add->hasNoSignedWrap() &&
diff --git a/llvm/test/Transforms/InstCombine/icmp-add.ll b/llvm/test/Transforms/InstCombine/icmp-add.ll
index a8cdf80948a84..eb693244f2057 100644
--- a/llvm/test/Transforms/InstCombine/icmp-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-add.ll
@@ -3302,3 +3302,27 @@ entry:
   %cmp = icmp ult i32 %add, 253
   ret i1 %cmp
 }
+
+define i1 @icmp_partial_negative_samesign_ult_folded_to_slt(i8 range(i8 -1, 5) %x) {
+; CHECK-LABEL: @icmp_partial_negative_samesign_ult_folded_to_slt(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 2
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+entry:
+  %add = add nsw i8 %x, -5
+  %cmp = icmp samesign ult i8 %add, -3
+  ret i1 %cmp
+}
+
+define i1 @icmp_positive_samesign_ult_folded_to_ult(i8 range(i8 1, 5) %x) {
+; CHECK-LABEL: @icmp_positive_samesign_ult_folded_to_ult(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp samesign ult i8 [[X:%.*]], 2
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+entry:
+  %add = add nsw i8 %x, 1
+  %cmp = icmp samesign slt i8 %add, 3
+  ret i1 %cmp
+}
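
A hand-worked sketch of the arithmetic behind the first new test, with values taken from the test itself (not part of the patch): the add carries nsw and the compare is samesign ult, so Pred.getPreferredSignedPredicate() selects ICMP_SLT, the new constant is C - C2 = -3 - (-5) = 2, and the compare is rewritten directly against %x:

  ; before: %x in [-1, 5), add nsw cannot wrap in the signed sense
  %add = add nsw i8 %x, -5
  %cmp = icmp samesign ult i8 %add, -3

  ; after: EquivPredicate = slt (samesign ult strengthened via getPreferredSignedPredicate),
  ;        constant = -3 - (-5) = 2
  %cmp = icmp slt i8 %x, 2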