[X86] Extend combinei64TruncSrlAdd to handle patterns with or and xor #128435
base: main
Conversation
@llvm/pr-subscribers-backend-x86

Author: João Gouveia (joaotgouveia)

Changes

As discussed in #126448, the fold implemented by #126448 / #128353 can be extended to operations other than add. This patch extends the fold performed by combinei64TruncSrlAdd to include or (proof: https://alive2.llvm.org/ce/z/SDZgos) and xor (proof: https://alive2.llvm.org/ce/z/fW8MSx). There's no need to extend it to sub and and, as similar folds are already being performed for those operations.

CC: @phoebewang @RKSimon

Full diff: https://github.com/llvm/llvm-project/pull/128435.diff (2 files affected)
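Before the diff, a concrete illustration of the transform in plain C++ (a sketch, not part of the original post; it uses the constant 3940649673949184 = 0xE000000000000 from the new tests, whose low 48 bits are zero):

    #include <cstdint>

    // Before the fold: materializing the 64-bit immediate 0xE000000000000
    // requires a MOVABS.
    uint32_t folded_before(uint64_t x) {
      return (uint32_t)((x | 0xE000000000000ULL) >> 48);
    }

    // After the fold: the immediate shrinks to 0xE000000000000 >> 48 == 14,
    // which fits in a 32-bit or.
    uint32_t folded_after(uint64_t x) {
      return ((uint32_t)(x >> 48)) | 14u;
    }

Both functions compute the same value for every x, which is what the alive2 proofs above establish at the IR level.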
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c146e1e6c0334..47dc9ffb4b24d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -53733,36 +53733,42 @@ static SDValue combineLRINT_LLRINT(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(X86ISD::CVTP2SI, DL, VT, Src);
}
-// Attempt to fold some (truncate (srl (add X, C1), C2)) patterns to
-// (add (truncate (srl X, C2)), C1'). C1' will be smaller than C1 so we are able
-// to avoid generating code with MOVABS and large constants in certain cases.
-static SDValue combinei64TruncSrlAdd(SDValue N, EVT VT, SelectionDAG &DAG,
- const SDLoc &DL) {
+// Attempt to fold some (truncate (srl (binop X, C1), C2)) patterns to
+// (binop (truncate (srl X, C2)), C1'). C1' will be smaller than C1 so we are
+// able to avoid generating code with MOVABS and large constants in certain
+// cases.
+static SDValue combinei64TruncSrlBinop(SDValue N, EVT VT, SelectionDAG &DAG,
+ const SDLoc &DL) {
using namespace llvm::SDPatternMatch;
- SDValue AddLhs;
- APInt AddConst, SrlConst;
+ SDValue BinopLhs;
+ APInt BinopConst, SrlConst;
if (VT != MVT::i32 ||
- !sd_match(N, m_AllOf(m_SpecificVT(MVT::i64),
- m_Srl(m_OneUse(m_Add(m_Value(AddLhs),
- m_ConstInt(AddConst))),
- m_ConstInt(SrlConst)))))
+ !sd_match(
+ N,
+ m_AllOf(m_SpecificVT(MVT::i64),
+ m_Srl(m_OneUse(m_AnyOf(
+ m_Add(m_Value(BinopLhs), m_ConstInt(BinopConst)),
+ m_Or(m_Value(BinopLhs), m_ConstInt(BinopConst)),
+ m_Xor(m_Value(BinopLhs), m_ConstInt(BinopConst)))),
+ m_ConstInt(SrlConst)))))
return SDValue();
- if (SrlConst.ule(32) || AddConst.countr_zero() < SrlConst.getZExtValue())
+ if (SrlConst.ule(32) || BinopConst.countr_zero() < SrlConst.getZExtValue())
return SDValue();
- SDValue AddLHSSrl =
- DAG.getNode(ISD::SRL, DL, MVT::i64, AddLhs, N.getOperand(1));
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, AddLHSSrl);
+ SDValue BinopLHSSrl =
+ DAG.getNode(ISD::SRL, DL, MVT::i64, BinopLhs, N.getOperand(1));
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, BinopLHSSrl);
- APInt NewAddConstVal = AddConst.lshr(SrlConst).trunc(VT.getSizeInBits());
- SDValue NewAddConst = DAG.getConstant(NewAddConstVal, DL, VT);
- SDValue NewAddNode = DAG.getNode(ISD::ADD, DL, VT, Trunc, NewAddConst);
+ APInt NewBinopConstVal = BinopConst.lshr(SrlConst).trunc(VT.getSizeInBits());
+ SDValue NewBinopConst = DAG.getConstant(NewBinopConstVal, DL, VT);
+ SDValue NewBinopNode =
+ DAG.getNode(N.getOperand(0).getOpcode(), DL, VT, Trunc, NewBinopConst);
EVT CleanUpVT =
EVT::getIntegerVT(*DAG.getContext(), 64 - SrlConst.getZExtValue());
- return DAG.getZeroExtendInReg(NewAddNode, DL, CleanUpVT);
+ return DAG.getZeroExtendInReg(NewBinopNode, DL, CleanUpVT);
}
/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
@@ -53810,11 +53816,9 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
if (!Src.hasOneUse())
return SDValue();
- if (SDValue R = combinei64TruncSrlAdd(Src, VT, DAG, DL))
+ if (SDValue R = combinei64TruncSrlBinop(Src, VT, DAG, DL))
return R;
- // Only support vector truncation for now.
- // TODO: i64 scalar math would benefit as well.
if (!VT.isVector())
return SDValue();
diff --git a/llvm/test/CodeGen/X86/combine-i64-trunc-srl-add.ll b/llvm/test/CodeGen/X86/combine-i64-trunc-srl-add.ll
index 14992ca5bf488..ec29cf9d56c29 100644
--- a/llvm/test/CodeGen/X86/combine-i64-trunc-srl-add.ll
+++ b/llvm/test/CodeGen/X86/combine-i64-trunc-srl-add.ll
@@ -128,6 +128,61 @@ define i32 @test_trunc_add(i64 %x) {
ret i32 %conv
}
+define i32 @test_trunc_sub(i64 %x) {
+; X64-LABEL: test_trunc_sub:
+; X64: # %bb.0:
+; X64-NEXT: shrq $48, %rdi
+; X64-NEXT: addl $65522, %edi # imm = 0xFFF2
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: retq
+ %sub = sub i64 %x, 3940649673949184
+ %shr = lshr i64 %sub, 48
+ %conv = trunc i64 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @test_trunc_and(i64 %x) {
+; X64-LABEL: test_trunc_and:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shrq $48, %rax
+; X64-NEXT: andl $14, %eax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: retq
+ %and = and i64 %x, 3940649673949184
+ %shr = lshr i64 %and, 48
+ %conv = trunc i64 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @test_trunc_or(i64 %x) {
+; X64-LABEL: test_trunc_or:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shrq $48, %rax
+; X64-NEXT: orl $14, %eax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: retq
+ %or = or i64 %x, 3940649673949184
+ %shr = lshr i64 %or, 48
+ %conv = trunc i64 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @test_trunc_xor(i64 %x) {
+; X64-LABEL: test_trunc_xor:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shrq $48, %rax
+; X64-NEXT: xorl $14, %eax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: retq
+ %xor = xor i64 %x, 3940649673949184
+ %shr = lshr i64 %xor, 48
+ %conv = trunc i64 %shr to i32
+ ret i32 %conv
+}
+
; Make sure we don't crash on this test case.
define i32 @pr128158(i64 %x) {
@@ -137,10 +192,10 @@ define i32 @pr128158(i64 %x) {
; X64-NEXT: addq %rdi, %rax
; X64-NEXT: shrq $32, %rax
; X64-NEXT: .p2align 4
-; X64-NEXT: .LBB9_1: # %for.body
+; X64-NEXT: .LBB13_1: # %for.body
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: cmpl $9, %eax
-; X64-NEXT: jb .LBB9_1
+; X64-NEXT: jb .LBB13_1
; X64-NEXT: # %bb.2: # %exit
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
    return SDValue();

-  if (SrlConst.ule(32) || AddConst.countr_zero() < SrlConst.getZExtValue())
+  if (SrlConst.ule(32) || BinopConst.countr_zero() < SrlConst.getZExtValue())
For or/xor they don't need the second condition, and without that limitation (https://alive2.llvm.org/ce/z/uEAYxG) the and case I showed in https://godbolt.org/z/1za939PKc should also work.
I've added test cases that cover the scenario you mentioned for AND, as well as equivalent cases for OR and XOR. It seems the fold works as-is for these cases, though I'm not entirely sure why.
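The bitwise cases hold unconditionally because or/xor/and act on each bit position independently, so a logical shift right can be applied before or after the operation; only add can carry across the shifted-away boundary. A minimal self-contained check of the identity (an illustration, not from the thread):

    #include <cassert>
    #include <cstdint>

    // Shifting before or after a bitwise op gives the same result,
    // since no bit of the output depends on a different bit position
    // of the inputs. add lacks this property: carries cross positions.
    void check(uint64_t x, uint64_t c) {
      assert(((x | c) >> 48) == ((x >> 48) | (c >> 48)));
      assert(((x ^ c) >> 48) == ((x >> 48) ^ (c >> 48)));
      assert(((x & c) >> 48) == ((x >> 48) & (c >> 48)));
    }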
You are right. I mistook it for add.
    EVT CleanUpVT =
        EVT::getIntegerVT(*DAG.getContext(), 64 - SrlConst.getZExtValue());
-  return DAG.getZeroExtendInReg(NewAddNode, DL, CleanUpVT);
+  return DAG.getZeroExtendInReg(NewBinopNode, DL, CleanUpVT);
We are free to use anyext for and/or/xor.
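In other words, the masking step could be skipped for the bitwise opcodes. A sketch of what that might look like (hypothetical, not the committed code):

    // For OR/XOR the high bits of NewBinopNode are already zero, so the
    // zero-extend-in-reg mask is a no-op; only ADD can overflow past the
    // 64 - SrlConst bit range and needs the cleanup.
    if (N.getOperand(0).getOpcode() == ISD::ADD)
      return DAG.getZeroExtendInReg(NewBinopNode, DL, CleanUpVT);
    return NewBinopNode;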
@@ -53810,11 +53816,9 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
   if (!Src.hasOneUse())
     return SDValue();

-  if (SDValue R = combinei64TruncSrlAdd(Src, VT, DAG, DL))
+  if (SDValue R = combinei64TruncSrlBinop(Src, VT, DAG, DL))
Does it work for MUL? Add a test case if yes, and exclude it explicitly in code if not?
We also have other binops like min/max etc., so we should add a switch check like the one below.
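Presumably a guard of this shape (a hypothetical sketch; the exact opcode set is what the thread goes on to decide):

    // N is the SRL node, as in combinei64TruncSrlBinop; anything not
    // explicitly listed (MUL, MIN/MAX, ...) is rejected up front.
    switch (N.getOperand(0).getOpcode()) {
    case ISD::ADD:
    case ISD::OR:
    case ISD::XOR:
      break;
    default:
      return SDValue();
    }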
This exact fold doesn't work for MUL: https://alive2.llvm.org/ce/z/Xoz8hb (had to reduce the size of the ints because Alive was timing out).
+                       m_Add(m_Value(BinopLhs), m_ConstInt(BinopConst)),
+                       m_Or(m_Value(BinopLhs), m_ConstInt(BinopConst)),
+                       m_Xor(m_Value(BinopLhs), m_ConstInt(BinopConst)))),
Use m_Binary once we've limited the binop to add/sub/and/or/xor (and possibly mul).
I think we can get rid of SDPatternMatch by checking VT == MVT::i32 && SrcVT == MVT::i64 && isa<ConstantSDNode>(N->getOperand(1)) && isa<ConstantSDNode>(Src.getOperand(1)) before calling combinei64TruncSrlBinop.
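Roughly, the suggested call-site guard would look like this (a hypothetical sketch; it assumes Src is the SRL, so Src.getOperand(1) is the shift amount and Src.getOperand(0).getOperand(1) is the binop constant):

    if (VT == MVT::i32 && Src.getValueType() == MVT::i64 &&
        isa<ConstantSDNode>(Src.getOperand(1)) &&
        isa<ConstantSDNode>(Src.getOperand(0).getOperand(1)))
      if (SDValue R = combinei64TruncSrlBinop(Src, VT, DAG, DL))
        return R;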
Wouldn't it be neater to move those checks inside combinei64TruncSrlBinop? We could use m_Binary to match the pattern and then switch-case the opcode to perform the different checks required for AND/OR/XOR and ADD/SUB.
My assumption is that when we have only one exact match, either m_Add or m_Binary would be fine. If neither matches, we don't bother, because the check for OR/XOR/ADD/SUB is enough.