58 if (
F.getFnAttribute(
"disable-tail-calls").getValueAsBool())
64 AttrBuilder CallerAttrs(
F.getContext(),
F.getAttributes().getRetAttrs());
65 for (
const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
66 Attribute::DereferenceableOrNull, Attribute::NoAlias,
67 Attribute::NonNull, Attribute::NoUndef,
68 Attribute::Range, Attribute::NoFPClass})
75 if (CallerAttrs.
contains(Attribute::ZExt) ||
76 CallerAttrs.
contains(Attribute::SExt))
87 for (
unsigned I = 0, E = ArgLocs.
size();
I != E; ++
I) {
103 Register ArgReg = cast<RegisterSDNode>(
Value->getOperand(1))->getReg();
104 if (
MRI.getLiveInPhysReg(ArgReg) != Reg)
114 IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
115 IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
116 IsNoExt = Call->paramHasAttr(ArgIdx, Attribute::NoExt);
117 IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
118 IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
119 IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
120 IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
121 IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
122 IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
123 IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
124 IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
125 IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
126 IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
127 Alignment = Call->getParamStackAlign(ArgIdx);
130 "multiple ABI attributes?");
146std::pair<SDValue, SDValue>
156 Args.reserve(Ops.
size());
159 for (
unsigned i = 0; i < Ops.
size(); ++i) {
162 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.
getContext());
165 Entry.IsZExt = !Entry.IsSExt;
169 Entry.IsSExt = Entry.IsZExt =
false;
171 Args.push_back(Entry);
174 if (LC == RTLIB::UNKNOWN_LIBCALL)
182 bool zeroExtend = !signExtend;
186 signExtend = zeroExtend =
false;
197 return LowerCallTo(CLI);
201 std::vector<EVT> &MemOps,
unsigned Limit,
const MemOp &
Op,
unsigned DstAS,
203 if (Limit != ~
unsigned(0) &&
Op.isMemcpyWithFixedDstAlign() &&
204 Op.getSrcAlign() <
Op.getDstAlign())
209 if (VT == MVT::Other) {
213 VT = MVT::LAST_INTEGER_VALUETYPE;
214 if (
Op.isFixedDstAlign())
221 MVT LVT = MVT::LAST_INTEGER_VALUETYPE;
232 unsigned NumMemOps = 0;
236 while (VTSize >
Size) {
247 else if (NewVT == MVT::i64 &&
259 if (NewVT == MVT::i8)
268 if (NumMemOps &&
Op.allowOverlap() && NewVTSize <
Size &&
270 VT, DstAS,
Op.isFixedDstAlign() ?
Op.getDstAlign() :
Align(1),
280 if (++NumMemOps > Limit)
283 MemOps.push_back(VT);
298 return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
308 bool IsSignaling)
const {
313 assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
314 &&
"Unsupported setcc type!");
317 RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
318 bool ShouldInvertCC =
false;
322 LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
323 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
324 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
328 LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
329 (VT == MVT::f64) ? RTLIB::UNE_F64 :
330 (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
334 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
335 (VT == MVT::f64) ? RTLIB::OGE_F64 :
336 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
340 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
341 (VT == MVT::f64) ? RTLIB::OLT_F64 :
342 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
346 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
347 (VT == MVT::f64) ? RTLIB::OLE_F64 :
348 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
352 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
353 (VT == MVT::f64) ? RTLIB::OGT_F64 :
354 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
357 ShouldInvertCC =
true;
360 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
361 (VT == MVT::f64) ? RTLIB::UO_F64 :
362 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
366 ShouldInvertCC =
true;
369 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
370 (VT == MVT::f64) ? RTLIB::UO_F64 :
371 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
372 LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
373 (VT == MVT::f64) ? RTLIB::OEQ_F64 :
374 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
378 ShouldInvertCC =
true;
381 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
382 (VT == MVT::f64) ? RTLIB::OGE_F64 :
383 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
386 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
387 (VT == MVT::f64) ? RTLIB::OGT_F64 :
388 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
391 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
392 (VT == MVT::f64) ? RTLIB::OLE_F64 :
393 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
396 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
397 (VT == MVT::f64) ? RTLIB::OLT_F64 :
398 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
406 SDValue Ops[2] = {NewLHS, NewRHS};
411 auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
416 if (ShouldInvertCC) {
418 CCCode = getSetCCInverse(CCCode, RetVT);
421 if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
428 auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
431 CCCode = getSetCCInverse(CCCode, RetVT);
432 NewLHS = DAG.
getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
446 if (!isPositionIndependent())
460 unsigned JTEncoding = getJumpTableEncoding();
496 if (!TM.shouldAssumeDSOLocal(GV))
500 if (isPositionIndependent())
516 const APInt &DemandedElts,
519 unsigned Opcode =
Op.getOpcode();
527 if (targetShrinkDemandedConstant(
Op,
DemandedBits, DemandedElts, TLO))
537 auto *Op1C = dyn_cast<ConstantSDNode>(
Op.getOperand(1));
538 if (!Op1C || Op1C->isOpaque())
542 const APInt &
C = Op1C->getAPIntValue();
547 EVT VT =
Op.getValueType();
564 EVT VT =
Op.getValueType();
579 "ShrinkDemandedOp only supports binary operators!");
580 assert(
Op.getNode()->getNumValues() == 1 &&
581 "ShrinkDemandedOp only supports nodes with one result!");
583 EVT VT =
Op.getValueType();
592 Op.getOperand(1).getValueType().getScalarSizeInBits() ==
BitWidth &&
593 "ShrinkDemandedOp only supports operands that have the same size!");
597 if (!
Op.getNode()->hasOneUse())
614 Op.getOpcode(), dl, SmallVT,
617 assert(DemandedSize <= SmallVTBits &&
"Narrowed below demanded bits?");
632 bool Simplified = SimplifyDemandedBits(
Op,
DemandedBits, Known, TLO);
641 const APInt &DemandedElts,
661 bool AssumeSingleUse)
const {
662 EVT VT =
Op.getValueType();
678 EVT VT =
Op.getValueType();
696 switch (
Op.getOpcode()) {
702 EVT SrcVT = Src.getValueType();
703 EVT DstVT =
Op.getValueType();
709 if (NumSrcEltBits == NumDstEltBits)
710 if (
SDValue V = SimplifyMultipleUseDemandedBits(
714 if (SrcVT.
isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
715 unsigned Scale = NumDstEltBits / NumSrcEltBits;
719 for (
unsigned i = 0; i != Scale; ++i) {
720 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
721 unsigned BitOffset = EltOffset * NumSrcEltBits;
724 DemandedSrcBits |= Sub;
725 for (
unsigned j = 0; j != NumElts; ++j)
727 DemandedSrcElts.
setBit((j * Scale) + i);
731 if (
SDValue V = SimplifyMultipleUseDemandedBits(
732 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
737 if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
738 unsigned Scale = NumSrcEltBits / NumDstEltBits;
742 for (
unsigned i = 0; i != NumElts; ++i)
743 if (DemandedElts[i]) {
744 unsigned Offset = (i % Scale) * NumDstEltBits;
746 DemandedSrcElts.
setBit(i / Scale);
749 if (
SDValue V = SimplifyMultipleUseDemandedBits(
750 Src, DemandedSrcBits, DemandedSrcElts, DAG,
Depth + 1))
771 return Op.getOperand(0);
773 return Op.getOperand(1);
784 return Op.getOperand(0);
786 return Op.getOperand(1);
796 return Op.getOperand(0);
798 return Op.getOperand(1);
804 return Op.getOperand(0);
808 return Op.getOperand(1);
814 if (std::optional<uint64_t> MaxSA =
817 unsigned ShAmt = *MaxSA;
818 unsigned NumSignBits =
821 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
829 if (std::optional<uint64_t> MaxSA =
832 unsigned ShAmt = *MaxSA;
836 unsigned NumSignBits =
868 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
875 if (NumSignBits >= (
BitWidth - ExBits + 1))
888 EVT SrcVT = Src.getValueType();
889 EVT DstVT =
Op.getValueType();
890 if (IsLE && DemandedElts == 1 &&
903 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
906 !DemandedElts[CIdx->getZExtValue()])
920 if (DemandedSubElts == 0)
930 bool AllUndef =
true, IdentityLHS =
true, IdentityRHS =
true;
931 for (
unsigned i = 0; i != NumElts; ++i) {
932 int M = ShuffleMask[i];
933 if (M < 0 || !DemandedElts[i])
936 IdentityLHS &= (M == (int)i);
937 IdentityRHS &= ((M - NumElts) == i);
943 return Op.getOperand(0);
945 return Op.getOperand(1);
955 if (
SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
965 unsigned Depth)
const {
966 EVT VT =
Op.getValueType();
973 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
979 unsigned Depth)
const {
981 return SimplifyMultipleUseDemandedBits(
Op,
DemandedBits, DemandedElts, DAG,
993 "SRL or SRA node is required here!");
996 if (!N1C || !N1C->
isOne())
1043 unsigned ShiftOpc =
Op.getOpcode();
1044 bool IsSigned =
false;
1048 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
1053 unsigned NumZero = std::min(NumZeroA, NumZeroB);
1059 if (NumZero >= 2 && NumSigned < NumZero) {
1064 if (NumSigned >= 1) {
1072 if (NumZero >= 1 && NumSigned < NumZero) {
1092 EVT VT =
Op.getValueType();
1106 Add.getOperand(1)) &&
1117 (isa<ConstantSDNode>(ExtOpA) || isa<ConstantSDNode>(ExtOpB)))
1137 unsigned Depth,
bool AssumeSingleUse)
const {
1140 "Mask size mismatches value type size!");
1145 EVT VT =
Op.getValueType();
1147 unsigned NumElts = OriginalDemandedElts.
getBitWidth();
1149 "Unexpected vector size");
1152 APInt DemandedElts = OriginalDemandedElts;
1172 cast<ConstantFPSDNode>(
Op)->getValueAPF().bitcastToAPInt());
1177 bool HasMultiUse =
false;
1178 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse()) {
1187 }
else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
1196 switch (
Op.getOpcode()) {
1200 if (!DemandedElts[0])
1205 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
1207 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO,
Depth + 1))
1212 if (DemandedElts == 1)
1225 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1234 auto *LD = cast<LoadSDNode>(
Op);
1235 if (getTargetConstantFromLoad(LD)) {
1241 EVT MemVT = LD->getMemoryVT();
1253 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
1258 APInt DemandedVecElts(DemandedElts);
1260 unsigned Idx = CIdx->getZExtValue();
1264 if (!DemandedElts[
Idx])
1271 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO,
Depth + 1))
1277 if (SimplifyDemandedBits(Vec,
DemandedBits, DemandedVecElts, KnownVec, TLO,
1281 if (!!DemandedVecElts)
1296 APInt DemandedSrcElts = DemandedElts;
1300 if (SimplifyDemandedBits(Sub,
DemandedBits, DemandedSubElts, KnownSub, TLO,
1303 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, KnownSrc, TLO,
1309 if (!!DemandedSubElts)
1311 if (!!DemandedSrcElts)
1317 SDValue NewSub = SimplifyMultipleUseDemandedBits(
1319 SDValue NewSrc = SimplifyMultipleUseDemandedBits(
1321 if (NewSub || NewSrc) {
1322 NewSub = NewSub ? NewSub : Sub;
1323 NewSrc = NewSrc ? NewSrc : Src;
1336 if (Src.getValueType().isScalableVector())
1339 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
1342 if (SimplifyDemandedBits(Src,
DemandedBits, DemandedSrcElts, Known, TLO,
1348 SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
1363 EVT SubVT =
Op.getOperand(0).getValueType();
1366 for (
unsigned i = 0; i != NumSubVecs; ++i) {
1367 APInt DemandedSubElts =
1368 DemandedElts.
extractBits(NumSubElts, i * NumSubElts);
1369 if (SimplifyDemandedBits(
Op.getOperand(i),
DemandedBits, DemandedSubElts,
1370 Known2, TLO,
Depth + 1))
1373 if (!!DemandedSubElts)
1383 APInt DemandedLHS, DemandedRHS;
1388 if (!!DemandedLHS || !!DemandedRHS) {
1394 if (!!DemandedLHS) {
1395 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedLHS, Known2, TLO,
1400 if (!!DemandedRHS) {
1401 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedRHS, Known2, TLO,
1408 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1410 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1412 if (DemandedOp0 || DemandedOp1) {
1413 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1414 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1449 LHSKnown.
One == ~RHSC->getAPIntValue()) {
1461 unsigned NumSubElts =
1478 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1482 Known2, TLO,
Depth + 1))
1504 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1506 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1508 if (DemandedOp0 || DemandedOp1) {
1509 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1510 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1522 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1528 if (SimplifyDemandedBits(Op0, ~Known.
One &
DemandedBits, DemandedElts,
1529 Known2, TLO,
Depth + 1)) {
1549 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1551 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1553 if (DemandedOp0 || DemandedOp1) {
1554 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1555 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1566 for (
int I = 0;
I != 2; ++
I) {
1569 SDValue Alt =
Op.getOperand(1 -
I).getOperand(0);
1570 SDValue C2 =
Op.getOperand(1 -
I).getOperand(1);
1572 for (
int J = 0; J != 2; ++J) {
1595 if (SimplifyDemandedBits(Op1,
DemandedBits, DemandedElts, Known, TLO,
1598 if (SimplifyDemandedBits(Op0,
DemandedBits, DemandedElts, Known2, TLO,
1625 if (
C->getAPIntValue() == Known2.
One) {
1634 if (!
C->isAllOnes() &&
DemandedBits.isSubsetOf(
C->getAPIntValue())) {
1646 if (ShiftC->getAPIntValue().ult(
BitWidth)) {
1647 uint64_t ShiftAmt = ShiftC->getZExtValue();
1650 : Ones.
lshr(ShiftAmt);
1652 isDesirableToCommuteXorWithShift(
Op.getNode())) {
1667 if (!
C || !
C->isAllOnes())
1673 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1675 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1677 if (DemandedOp0 || DemandedOp1) {
1678 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1679 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
1689 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1690 Known, TLO,
Depth + 1))
1692 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1693 Known2, TLO,
Depth + 1))
1704 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1705 Known, TLO,
Depth + 1))
1707 if (SimplifyDemandedBits(
Op.getOperand(1),
DemandedBits, DemandedElts,
1708 Known2, TLO,
Depth + 1))
1715 if (SimplifyDemandedBits(
Op.getOperand(3),
DemandedBits, DemandedElts,
1716 Known, TLO,
Depth + 1))
1718 if (SimplifyDemandedBits(
Op.getOperand(2),
DemandedBits, DemandedElts,
1719 Known2, TLO,
Depth + 1))
1762 if (std::optional<uint64_t> KnownSA =
1764 unsigned ShAmt = *KnownSA;
1774 if (std::optional<uint64_t> InnerSA =
1776 unsigned C1 = *InnerSA;
1778 int Diff = ShAmt - C1;
1797 if (ShAmt < InnerBits &&
DemandedBits.getActiveBits() <= InnerBits &&
1798 isTypeDesirableForOp(
ISD::SHL, InnerVT)) {
1815 InnerOp, DemandedElts,
Depth + 2)) {
1816 unsigned InnerShAmt = *SA2;
1817 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
1819 (InnerBits - InnerShAmt + ShAmt) &&
1833 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
1840 Known.
Zero <<= ShAmt;
1841 Known.
One <<= ShAmt;
1847 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1848 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
1859 Op.getNode()->hasOneUse()) {
1867 isTypeDesirableForOp(
ISD::SHL, SmallVT) &&
1870 assert(DemandedSize <= SmallVTBits &&
1871 "Narrowed below demanded bits?");
1891 isTypeDesirableForOp(
ISD::SHL, HalfVT) &&
1900 Flags.setNoSignedWrap(IsNSW);
1901 Flags.setNoUnsignedWrap(IsNUW);
1906 NewShiftAmt, Flags);
1919 if (SimplifyDemandedBits(Op0, DemandedFromOp, DemandedElts, Known, TLO,
1932 if (std::optional<uint64_t> MaxSA =
1934 unsigned ShAmt = *MaxSA;
1935 unsigned NumSignBits =
1938 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
1948 if (std::optional<uint64_t> KnownSA =
1950 unsigned ShAmt = *KnownSA;
1960 if (std::optional<uint64_t> InnerSA =
1962 unsigned C1 = *InnerSA;
1964 int Diff = ShAmt - C1;
1980 if (std::optional<uint64_t> InnerSA =
1982 unsigned C1 = *InnerSA;
1984 unsigned Combined = std::min(C1 + ShAmt,
BitWidth - 1);
1996 if (
Op->getFlags().hasExact())
2005 isTypeDesirableForOp(
ISD::SRL, HalfVT) &&
2021 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2031 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2032 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2046 if (std::optional<uint64_t> MaxSA =
2048 unsigned ShAmt = *MaxSA;
2052 unsigned NumSignBits =
2061 DemandedElts,
Depth + 1))
2085 if (std::optional<uint64_t> KnownSA =
2087 unsigned ShAmt = *KnownSA;
2094 if (std::optional<uint64_t> InnerSA =
2096 unsigned LowBits =
BitWidth - ShAmt;
2102 if (*InnerSA == ShAmt) {
2112 unsigned NumSignBits =
2114 if (NumSignBits > ShAmt)
2124 if (
Op->getFlags().hasExact())
2132 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
2143 Flags.setExact(
Op->getFlags().hasExact());
2161 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2162 Op0, InDemandedMask, DemandedElts, TLO.
DAG,
Depth + 1);
2172 DemandedElts,
Depth + 1))
2185 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2190 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1,
DemandedBits, DemandedElts,
2191 Known, TLO,
Depth + 1))
2200 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2203 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
2216 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2217 Op0, Demanded0, DemandedElts, TLO.
DAG,
Depth + 1);
2218 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2219 Op1, Demanded1, DemandedElts, TLO.
DAG,
Depth + 1);
2220 if (DemandedOp0 || DemandedOp1) {
2221 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
2222 DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;
2233 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
2234 Known2, TLO,
Depth + 1))
2250 unsigned Amt = SA->getAPIntValue().urem(
BitWidth);
2256 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2266 DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {
2271 DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {
2280 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
2290 unsigned Opc =
Op.getOpcode();
2297 unsigned NumSignBits =
2301 if (NumSignBits >= NumDemandedUpperBits)
2342 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2368 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ;
2376 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
2396 EVT ExVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2401 unsigned MinSignedBits =
2403 bool AlreadySignExtended = ExVTBits >= MinSignedBits;
2406 if (!AlreadySignExtended) {
2424 InputDemandedBits.
setBit(ExVTBits - 1);
2426 if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO,
2434 if (Known.
Zero[ExVTBits - 1])
2438 if (Known.
One[ExVTBits - 1]) {
2448 EVT HalfVT =
Op.getOperand(0).getValueType();
2456 if (SimplifyDemandedBits(
Op.getOperand(0), MaskLo, KnownLo, TLO,
Depth + 1))
2459 if (SimplifyDemandedBits(
Op.getOperand(1), MaskHi, KnownHi, TLO,
Depth + 1))
2462 Known = KnownHi.
concat(KnownLo);
2471 EVT SrcVT = Src.getValueType();
2480 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2491 APInt InDemandedElts = DemandedElts.
zext(InElts);
2492 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2501 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2502 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2512 EVT SrcVT = Src.getValueType();
2517 APInt InDemandedElts = DemandedElts.
zext(InElts);
2522 InDemandedBits.
setBit(InBits - 1);
2528 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2543 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2564 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2565 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2575 EVT SrcVT = Src.getValueType();
2582 if (IsLE && IsVecInReg && DemandedElts == 1 &&
2587 APInt InDemandedElts = DemandedElts.
zext(InElts);
2588 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
2595 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2596 Src, InDemandedBits, InDemandedElts, TLO.
DAG,
Depth + 1))
2605 unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
2607 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,
2617 if (
SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2618 Src, TruncMask, DemandedElts, TLO.
DAG,
Depth + 1))
2623 switch (Src.getOpcode()) {
2634 if (Src.getNode()->hasOneUse()) {
2646 std::optional<uint64_t> ShAmtC =
2648 if (!ShAmtC || *ShAmtC >=
BitWidth)
2674 EVT ZVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
2676 if (SimplifyDemandedBits(
Op.getOperand(0), ~InMask |
DemandedBits, Known,
2680 Known.
Zero |= ~InMask;
2681 Known.
One &= (~Known.Zero);
2687 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2688 unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2696 if (
auto *CIdx = dyn_cast<ConstantSDNode>(
Idx))
2697 if (CIdx->getAPIntValue().ult(NumSrcElts))
2704 DemandedSrcBits = DemandedSrcBits.
trunc(EltBitWidth);
2706 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
2712 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2713 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2729 EVT SrcVT = Src.getValueType();
2739 if ((OpVTLegal || i32Legal) && VT.
isSimple() && SrcVT != MVT::f16 &&
2740 SrcVT != MVT::f128) {
2742 EVT Ty = OpVTLegal ? VT : MVT::i32;
2746 unsigned OpVTSizeInBits =
Op.getValueSizeInBits();
2747 if (!OpVTLegal && OpVTSizeInBits > 32)
2749 unsigned ShVal =
Op.getValueSizeInBits() - 1;
2759 unsigned Scale =
BitWidth / NumSrcEltBits;
2763 for (
unsigned i = 0; i != Scale; ++i) {
2764 unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
2765 unsigned BitOffset = EltOffset * NumSrcEltBits;
2768 DemandedSrcBits |= Sub;
2769 for (
unsigned j = 0; j != NumElts; ++j)
2770 if (DemandedElts[j])
2771 DemandedSrcElts.
setBit((j * Scale) + i);
2775 APInt KnownSrcUndef, KnownSrcZero;
2776 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2777 KnownSrcZero, TLO,
Depth + 1))
2781 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2782 KnownSrcBits, TLO,
Depth + 1))
2784 }
else if (IsLE && (NumSrcEltBits %
BitWidth) == 0) {
2786 unsigned Scale = NumSrcEltBits /
BitWidth;
2790 for (
unsigned i = 0; i != NumElts; ++i)
2791 if (DemandedElts[i]) {
2794 DemandedSrcElts.
setBit(i / Scale);
2798 APInt KnownSrcUndef, KnownSrcZero;
2799 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
2800 KnownSrcZero, TLO,
Depth + 1))
2805 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
2806 KnownSrcBits, TLO,
Depth + 1))
2811 if (
SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
2812 Src, DemandedSrcBits, DemandedSrcElts, TLO.
DAG,
Depth + 1)) {
2834 if (
C &&
C->getAPIntValue().countr_zero() == CTZ) {
2853 SDValue Op0 =
Op.getOperand(0), Op1 =
Op.getOperand(1);
2858 auto GetDemandedBitsLHSMask = [&](
APInt Demanded,
2864 if (SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO,
2866 SimplifyDemandedBits(Op0, GetDemandedBitsLHSMask(LoMask, KnownOp1),
2867 DemandedElts, KnownOp0, TLO,
Depth + 1) ||
2883 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2884 Op0, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2885 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2886 Op1, LoMask, DemandedElts, TLO.
DAG,
Depth + 1);
2887 if (DemandedOp0 || DemandedOp1) {
2888 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2889 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2903 if (
C && !
C->isAllOnes() && !
C->isOne() &&
2904 (
C->getAPIntValue() | HighMask).isAllOnes()) {
2916 auto getShiftLeftAmt = [&HighMask](
SDValue Mul) ->
unsigned {
2943 if (
unsigned ShAmt = getShiftLeftAmt(Op0))
2946 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2947 return foldMul(
ISD::SUB, Op1.getOperand(0), Op0, ShAmt);
2951 if (
unsigned ShAmt = getShiftLeftAmt(Op1))
2952 return foldMul(
ISD::ADD, Op1.getOperand(0), Op0, ShAmt);
2960 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
2961 Flags.hasNoUnsignedWrap(), KnownOp0, KnownOp1);
2971 if (
Op.getValueType().isScalableVector())
2973 if (SimplifyDemandedBitsForTargetNode(
Op,
DemandedBits, DemandedElts,
2986 if (!isTargetCanonicalConstantNode(
Op) &&
2990 auto *C = dyn_cast<ConstantSDNode>(V);
2991 return C && C->isOpaque();
3012 const APInt &DemandedElts,
3018 APInt KnownUndef, KnownZero;
3020 SimplifyDemandedVectorElts(
Op, DemandedElts, KnownUndef, KnownZero, TLO);
3032 const APInt &UndefOp0,
3033 const APInt &UndefOp1) {
3036 "Vector binop only");
3041 UndefOp1.
getBitWidth() == NumElts &&
"Bad type for undef analysis");
3043 auto getUndefOrConstantElt = [&](
SDValue V,
unsigned Index,
3044 const APInt &UndefVals) {
3045 if (UndefVals[Index])
3048 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
3052 auto *
C = dyn_cast<ConstantSDNode>(Elt);
3053 if (isa<ConstantFPSDNode>(Elt) || Elt.
isUndef() || (
C && !
C->isOpaque()))
3061 for (
unsigned i = 0; i != NumElts; ++i) {
3080 bool AssumeSingleUse)
const {
3081 EVT VT =
Op.getValueType();
3082 unsigned Opcode =
Op.getOpcode();
3083 APInt DemandedElts = OriginalDemandedElts;
3089 if (!shouldSimplifyDemandedVectorElts(
Op, TLO))
3097 "Mask size mismatches value type element count!");
3106 if (!AssumeSingleUse && !
Op.getNode()->hasOneUse())
3110 if (DemandedElts == 0) {
3125 auto SimplifyDemandedVectorEltsBinOp = [&](
SDValue Op0,
SDValue Op1) {
3126 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
3128 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
3130 if (NewOp0 || NewOp1) {
3133 NewOp1 ? NewOp1 : Op1,
Op->getFlags());
3141 if (!DemandedElts[0]) {
3149 EVT SrcVT = Src.getValueType();
3161 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3171 EVT SrcVT = Src.getValueType();
3180 if (NumSrcElts == NumElts)
3181 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
3182 KnownZero, TLO,
Depth + 1);
3184 APInt SrcDemandedElts, SrcZero, SrcUndef;
3188 if ((NumElts % NumSrcElts) == 0) {
3189 unsigned Scale = NumElts / NumSrcElts;
3191 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3201 for (
unsigned i = 0; i != NumElts; ++i)
3202 if (DemandedElts[i]) {
3203 unsigned Ofs = (i % Scale) * EltSizeInBits;
3204 SrcDemandedBits.
setBits(Ofs, Ofs + EltSizeInBits);
3208 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
3216 for (
unsigned SubElt = 0; SubElt != Scale; ++SubElt) {
3220 for (
unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
3221 unsigned Elt = Scale * SrcElt + SubElt;
3222 if (DemandedElts[Elt])
3230 for (
unsigned i = 0; i != NumSrcElts; ++i) {
3231 if (SrcDemandedElts[i]) {
3233 KnownZero.
setBits(i * Scale, (i + 1) * Scale);
3235 KnownUndef.
setBits(i * Scale, (i + 1) * Scale);
3243 if ((NumSrcElts % NumElts) == 0) {
3244 unsigned Scale = NumSrcElts / NumElts;
3246 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
3252 for (
unsigned i = 0; i != NumElts; ++i) {
3253 if (DemandedElts[i]) {
3282 [&](
SDValue Elt) { return Op.getOperand(0) != Elt; })) {
3284 bool Updated =
false;
3285 for (
unsigned i = 0; i != NumElts; ++i) {
3286 if (!DemandedElts[i] && !Ops[i].
isUndef()) {
3296 for (
unsigned i = 0; i != NumElts; ++i) {
3298 if (
SrcOp.isUndef()) {
3300 }
else if (EltSizeInBits ==
SrcOp.getScalarValueSizeInBits() &&
3308 EVT SubVT =
Op.getOperand(0).getValueType();
3311 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3314 APInt SubUndef, SubZero;
3315 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
3318 KnownUndef.
insertBits(SubUndef, i * NumSubElts);
3319 KnownZero.
insertBits(SubZero, i * NumSubElts);
3324 bool FoundNewSub =
false;
3326 for (
unsigned i = 0; i != NumSubVecs; ++i) {
3329 SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts(
3330 SubOp, SubElts, TLO.
DAG,
Depth + 1);
3331 DemandedSubOps.
push_back(NewSubOp ? NewSubOp : SubOp);
3332 FoundNewSub = NewSubOp ?
true : FoundNewSub;
3350 APInt DemandedSrcElts = DemandedElts;
3353 APInt SubUndef, SubZero;
3354 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
3359 if (!DemandedSrcElts && !Src.isUndef())
3364 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
3372 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3373 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3374 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
3375 Sub, DemandedSubElts, TLO.
DAG,
Depth + 1);
3376 if (NewSrc || NewSub) {
3377 NewSrc = NewSrc ? NewSrc : Src;
3378 NewSub = NewSub ? NewSub : Sub;
3380 NewSub,
Op.getOperand(2));
3389 if (Src.getValueType().isScalableVector())
3392 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3395 APInt SrcUndef, SrcZero;
3396 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3404 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3405 Src, DemandedSrcElts, TLO.
DAG,
Depth + 1);
3417 auto *CIdx = dyn_cast<ConstantSDNode>(
Op.getOperand(2));
3421 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
3422 unsigned Idx = CIdx->getZExtValue();
3423 if (!DemandedElts[
Idx])
3426 APInt DemandedVecElts(DemandedElts);
3428 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
3429 KnownZero, TLO,
Depth + 1))
3438 APInt VecUndef, VecZero;
3439 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
3452 APInt UndefSel, ZeroSel;
3453 if (SimplifyDemandedVectorElts(Sel, DemandedElts, UndefSel, ZeroSel, TLO,
3458 APInt DemandedLHS(DemandedElts);
3459 APInt DemandedRHS(DemandedElts);
3460 APInt UndefLHS, ZeroLHS;
3461 APInt UndefRHS, ZeroRHS;
3462 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3465 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3469 KnownUndef = UndefLHS & UndefRHS;
3470 KnownZero = ZeroLHS & ZeroRHS;
3474 APInt DemandedSel = DemandedElts & ~KnownZero;
3475 if (DemandedSel != DemandedElts)
3476 if (SimplifyDemandedVectorElts(Sel, DemandedSel, UndefSel, ZeroSel, TLO,
3488 APInt DemandedLHS(NumElts, 0);
3489 APInt DemandedRHS(NumElts, 0);
3490 for (
unsigned i = 0; i != NumElts; ++i) {
3491 int M = ShuffleMask[i];
3492 if (M < 0 || !DemandedElts[i])
3494 assert(0 <= M && M < (
int)(2 * NumElts) &&
"Shuffle index out of range");
3495 if (M < (
int)NumElts)
3498 DemandedRHS.
setBit(M - NumElts);
3502 APInt UndefLHS, ZeroLHS;
3503 APInt UndefRHS, ZeroRHS;
3504 if (SimplifyDemandedVectorElts(
LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3507 if (SimplifyDemandedVectorElts(
RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3512 bool Updated =
false;
3513 bool IdentityLHS =
true, IdentityRHS =
true;
3515 for (
unsigned i = 0; i != NumElts; ++i) {
3516 int &M = NewMask[i];
3519 if (!DemandedElts[i] || (M < (
int)NumElts && UndefLHS[M]) ||
3520 (M >= (
int)NumElts && UndefRHS[M - NumElts])) {
3524 IdentityLHS &= (M < 0) || (M == (
int)i);
3525 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
3530 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.
LegalOps) {
3532 buildLegalVectorShuffle(VT,
DL,
LHS,
RHS, NewMask, TLO.
DAG);
3538 for (
unsigned i = 0; i != NumElts; ++i) {
3539 int M = ShuffleMask[i];
3542 }
else if (M < (
int)NumElts) {
3548 if (UndefRHS[M - NumElts])
3550 if (ZeroRHS[M - NumElts])
3559 APInt SrcUndef, SrcZero;
3561 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3562 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
3563 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3570 Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
3571 DemandedSrcElts == 1) {
3584 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() ==
ISD::AND &&
3585 Op->isOnlyUserOf(Src.getNode()) &&
3586 Op.getValueSizeInBits() == Src.getValueSizeInBits()) {
3588 EVT SrcVT = Src.getValueType();
3595 ISD::AND,
DL, SrcVT, {Src.getOperand(1), Mask})) {
3609 if (Op0 == Op1 &&
Op->isOnlyUserOf(Op0.
getNode())) {
3610 APInt UndefLHS, ZeroLHS;
3611 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3632 APInt UndefRHS, ZeroRHS;
3633 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3636 APInt UndefLHS, ZeroLHS;
3637 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3641 KnownZero = ZeroLHS & ZeroRHS;
3647 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3659 APInt UndefRHS, ZeroRHS;
3660 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3663 APInt UndefLHS, ZeroLHS;
3664 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3668 KnownZero = ZeroLHS;
3669 KnownUndef = UndefLHS & UndefRHS;
3674 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3685 APInt SrcUndef, SrcZero;
3686 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
3691 APInt DemandedElts0 = DemandedElts & ~SrcZero;
3692 if (SimplifyDemandedVectorElts(Op0, DemandedElts0, KnownUndef, KnownZero,
3696 KnownUndef &= DemandedElts0;
3697 KnownZero &= DemandedElts0;
3702 if (DemandedElts.
isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))
3709 KnownZero |= SrcZero;
3710 KnownUndef &= SrcUndef;
3711 KnownUndef &= ~KnownZero;
3715 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3722 if (SimplifyDemandedVectorElts(
Op.getOperand(0), DemandedElts, KnownUndef,
3723 KnownZero, TLO,
Depth + 1))
3727 if (
SDValue NewOp = SimplifyMultipleUseDemandedVectorElts(
3728 Op.getOperand(0), DemandedElts, TLO.
DAG,
Depth + 1))
3742 if (SimplifyDemandedVectorElts(
Op.getOperand(0), DemandedElts, KnownUndef,
3743 KnownZero, TLO,
Depth + 1))
3749 if (SimplifyDemandedVectorEltsForTargetNode(
Op, DemandedElts, KnownUndef,
3750 KnownZero, TLO,
Depth))
3755 if (SimplifyDemandedBits(
Op,
DemandedBits, OriginalDemandedElts, Known,
3756 TLO,
Depth, AssumeSingleUse))
3762 assert((KnownUndef & KnownZero) == 0 &&
"Elements flagged as undef AND zero");
3776 const APInt &DemandedElts,
3778 unsigned Depth)
const {
3783 "Should use MaskedValueIsZero if you don't know whether Op"
3784 " is a target node!");
3791 unsigned Depth)
const {
3803 unsigned Depth)
const {
3812 unsigned Depth)
const {
3817 "Should use ComputeNumSignBits if you don't know whether Op"
3818 " is a target node!");
3835 "Should use SimplifyDemandedVectorElts if you don't know whether Op"
3836 " is a target node!");
3847 "Should use SimplifyDemandedBits if you don't know whether Op"
3848 " is a target node!");
3849 computeKnownBitsForTargetNode(
Op, Known, DemandedElts, TLO.
DAG,
Depth);
3861 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
3862 " is a target node!");
3895 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
3896 " is a target node!");
3900 return !canCreateUndefOrPoisonForTargetNode(
Op, DemandedElts, DAG,
PoisonOnly,
3903 return DAG.isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly,
3915 "Should use canCreateUndefOrPoison if you don't know whether Op"
3916 " is a target node!");
3924 unsigned Depth)
const {
3929 "Should use isKnownNeverNaN if you don't know whether Op"
3930 " is a target node!");
3935 const APInt &DemandedElts,
3938 unsigned Depth)
const {
3943 "Should use isSplatValue if you don't know whether Op"
3944 " is a target node!");
3959 CVal = CN->getAPIntValue();
3960 EltWidth =
N.getValueType().getScalarSizeInBits();
3967 CVal = CVal.
trunc(EltWidth);
3973 return CVal.
isOne();
4015 return (
N->isOne() && !SExt) || (SExt && (
N->getValueType(0) != MVT::i1));
4018 return N->isAllOnes() && SExt;
4027 DAGCombinerInfo &DCI)
const {
4055 auto *AndC = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
4056 if (AndC &&
isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&
4059 AndC->getAPIntValue().getActiveBits());
4086 if (isXAndYEqZeroPreferableToXAndYEqY(
Cond, OpVT) &&
4094 if (DCI.isBeforeLegalizeOps() ||
4128SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
4133 if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
4142 if (!(C01 = dyn_cast<ConstantSDNode>(N0->
getOperand(1))))
4146 EVT XVT =
X.getValueType();
4170 auto checkConstants = [&
I1, &I01]() ->
bool {
4175 if (checkConstants()) {
4183 if (!checkConstants())
4189 const unsigned KeptBits =
I1.logBase2();
4190 const unsigned KeptBitsMinusOne = I01.
logBase2();
4193 if (KeptBits != (KeptBitsMinusOne + 1))
4207 return DAG.
getSetCC(
DL, SCCVT, SExtInReg,
X, NewCond);
4211SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
4213 DAGCombinerInfo &DCI,
const SDLoc &
DL)
const {
4215 "Should be a comparison with 0.");
4217 "Valid only for [in]equality comparisons.");
4219 unsigned NewShiftOpcode;
4229 unsigned OldShiftOpcode =
V.getOpcode();
4230 switch (OldShiftOpcode) {
4242 C =
V.getOperand(0);
4247 Y =
V.getOperand(1);
4252 X, XC,
CC,
Y, OldShiftOpcode, NewShiftOpcode, DAG);
4269 EVT VT =
X.getValueType();
4284 DAGCombinerInfo &DCI)
const {
4287 "Unexpected binop");
4315 if (!DCI.isCalledByLegalizer())
4316 DCI.AddToWorklist(YShl1.
getNode());
4331 if (CTPOP.getOpcode() !=
ISD::CTPOP || !CTPOP.hasOneUse())
4334 EVT CTVT = CTPOP.getValueType();
4335 SDValue CTOp = CTPOP.getOperand(0);
4355 for (
unsigned i = 0; i <
Passes; i++) {
4404 auto getRotateSource = [](
SDValue X) {
4406 return X.getOperand(0);
4413 if (
SDValue R = getRotateSource(N0))
4446 if (!C1 || !C1->
isZero())
4455 if (!ShAmtC || ShAmtC->getAPIntValue().uge(
BitWidth))
4459 unsigned ShAmt = ShAmtC->getZExtValue();
4468 if (
Or.getOperand(0) ==
Other) {
4469 X =
Or.getOperand(0);
4470 Y =
Or.getOperand(1);
4473 if (
Or.getOperand(1) ==
Other) {
4474 X =
Or.getOperand(1);
4475 Y =
Or.getOperand(0);
4485 if (matchOr(F0, F1)) {
4492 if (matchOr(F1, F0)) {
4508 const SDLoc &dl)
const {
4518 bool N0ConstOrSplat =
4520 bool N1ConstOrSplat =
4528 if (N0ConstOrSplat && !N1ConstOrSplat &&
4531 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4537 if (!N0ConstOrSplat && !N1ConstOrSplat &&
4542 return DAG.
getSetCC(dl, VT, N1, N0, SwappedCC);
4551 const APInt &C1 = N1C->getAPIntValue();
4571 return DAG.
getNode(LogicOp, dl, VT, IsXZero, IsYZero);
4601 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
4602 const APInt &C1 = N1C->getAPIntValue();
4617 if (
auto *
C = dyn_cast<ConstantSDNode>(N0->
getOperand(1)))
4618 if ((
C->getAPIntValue()+1).isPowerOf2()) {
4619 MinBits =
C->getAPIntValue().countr_one();
4627 }
else if (
auto *LN0 = dyn_cast<LoadSDNode>(N0)) {
4630 MinBits = LN0->getMemoryVT().getSizeInBits();
4634 MinBits = LN0->getMemoryVT().getSizeInBits();
4645 MinBits >= ReqdBits) {
4647 if (isTypeDesirableForOp(
ISD::SETCC, MinVT)) {
4650 if (MinBits == 1 && C1 == 1)
4669 if (TopSetCC.
getValueType() == MVT::i1 && VT == MVT::i1 &&
4682 cast<CondCodeSDNode>(TopSetCC.
getOperand(2))->get(),
4701 auto *Lod = cast<LoadSDNode>(N0.
getOperand(0));
4703 unsigned bestWidth = 0, bestOffset = 0;
4704 if (Lod->isSimple() && Lod->isUnindexed() &&
4705 (Lod->getMemoryVT().isByteSized() ||
4707 unsigned memWidth = Lod->getMemoryVT().getStoreSizeInBits();
4709 unsigned maskWidth = origWidth;
4713 origWidth = Lod->getMemoryVT().getSizeInBits();
4717 for (
unsigned width = 8; width < origWidth; width *= 2) {
4724 unsigned maxOffset = origWidth - width;
4725 for (
unsigned offset = 0; offset <= maxOffset; offset += 8) {
4726 if (Mask.isSubsetOf(newMask)) {
4727 unsigned ptrOffset =
4729 unsigned IsFast = 0;
4732 *DAG.
getContext(), Layout, newVT, Lod->getAddressSpace(),
4733 NewAlign, Lod->getMemOperand()->getFlags(), &IsFast) &&
4735 bestOffset = ptrOffset / 8;
4736 bestMask = Mask.lshr(offset);
4750 if (bestOffset != 0)
4754 Lod->getPointerInfo().getWithOffset(bestOffset),
4755 Lod->getOriginalAlign());
4834 ExtDstTy != ExtSrcTy &&
"Unexpected types!");
4841 return DAG.
getSetCC(dl, VT, ZextOp,
4843 }
else if ((N1C->isZero() || N1C->isOne()) &&
4890 return DAG.
getSetCC(dl, VT, Val, N1,
4893 }
else if (N1C->isOne()) {
4930 cast<VTSDNode>(Op0.
getOperand(1))->getVT() == MVT::i1)
4961 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1,
Cond, DCI, dl))
4968 const APInt &C1 = N1C->getAPIntValue();
4970 APInt MinVal, MaxVal;
4992 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5012 (!N1C->isOpaque() || (
C.getBitWidth() <= 64 &&
5060 if (
SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
5061 VT, N0, N1,
Cond, DCI, dl))
5068 bool CmpZero = N1C->isZero();
5069 bool CmpNegOne = N1C->isAllOnes();
5070 if ((CmpZero || CmpNegOne) && N0.
hasOneUse()) {
5073 unsigned EltBits = V.getScalarValueSizeInBits();
5074 if (V.getOpcode() !=
ISD::OR || (EltBits % 2) != 0)
5081 isa<ConstantSDNode>(
RHS.getOperand(1)) &&
5082 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5085 Hi =
RHS.getOperand(0);
5089 isa<ConstantSDNode>(
LHS.getOperand(1)) &&
5090 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
5093 Hi =
LHS.getOperand(0);
5101 unsigned HalfBits = EltBits / 2;
5112 if (IsConcat(N0,
Lo,
Hi))
5113 return MergeConcat(
Lo,
Hi);
5150 if (
auto *N1C = dyn_cast<ConstantSDNode>(N1.
getNode())) {
5151 const APInt &C1 = N1C->getAPIntValue();
5163 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5166 unsigned ShCt = AndRHS->getAPIntValue().logBase2();
5167 if (AndRHS->getAPIntValue().isPowerOf2() &&
5174 }
else if (
Cond ==
ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
5193 if (
auto *AndRHS = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5194 const APInt &AndRHSC = AndRHS->getAPIntValue();
5231 return DAG.
getSetCC(dl, VT, Shift, CmpRHS, NewCond);
5237 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) {
5238 auto *CFP = cast<ConstantFPSDNode>(N1);
5239 assert(!CFP->getValueAPF().isNaN() &&
"Unexpected NaN value");
5260 !
isFPImmLegal(CFP->getValueAPF(), CFP->getValueType(0))) {
5279 if (CFP->getValueAPF().isInfinity()) {
5280 bool IsNegInf = CFP->getValueAPF().isNegative();
5291 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5300 "Integer types should be handled by FoldSetCC");
5306 if (UOF ==
unsigned(EqTrue))
5311 if (NewCond !=
Cond &&
5314 return DAG.
getSetCC(dl, VT, N0, N1, NewCond);
5321 if ((isSignedIntSetCC(
Cond) || isUnsignedIntSetCC(
Cond)) &&
5358 bool LegalRHSImm =
false;
5360 if (
auto *RHSC = dyn_cast<ConstantSDNode>(N1)) {
5361 if (
auto *LHSR = dyn_cast<ConstantSDNode>(N0.
getOperand(1))) {
5366 DAG.
getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(),
5374 DAG.
getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(),
5380 if (
auto *SUBC = dyn_cast<ConstantSDNode>(N0.
getOperand(0)))
5384 DAG.
getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(),
5389 if (RHSC->getValueType(0).getSizeInBits() <= 64)
5398 if (
SDValue V = foldSetCCWithBinOp(VT, N0, N1,
Cond, dl, DCI))
5404 if (
SDValue V = foldSetCCWithBinOp(VT, N1, N0,
Cond, dl, DCI))
5407 if (
SDValue V = foldSetCCWithAnd(VT, N0, N1,
Cond, dl, DCI))
5418 if (
SDValue Folded = buildUREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5421 if (
SDValue Folded = buildSREMEqFold(VT, N0, N1,
Cond, DCI, dl))
5434 N0 = DAG.
getNOT(dl, Temp, OpVT);
5443 Temp = DAG.
getNOT(dl, N0, OpVT);
5450 Temp = DAG.
getNOT(dl, N1, OpVT);
5457 Temp = DAG.
getNOT(dl, N0, OpVT);
5464 Temp = DAG.
getNOT(dl, N1, OpVT);
5473 N0 = DAG.
getNode(ExtendCode, dl, VT, N0);
5489 if (
auto *GASD = dyn_cast<GlobalAddressSDNode>(
N)) {
5490 GA = GASD->getGlobal();
5491 Offset += GASD->getOffset();
5499 if (
auto *V = dyn_cast<ConstantSDNode>(N2)) {
5500 Offset += V->getSExtValue();
5504 if (
auto *V = dyn_cast<ConstantSDNode>(N1)) {
5505 Offset += V->getSExtValue();
5526 unsigned S = Constraint.
size();
5529 switch (Constraint[0]) {
5532 return C_RegisterClass;
5560 if (S > 1 && Constraint[0] ==
'{' && Constraint[S - 1] ==
'}') {
5561 if (S == 8 && Constraint.
substr(1, 6) ==
"memory")
5589 std::vector<SDValue> &Ops,
5592 if (Constraint.
size() > 1)
5595 char ConstraintLetter = Constraint[0];
5596 switch (ConstraintLetter) {
5612 if ((
C = dyn_cast<ConstantSDNode>(
Op)) && ConstraintLetter !=
's') {
5616 bool IsBool =
C->getConstantIntValue()->getBitWidth() == 1;
5626 if (ConstraintLetter !=
'n') {
5627 if (
const auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
5629 GA->getValueType(0),
5630 Offset + GA->getOffset()));
5633 if (
const auto *BA = dyn_cast<BlockAddressSDNode>(
Op)) {
5635 BA->getBlockAddress(), BA->getValueType(0),
5636 Offset + BA->getOffset(), BA->getTargetFlags()));
5639 if (isa<BasicBlockSDNode>(
Op)) {
5644 const unsigned OpCode =
Op.getOpcode();
5646 if ((
C = dyn_cast<ConstantSDNode>(
Op.getOperand(0))))
5647 Op =
Op.getOperand(1);
5650 (
C = dyn_cast<ConstantSDNode>(
Op.getOperand(1))))
5651 Op =
Op.getOperand(0);
5668std::pair<unsigned, const TargetRegisterClass *>
5674 assert(*(Constraint.
end() - 1) ==
'}' &&
"Not a brace enclosed constraint?");
5679 std::pair<unsigned, const TargetRegisterClass *> R =
5691 std::pair<unsigned, const TargetRegisterClass *> S =
5692 std::make_pair(PR, RC);
5714 assert(!ConstraintCode.empty() &&
"No known constraint!");
5715 return isdigit(
static_cast<unsigned char>(ConstraintCode[0]));
5721 assert(!ConstraintCode.empty() &&
"No known constraint!");
5722 return atoi(ConstraintCode.c_str());
5736 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
5737 unsigned maCount = 0;
5743 unsigned LabelNo = 0;
5746 ConstraintOperands.emplace_back(std::move(CI));
5756 switch (OpInfo.
Type) {
5766 assert(!Call.getType()->isVoidTy() &&
"Bad inline asm!");
5767 if (
auto *STy = dyn_cast<StructType>(Call.getType())) {
5772 assert(ResNo == 0 &&
"Asm only has one result!");
5782 OpInfo.
CallOperandVal = cast<CallBrInst>(&Call)->getIndirectDest(LabelNo);
5793 OpTy = Call.getParamElementType(ArgNo);
5794 assert(OpTy &&
"Indirect operand must have elementtype attribute");
5798 if (
StructType *STy = dyn_cast<StructType>(OpTy))
5799 if (STy->getNumElements() == 1)
5800 OpTy = STy->getElementType(0);
5805 unsigned BitSize =
DL.getTypeSizeInBits(OpTy);
5826 if (!ConstraintOperands.empty()) {
5828 unsigned bestMAIndex = 0;
5829 int bestWeight = -1;
5835 for (maIndex = 0; maIndex < maCount; ++maIndex) {
5837 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5838 cIndex != eIndex; ++cIndex) {
5859 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
5864 weightSum += weight;
5867 if (weightSum > bestWeight) {
5868 bestWeight = weightSum;
5869 bestMAIndex = maIndex;
5876 cInfo.selectAlternative(bestMAIndex);
5881 for (
unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5882 cIndex != eIndex; ++cIndex) {
5893 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
5896 std::pair<unsigned, const TargetRegisterClass *> InputRC =
5903 if ((OutOpIsIntOrFP != InOpIsIntOrFP) ||
5904 (MatchRC.second != InputRC.second)) {
5906 " with a matching output constraint of"
5907 " incompatible type!");
5913 return ConstraintOperands;
5948 if (maIndex >= (
int)
info.multipleAlternatives.size())
5949 rCodes = &
info.Codes;
5951 rCodes = &
info.multipleAlternatives[maIndex].Codes;
5955 for (
const std::string &rCode : *rCodes) {
5957 getSingleConstraintMatchWeight(
info, rCode.c_str());
5958 if (weight > BestWeight)
5959 BestWeight = weight;
5972 Value *CallOperandVal =
info.CallOperandVal;
5975 if (!CallOperandVal)
5978 switch (*constraint) {
5981 if (isa<ConstantInt>(CallOperandVal))
5982 weight = CW_Constant;
5985 if (isa<GlobalValue>(CallOperandVal))
5986 weight = CW_Constant;
5990 if (isa<ConstantFP>(CallOperandVal))
5991 weight = CW_Constant;
6004 weight = CW_Register;
6008 weight = CW_Default;
6042 Ret.reserve(OpInfo.
Codes.size());
6057 Ret.emplace_back(Code, CType);
6062 return getConstraintPiority(a.second) > getConstraintPiority(b.second);
6076 "need immediate or other");
6081 std::vector<SDValue> ResultOps;
6083 return !ResultOps.empty();
6091 assert(!OpInfo.
Codes.empty() &&
"Must have at least one constraint");
6094 if (OpInfo.
Codes.size() == 1) {
6102 unsigned BestIdx = 0;
6103 for (
const unsigned E =
G.size();
6110 if (BestIdx + 1 == E) {
6126 if (isa<ConstantInt>(v) || isa<Function>(v)) {
6130 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) {
6137 if (
const char *Repl = LowerXConstraint(OpInfo.
ConstraintVT)) {
6152 EVT VT =
N->getValueType(0);
6157 bool UseSRA =
false;
6163 APInt Divisor =
C->getAPIntValue();
6185 "Expected matchUnaryPredicate to return one element for scalable "
6190 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6192 Factor = Factors[0];
6210 EVT VT =
N->getValueType(0);
6215 bool UseSRL =
false;
6221 APInt Divisor =
C->getAPIntValue();
6246 "Expected matchUnaryPredicate to return one element for scalable "
6251 assert(isa<ConstantSDNode>(Op1) &&
"Expected a constant");
6253 Factor = Factors[0];
6296 EVT VT =
N->getValueType(0);
6332 bool IsAfterLegalization,
6333 bool IsAfterLegalTypes,
6336 EVT VT =
N->getValueType(0);
6362 if (
N->getFlags().hasExact())
6371 const APInt &Divisor =
C->getAPIntValue();
6373 int NumeratorFactor = 0;
6384 NumeratorFactor = 1;
6387 NumeratorFactor = -1;
6404 SDValue MagicFactor, Factor, Shift, ShiftMask;
6412 Shifts.
size() == 1 && ShiftMasks.
size() == 1 &&
6413 "Expected matchUnaryPredicate to return one element for scalable "
6420 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6421 MagicFactor = MagicFactors[0];
6422 Factor = Factors[0];
6424 ShiftMask = ShiftMasks[0];
6470 SDValue Q = GetMULHS(N0, MagicFactor);
6500 bool IsAfterLegalization,
6501 bool IsAfterLegalTypes,
6504 EVT VT =
N->getValueType(0);
6530 if (
N->getFlags().hasExact())
6540 bool UseNPQ =
false, UsePreShift =
false, UsePostShift =
false;
6546 const APInt& Divisor =
C->getAPIntValue();
6548 SDValue PreShift, MagicFactor, NPQFactor, PostShift;
6552 if (Divisor.
isOne()) {
6553 PreShift = PostShift = DAG.
getUNDEF(ShSVT);
6554 MagicFactor = NPQFactor = DAG.
getUNDEF(SVT);
6558 Divisor, std::min(KnownLeadingZeros, Divisor.
countl_zero()));
6563 "We shouldn't generate an undefined shift!");
6565 "We shouldn't generate an undefined shift!");
6567 "Unexpected pre-shift");
6574 UseNPQ |= magics.
IsAdd;
6575 UsePreShift |= magics.
PreShift != 0;
6590 SDValue PreShift, PostShift, MagicFactor, NPQFactor;
6598 NPQFactors.
size() == 1 && PostShifts.
size() == 1 &&
6599 "Expected matchUnaryPredicate to return one for scalable vectors");
6605 assert(isa<ConstantSDNode>(N1) &&
"Expected a constant");
6606 PreShift = PreShifts[0];
6607 MagicFactor = MagicFactors[0];
6608 PostShift = PostShifts[0];
6660 Q = GetMULHU(Q, MagicFactor);
6673 NPQ = GetMULHU(NPQ, NPQFactor);
6692 return DAG.
getSelect(dl, VT, IsOne, N0, Q);
6701 std::function<
bool(
SDValue)> Predicate,
6706 if (SplatValue != Values.
end()) {
6709 return Value == *SplatValue || Predicate(
Value);
6711 Replacement = *SplatValue;
6715 if (!AlternativeReplacement)
6718 Replacement = AlternativeReplacement;
6720 std::replace_if(Values.
begin(), Values.
end(), Predicate, Replacement);
6731 DAGCombinerInfo &DCI,
6734 if (
SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6737 DCI.AddToWorklist(
N);
6745TargetLowering::prepareUREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6747 DAGCombinerInfo &DCI,
const SDLoc &
DL,
6755 "Only applicable for (in)equality comparisons.");
6768 bool ComparingWithAllZeros =
true;
6769 bool AllComparisonsWithNonZerosAreTautological =
true;
6770 bool HadTautologicalLanes =
false;
6771 bool AllLanesAreTautological =
true;
6772 bool HadEvenDivisor =
false;
6773 bool AllDivisorsArePowerOfTwo =
true;
6774 bool HadTautologicalInvertedLanes =
false;
6783 const APInt &
Cmp = CCmp->getAPIntValue();
6785 ComparingWithAllZeros &=
Cmp.isZero();
6791 bool TautologicalInvertedLane =
D.ule(Cmp);
6792 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
6797 bool TautologicalLane =
D.isOne() || TautologicalInvertedLane;
6798 HadTautologicalLanes |= TautologicalLane;
6799 AllLanesAreTautological &= TautologicalLane;
6805 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
6808 unsigned K =
D.countr_zero();
6809 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
6813 HadEvenDivisor |= (
K != 0);
6816 AllDivisorsArePowerOfTwo &= D0.
isOne();
6820 unsigned W =
D.getBitWidth();
6822 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
6835 "We are expecting that K is always less than all-ones for ShSVT");
6838 if (TautologicalLane) {
6864 if (AllLanesAreTautological)
6869 if (AllDivisorsArePowerOfTwo)
6874 if (HadTautologicalLanes) {
6889 "Expected matchBinaryPredicate to return one element for "
6900 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
6904 "Expecting that the types on LHS and RHS of comparisons match.");
6914 if (HadEvenDivisor) {
6927 if (!HadTautologicalInvertedLanes)
6933 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
6940 SDValue TautologicalInvertedChannels =
6950 DL, SETCCVT, SETCCVT);
6952 Replacement, NewCC);
6960 TautologicalInvertedChannels);
6973 DAGCombinerInfo &DCI,
6976 if (
SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode,
Cond,
6978 assert(Built.
size() <= 7 &&
"Max size prediction failed.");
6980 DCI.AddToWorklist(
N);
6988TargetLowering::prepareSREMEqFold(
EVT SETCCVT,
SDValue REMNode,
6990 DAGCombinerInfo &DCI,
const SDLoc &
DL,
7015 "Only applicable for (in)equality comparisons.");
7031 if (!CompTarget || !CompTarget->
isZero())
7034 bool HadIntMinDivisor =
false;
7035 bool HadOneDivisor =
false;
7036 bool AllDivisorsAreOnes =
true;
7037 bool HadEvenDivisor =
false;
7038 bool NeedToApplyOffset =
false;
7039 bool AllDivisorsArePowerOfTwo =
true;
7054 HadIntMinDivisor |=
D.isMinSignedValue();
7057 HadOneDivisor |=
D.isOne();
7058 AllDivisorsAreOnes &=
D.isOne();
7061 unsigned K =
D.countr_zero();
7062 assert((!
D.isOne() || (K == 0)) &&
"For divisor '1' we won't rotate.");
7065 if (!
D.isMinSignedValue()) {
7068 HadEvenDivisor |= (
K != 0);
7073 AllDivisorsArePowerOfTwo &= D0.
isOne();
7077 unsigned W =
D.getBitWidth();
7079 assert((D0 *
P).isOne() &&
"Multiplicative inverse basic check failed.");
7085 if (!
D.isMinSignedValue()) {
7088 NeedToApplyOffset |=
A != 0;
7095 "We are expecting that A is always less than all-ones for SVT");
7097 "We are expecting that K is always less than all-ones for ShSVT");
7137 if (AllDivisorsAreOnes)
7142 if (AllDivisorsArePowerOfTwo)
7145 SDValue PVal, AVal, KVal, QVal;
7147 if (HadOneDivisor) {
7167 QAmts.
size() == 1 &&
7168 "Expected matchUnaryPredicate to return one element for scalable "
7175 assert(isa<ConstantSDNode>(
D) &&
"Expected a constant");
7186 if (NeedToApplyOffset) {
7198 if (HadEvenDivisor) {
7213 if (!HadIntMinDivisor)
7219 assert(VT.
isVector() &&
"Can/should only get here for vectors.");
7254 MaskedIsZero, Fold);
7261 if (!isa<ConstantSDNode>(
Op.getOperand(0))) {
7263 "be a constant integer");
7273 EVT VT =
Op.getValueType();
7296 bool LegalOps,
bool OptForSize,
7298 unsigned Depth)
const {
7300 if (
Op.getOpcode() ==
ISD::FNEG ||
Op.getOpcode() == ISD::VP_FNEG) {
7302 return Op.getOperand(0);
7313 EVT VT =
Op.getValueType();
7314 unsigned Opcode =
Op.getOpcode();
7324 auto RemoveDeadNode = [&](
SDValue N) {
7325 if (
N &&
N.getNode()->use_empty())
7334 std::list<HandleSDNode> Handles;
7345 if (LegalOps && !IsOpLegal)
7348 APFloat V = cast<ConstantFPSDNode>(
Op)->getValueAPF();
7362 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
7370 return N.isUndef() ||
7375 if (LegalOps && !IsOpLegal)
7384 APFloat V = cast<ConstantFPSDNode>(
C)->getValueAPF();
7392 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7403 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7406 Handles.emplace_back(NegX);
7411 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7417 if (NegX && (CostX <= CostY)) {
7421 RemoveDeadNode(NegY);
7430 RemoveDeadNode(NegX);
7437 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7459 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7462 Handles.emplace_back(NegX);
7467 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7473 if (NegX && (CostX <= CostY)) {
7477 RemoveDeadNode(NegY);
7483 if (
C->isExactlyValue(2.0) &&
Op.getOpcode() ==
ISD::FMUL)
7491 RemoveDeadNode(NegX);
7498 if (!
Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
7501 SDValue X =
Op.getOperand(0),
Y =
Op.getOperand(1), Z =
Op.getOperand(2);
7504 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ,
Depth);
7510 Handles.emplace_back(NegZ);
7515 getNegatedExpression(
X, DAG, LegalOps, OptForSize, CostX,
Depth);
7518 Handles.emplace_back(NegX);
7523 getNegatedExpression(
Y, DAG, LegalOps, OptForSize, CostY,
Depth);
7529 if (NegX && (CostX <= CostY)) {
7530 Cost = std::min(CostX, CostZ);
7533 RemoveDeadNode(NegY);
7539 Cost = std::min(CostY, CostZ);
7542 RemoveDeadNode(NegX);
7550 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7552 return DAG.
getNode(Opcode,
DL, VT, NegV);
7555 if (
SDValue NegV = getNegatedExpression(
Op.getOperand(0), DAG, LegalOps,
7566 getNegatedExpression(
LHS, DAG, LegalOps, OptForSize, CostLHS,
Depth);
7568 RemoveDeadNode(NegLHS);
7573 Handles.emplace_back(NegLHS);
7578 getNegatedExpression(
RHS, DAG, LegalOps, OptForSize, CostRHS,
Depth);
7586 RemoveDeadNode(NegLHS);
7587 RemoveDeadNode(NegRHS);
7591 Cost = std::min(CostLHS, CostRHS);
7592 return DAG.
getSelect(
DL, VT,
Op.getOperand(0), NegLHS, NegRHS);
7621 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
7634 if ((
Signed && HasSMUL_LOHI) || (!
Signed && HasUMUL_LOHI)) {
7662 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false)) {
7663 Result.push_back(
Lo);
7664 Result.push_back(
Hi);
7667 Result.push_back(Zero);
7668 Result.push_back(Zero);
7679 if (MakeMUL_LOHI(LL, RL,
Lo,
Hi,
true)) {
7680 Result.push_back(
Lo);
7681 Result.push_back(
Hi);
7686 unsigned ShiftAmount = OuterBitSize - InnerBitSize;
7701 if (!MakeMUL_LOHI(LL, RL,
Lo,
Hi,
false))
7704 Result.push_back(
Lo);
7711 Result.push_back(
Hi);
7724 if (!MakeMUL_LOHI(LL, RH,
Lo,
Hi,
false))
7731 if (!MakeMUL_LOHI(LH, RL,
Lo,
Hi,
false))
7783 bool Ok = expandMUL_LOHI(
N->getOpcode(),
N->getValueType(0),
SDLoc(
N),
7784 N->getOperand(0),
N->getOperand(1), Result, HiLoVT,
7785 DAG, Kind, LL, LH, RL, RH);
7787 assert(Result.size() == 2);
7819 unsigned Opcode =
N->getOpcode();
7820 EVT VT =
N->getValueType(0);
7827 "Unexpected opcode");
7829 auto *CN = dyn_cast<ConstantSDNode>(
N->getOperand(1));
7833 APInt Divisor = CN->getAPIntValue();
7841 if (Divisor.
uge(HalfMaxPlus1))
7859 unsigned TrailingZeros = 0;
7873 if (HalfMaxPlus1.
urem(Divisor).
isOne()) {
7874 assert(!LL == !LH &&
"Expected both input halves or no input halves!");
7876 std::tie(LL, LH) = DAG.
SplitScalar(
N->getOperand(0), dl, HiLoVT, HiLoVT);
7880 if (TrailingZeros) {
7948 std::tie(QuotL, QuotH) = DAG.
SplitScalar(Quotient, dl, HiLoVT, HiLoVT);
7949 Result.push_back(QuotL);
7950 Result.push_back(QuotH);
7956 if (TrailingZeros) {
7962 Result.push_back(RemL);
7978 EVT VT =
Node->getValueType(0);
7988 bool IsFSHL =
Node->getOpcode() == ISD::VP_FSHL;
7991 EVT ShVT = Z.getValueType();
7997 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
7998 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitWidthC, ShAmt, Mask, VL);
7999 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, IsFSHL ? ShAmt : InvShAmt, Mask,
8001 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, IsFSHL ? InvShAmt : ShAmt, Mask,
8009 ShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, Z, BitMask, Mask, VL);
8013 InvShAmt = DAG.
getNode(ISD::VP_AND,
DL, ShVT, NotZ, BitMask, Mask, VL);
8016 ShAmt = DAG.
getNode(ISD::VP_UREM,
DL, ShVT, Z, BitWidthC, Mask, VL);
8017 InvShAmt = DAG.
getNode(ISD::VP_SUB,
DL, ShVT, BitMask, ShAmt, Mask, VL);
8022 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT,
X, ShAmt, Mask, VL);
8024 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT, ShY1, InvShAmt, Mask, VL);
8027 ShX = DAG.
getNode(ISD::VP_SHL,
DL, VT, ShX1, InvShAmt, Mask, VL);
8028 ShY = DAG.
getNode(ISD::VP_SRL,
DL, VT,
Y, ShAmt, Mask, VL);
8031 return DAG.
getNode(ISD::VP_OR,
DL, VT, ShX, ShY, Mask, VL);
8036 if (Node->isVPOpcode())
8039 EVT VT = Node->getValueType(0);
8049 SDValue Z = Node->getOperand(2);
8052 bool IsFSHL = Node->getOpcode() ==
ISD::FSHL;
8055 EVT ShVT = Z.getValueType();
8125 EVT VT = Node->getValueType(0);
8127 bool IsLeft = Node->getOpcode() ==
ISD::ROTL;
8128 SDValue Op0 = Node->getOperand(0);
8129 SDValue Op1 = Node->getOperand(1);
8140 return DAG.
getNode(RevRot,
DL, VT, Op0, Sub);
8143 if (!AllowVectorOps && VT.
isVector() &&
8161 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8163 HsVal = DAG.
getNode(HsOpc,
DL, VT, Op0, HsAmt);
8169 ShVal = DAG.
getNode(ShOpc,
DL, VT, Op0, ShAmt);
8180 assert(Node->getNumOperands() == 3 &&
"Not a double-shift!");
8181 EVT VT = Node->getValueType(0);
8187 SDValue ShOpLo = Node->getOperand(0);
8188 SDValue ShOpHi = Node->getOperand(1);
8189 SDValue ShAmt = Node->getOperand(2);
8232 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8233 SDValue Src = Node->getOperand(OpNo);
8234 EVT SrcVT = Src.getValueType();
8235 EVT DstVT = Node->getValueType(0);
8239 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
8242 if (Node->isStrictFPOpcode())
8305 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
8306 SDValue Src = Node->getOperand(OpNo);
8308 EVT SrcVT = Src.getValueType();
8309 EVT DstVT = Node->getValueType(0);
8330 if (Node->isStrictFPOpcode()) {
8332 { Node->getOperand(0), Src });
8333 Chain = Result.getValue(1);
8347 if (Node->isStrictFPOpcode()) {
8349 Node->getOperand(0),
true);
8355 bool Strict = Node->isStrictFPOpcode() ||
8374 if (Node->isStrictFPOpcode()) {
8376 { Chain, Src, FltOfs });
8398 Result = DAG.
getSelect(dl, DstVT, Sel, True, False);
8408 if (Node->isStrictFPOpcode())
8411 SDValue Src = Node->getOperand(0);
8412 EVT SrcVT = Src.getValueType();
8413 EVT DstVT = Node->getValueType(0);
8417 if (Node->getFlags().hasNonNeg() &&
8446 llvm::bit_cast<double>(UINT64_C(0x4530000000100000)), dl, DstVT);
8465 unsigned Opcode = Node->getOpcode();
8470 if (Node->getFlags().hasNoNaNs()) {
8472 EVT VT = Node->getValueType(0);
8477 SDValue Op1 = Node->getOperand(0);
8478 SDValue Op2 = Node->getOperand(1);
8491 if (
SDValue Expanded = expandVectorNaryOpBySplitting(Node, DAG))
8494 EVT VT = Node->getValueType(0);
8497 "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
8504 SDValue Quiet0 = Node->getOperand(0);
8505 SDValue Quiet1 = Node->getOperand(1);
8507 if (!Node->getFlags().hasNoNaNs()) {
8520 return DAG.
getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
8526 if ((Node->getFlags().hasNoNaNs() ||
8529 (Node->getFlags().hasNoSignedZeros() ||
8532 unsigned IEEE2018Op =
8535 return DAG.
getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
8536 Node->getOperand(1), Node->getFlags());
8539 if (
SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
8547 if (
SDValue Expanded = expandVectorNaryOpBySplitting(
N, DAG))
8553 unsigned Opc =
N->getOpcode();
8554 EVT VT =
N->getValueType(0);
8567 bool MinMaxMustRespectOrderedZero =
false;
8571 MinMaxMustRespectOrderedZero =
true;
8585 if (!
N->getFlags().hasNoNaNs() &&
8594 if (!MinMaxMustRespectOrderedZero && !
N->getFlags().hasNoSignedZeros() &&
8617 unsigned Opc = Node->getOpcode();
8618 EVT VT = Node->getValueType(0);
8628 if (!Flags.hasNoNaNs()) {
8644 if (Flags.hasNoNaNs() ||
8646 unsigned IEEE2019Op =
8654 if ((Flags.hasNoNaNs() ||
8683 if (
Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros() ||
8708 bool IsOrdered = NanTest ==
fcNone;
8709 bool IsUnordered = NanTest ==
fcNan;
8712 if (!IsOrdered && !IsUnordered)
8713 return std::nullopt;
8715 if (OrderedMask ==
fcZero &&
8721 return std::nullopt;
8728 EVT OperandVT =
Op.getValueType();
8740 if (OperandVT == MVT::ppcf128) {
8743 OperandVT = MVT::f64;
8750 bool IsF80 = (ScalarFloatVT == MVT::f80);
8754 if (Flags.hasNoFPExcept() &&
8757 bool IsInvertedFP =
false;
8761 FPTestMask = InvertedFPCheck;
8762 IsInvertedFP =
true;
8769 FPClassTest OrderedFPTestMask = FPTestMask & ~fcNan;
8774 OrderedFPTestMask = FPTestMask;
8776 const bool IsOrdered = FPTestMask == OrderedFPTestMask;
8778 if (std::optional<bool> IsCmp0 =
8781 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
8788 *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode);
8791 if (FPTestMask ==
fcNan &&
8797 bool IsOrderedInf = FPTestMask ==
fcInf;
8800 : UnorderedCmpOpcode,
8811 IsOrderedInf ? OrderedCmpOpcode : UnorderedCmpOpcode);
8816 : UnorderedCmpOpcode,
8827 IsOrdered ? OrderedCmpOpcode : UnorderedCmpOpcode);
8846 return DAG.
getSetCC(
DL, ResultVT, Abs, SmallestNormal,
8847 IsOrdered ? OrderedOp : UnorderedOp);
8870 DAG.
getSetCC(
DL, ResultVT, Abs, SmallestNormal, IsNormalOp);
8872 return DAG.
getNode(LogicOp,
DL, ResultVT, IsFinite, IsNormal);
8879 bool IsInverted =
false;
8882 Test = InvertedCheck;
8898 const unsigned ExplicitIntBitInF80 = 63;
8899 APInt ExpMask = Inf;
8901 ExpMask.
clearBit(ExplicitIntBitInF80);
8915 const auto appendResult = [&](
SDValue PartialRes) {
8925 const auto getIntBitIsSet = [&]() ->
SDValue {
8926 if (!IntBitIsSetV) {
8927 APInt IntBitMask(BitSize, 0);
8928 IntBitMask.
setBit(ExplicitIntBitInF80);
8933 return IntBitIsSetV;
8954 Test &= ~fcPosFinite;
8959 Test &= ~fcNegFinite;
8961 appendResult(PartialRes);
8970 appendResult(ExpIsZero);
8980 else if (PartialCheck ==
fcZero)
8984 appendResult(PartialRes);
8997 appendResult(PartialRes);
9000 if (
unsigned PartialCheck =
Test &
fcInf) {
9003 else if (PartialCheck ==
fcInf)
9010 appendResult(PartialRes);
9013 if (
unsigned PartialCheck =
Test &
fcNan) {
9014 APInt InfWithQnanBit = Inf | QNaNBitMask;
9016 if (PartialCheck ==
fcNan) {
9029 }
else if (PartialCheck ==
fcQNan) {
9041 appendResult(PartialRes);
9046 APInt ExpLSB = ExpMask & ~(ExpMask.
shl(1));
9049 APInt ExpLimit = ExpMask - ExpLSB;
9062 appendResult(PartialRes);
9085 EVT VT = Node->getValueType(0);
9092 if (!(Len <= 128 && Len % 8 == 0))
9151 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
9162 EVT VT = Node->getValueType(0);
9165 SDValue Mask = Node->getOperand(1);
9166 SDValue VL = Node->getOperand(2);
9171 if (!(Len <= 128 && Len % 8 == 0))
9183 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
9186 Tmp1 = DAG.
getNode(ISD::VP_AND, dl, VT,
9190 Op = DAG.
getNode(ISD::VP_SUB, dl, VT,
Op, Tmp1, Mask, VL);
9193 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op, Mask33, Mask, VL);
9194 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT,
9198 Op = DAG.
getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
9203 Tmp5 = DAG.
getNode(ISD::VP_ADD, dl, VT,
Op, Tmp4, Mask, VL);
9204 Op = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
9215 V = DAG.
getNode(ISD::VP_MUL, dl, VT,
Op, Mask01, Mask, VL);
9218 for (
unsigned Shift = 8; Shift < Len; Shift *= 2) {
9220 V = DAG.
getNode(ISD::VP_ADD, dl, VT, V,
9221 DAG.
getNode(ISD::VP_SHL, dl, VT, V, ShiftC, Mask, VL),
9231 EVT VT = Node->getValueType(0);
9270 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9281 EVT VT = Node->getValueType(0);
9284 SDValue Mask = Node->getOperand(1);
9285 SDValue VL = Node->getOperand(2);
9295 for (
unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
9298 DAG.
getNode(ISD::VP_SRL, dl, VT,
Op, Tmp, Mask, VL), Mask,
9303 return DAG.
getNode(ISD::VP_CTPOP, dl, VT,
Op, Mask, VL);
9312 :
APInt(64, 0x0218A392CD3D5DBFULL);
9326 for (
unsigned i = 0; i <
BitWidth; i++) {
9352 EVT VT = Node->getValueType(0);
9386 if (
SDValue V = CTTZTableLookup(Node, DAG, dl, VT,
Op, NumBitsPerElt))
9408 SDValue Mask = Node->getOperand(1);
9409 SDValue VL = Node->getOperand(2);
9411 EVT VT = Node->getValueType(0);
9418 SDValue Tmp = DAG.
getNode(ISD::VP_AND, dl, VT, Not, MinusOne, Mask, VL);
9419 return DAG.
getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL);
9433 EVT SrcVT = Source.getValueType();
9434 EVT ResVT =
N->getValueType(0);
9443 Source = DAG.
getNode(ISD::VP_SETCC,
DL, SrcVT, Source, AllZero,
9451 DAG.
getNode(ISD::VP_SELECT,
DL, ResVecVT, Source, StepVec,
Splat, EVL);
9452 return DAG.
getNode(ISD::VP_REDUCE_UMIN,
DL, ResVT, ExtEVL,
Select, Mask, EVL);
9459 EVT MaskVT = Mask.getValueType();
9469 true, &VScaleRange);
9493 bool IsNegative)
const {
9495 EVT VT =
N->getValueType(0);
9549 EVT VT =
N->getValueType(0);
9623 EVT VT =
N->getValueType(0);
9627 unsigned Opc =
N->getOpcode();
9636 "Unknown AVG node");
9648 return DAG.
getNode(ShiftOpc, dl, VT, Sum,
9700 return DAG.
getNode(SumOpc, dl, VT, Sign, Shift);
9705 EVT VT =
N->getValueType(0);
9712 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9763 EVT VT =
N->getValueType(0);
9772 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
9781 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL);
9791 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9795 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9796 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9797 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9801 Tmp7 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9805 Tmp6 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9806 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9809 Tmp5 = DAG.
getNode(ISD::VP_AND, dl, VT,
Op,
9810 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9815 Tmp4 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp4,
9816 DAG.
getConstant(255ULL << 24, dl, VT), Mask, EVL);
9819 Tmp3 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp3,
9820 DAG.
getConstant(255ULL << 16, dl, VT), Mask, EVL);
9823 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9827 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL);
9828 Tmp6 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL);
9829 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
9830 Tmp2 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
9831 Tmp8 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp6, Mask, EVL);
9832 Tmp4 = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL);
9833 return DAG.
getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp4, Mask, EVL);
9839 EVT VT =
N->getValueType(0);
9882 for (
unsigned I = 0, J = Sz-1;
I < Sz; ++
I, --J) {
9899 assert(
N->getOpcode() == ISD::VP_BITREVERSE);
9902 EVT VT =
N->getValueType(0);
9921 Tmp = (Sz > 8 ? DAG.
getNode(ISD::VP_BSWAP, dl, VT,
Op, Mask, EVL) :
Op);
9926 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9932 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9937 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9943 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9948 Tmp2 = DAG.
getNode(ISD::VP_AND, dl, VT, Tmp2,
9954 Tmp = DAG.
getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
9960std::pair<SDValue, SDValue>
9964 SDValue Chain = LD->getChain();
9965 SDValue BasePTR = LD->getBasePtr();
9966 EVT SrcVT = LD->getMemoryVT();
9967 EVT DstVT = LD->getValueType(0);
9999 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
10000 LD->getMemOperand()->getFlags(), LD->getAAInfo());
10003 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
10004 unsigned ShiftIntoIdx =
10015 Scalar = DAG.
getNode(ExtendOp, SL, DstEltVT, Scalar);
10022 return std::make_pair(
Value, Load.getValue(1));
10031 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
10033 DAG.
getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
10034 LD->getPointerInfo().getWithOffset(
Idx * Stride),
10035 SrcEltVT, LD->getOriginalAlign(),
10036 LD->getMemOperand()->getFlags(), LD->getAAInfo());
10047 return std::make_pair(
Value, NewChain);
10054 SDValue Chain = ST->getChain();
10055 SDValue BasePtr = ST->getBasePtr();
10057 EVT StVT = ST->getMemoryVT();
10083 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
10088 unsigned ShiftIntoIdx =
10097 return DAG.
getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
10098 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
10104 assert(Stride &&
"Zero stride!");
10108 for (
unsigned Idx = 0;
Idx < NumElem; ++
Idx) {
10117 Chain, SL, Elt,
Ptr, ST->getPointerInfo().getWithOffset(
Idx * Stride),
10118 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
10127std::pair<SDValue, SDValue>
10130 "unaligned indexed loads not implemented!");
10131 SDValue Chain = LD->getChain();
10133 EVT VT = LD->getValueType(0);
10134 EVT LoadedVT = LD->getMemoryVT();
10144 return scalarizeVectorLoad(LD, DAG);
10150 LD->getMemOperand());
10152 if (LoadedVT != VT)
10156 return std::make_pair(Result, newLoad.
getValue(1));
10164 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
10168 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.
getNode())->getIndex();
10170 SDValue StackPtr = StackBase;
10173 EVT PtrVT =
Ptr.getValueType();
10174 EVT StackPtrVT = StackPtr.getValueType();
10180 for (
unsigned i = 1; i < NumRegs; i++) {
10183 RegVT, dl, Chain,
Ptr, LD->getPointerInfo().getWithOffset(
Offset),
10184 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
10188 Load.getValue(1), dl, Load, StackPtr,
10199 8 * (LoadedBytes -
Offset));
10202 LD->getPointerInfo().getWithOffset(
Offset), MemVT,
10203 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
10209 Load.getValue(1), dl, Load, StackPtr,
10216 Load = DAG.
getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
10221 return std::make_pair(Load, TF);
10225 "Unaligned load of unsupported type.");
10234 Align Alignment = LD->getOriginalAlign();
10235 unsigned IncrementSize = NumBits / 8;
10246 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10251 LD->getPointerInfo().getWithOffset(IncrementSize),
10252 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10255 Hi = DAG.
getExtLoad(HiExtType, dl, VT, Chain,
Ptr, LD->getPointerInfo(),
10256 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10261 LD->getPointerInfo().getWithOffset(IncrementSize),
10262 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
10274 return std::make_pair(Result, TF);
10280 "unaligned indexed stores not implemented!");
10281 SDValue Chain = ST->getChain();
10283 SDValue Val = ST->getValue();
10285 Align Alignment = ST->getOriginalAlign();
10287 EVT StoreMemVT = ST->getMemoryVT();
10303 Result = DAG.
getStore(Chain, dl, Result,
Ptr, ST->getPointerInfo(),
10304 Alignment, ST->getMemOperand()->getFlags());
10312 EVT PtrVT =
Ptr.getValueType();
10315 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
10319 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
10323 Chain, dl, Val, StackPtr,
10326 EVT StackPtrVT = StackPtr.getValueType();
10334 for (
unsigned i = 1; i < NumRegs; i++) {
10337 RegVT, dl, Store, StackPtr,
10341 ST->getPointerInfo().getWithOffset(
Offset),
10342 ST->getOriginalAlign(),
10343 ST->getMemOperand()->getFlags()));
10363 ST->getPointerInfo().getWithOffset(
Offset), LoadMemVT,
10364 ST->getOriginalAlign(),
10365 ST->getMemOperand()->getFlags(), ST->getAAInfo()));
10372 "Unaligned store of unknown type.");
10376 unsigned IncrementSize = NumBits / 8;
10385 if (
auto *
C = dyn_cast<ConstantSDNode>(
Lo);
C && !
C->isOpaque())
10396 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
10397 ST->getMemOperand()->getFlags());
10402 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
10403 ST->getMemOperand()->getFlags(), ST->getAAInfo());
10414 bool IsCompressedMemory)
const {
10416 EVT AddrVT =
Addr.getValueType();
10417 EVT MaskVT = Mask.getValueType();
10419 "Incompatible types of Data and Mask");
10420 if (IsCompressedMemory) {
10423 "Cannot currently handle compressed memory with scalable vectors");
10429 MaskIntVT = MVT::i32;
10453 "Cannot index a scalable vector within a fixed-width vector");
10457 EVT IdxVT =
Idx.getValueType();
10463 if (
auto *IdxCst = dyn_cast<ConstantSDNode>(
Idx))
10464 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
10478 unsigned MaxIndex = NumSubElts < NElts ? NElts - NumSubElts : 0;
10486 return getVectorSubVecPointer(
10487 DAG, VecPtr, VecVT,
10505 "Converting bits to bytes lost precision");
10507 "Sub-vector must be a vector with matching element type");
10511 EVT IdxVT = Index.getValueType();
10543 assert(EmuTlsVar &&
"Cannot find EmuTlsVar ");
10545 Entry.Ty = VoidPtrType;
10546 Args.push_back(Entry);
10553 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
10562 "Emulated TLS must have zero offset in GlobalAddressSDNode");
10563 return CallResult.first;
10574 EVT VT =
Op.getOperand(0).getValueType();
10576 if (VT.
bitsLT(MVT::i32)) {
10590 SDValue Op0 = Node->getOperand(0);
10591 SDValue Op1 = Node->getOperand(1);
10594 unsigned Opcode = Node->getOpcode();
10636 {Op0, Op1, DAG.getCondCode(CC)})) {
10643 {Op0, Op1, DAG.getCondCode(CC)})) {
10671 unsigned Opcode = Node->getOpcode();
10674 EVT VT =
LHS.getValueType();
10677 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10693 unsigned OverflowOp;
10708 llvm_unreachable(
"Expected method to receive signed or unsigned saturation "
10709 "addition or subtraction node.");
10717 unsigned BitWidth =
LHS.getScalarValueSizeInBits();
10720 SDValue SumDiff = Result.getValue(0);
10721 SDValue Overflow = Result.getValue(1);
10743 return DAG.
getSelect(dl, VT, Overflow, Zero, SumDiff);
10763 if (LHSIsNonNegative || RHSIsNonNegative) {
10765 return DAG.
getSelect(dl, VT, Overflow, SatMax, SumDiff);
10771 if (LHSIsNegative || RHSIsNegative) {
10773 return DAG.
getSelect(dl, VT, Overflow, SatMin, SumDiff);
10783 return DAG.
getSelect(dl, VT, Overflow, Result, SumDiff);
10787 unsigned Opcode = Node->getOpcode();
10790 EVT VT =
LHS.getValueType();
10791 EVT ResVT = Node->getValueType(0);
10822 unsigned Opcode = Node->getOpcode();
10826 EVT VT =
LHS.getValueType();
10831 "Expected a SHLSAT opcode");
10832 assert(VT ==
RHS.getValueType() &&
"Expected operands to be the same type");
10870 if (WideVT == MVT::i16)
10871 LC = RTLIB::MUL_I16;
10872 else if (WideVT == MVT::i32)
10873 LC = RTLIB::MUL_I32;
10874 else if (WideVT == MVT::i64)
10875 LC = RTLIB::MUL_I64;
10876 else if (WideVT == MVT::i128)
10877 LC = RTLIB::MUL_I128;
10886 unsigned HalfBits = Bits >> 1;
10925 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.
getDataLayout())) {
10930 SDValue Args[] = {LL, LH, RL, RH};
10931 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10933 SDValue Args[] = {LH, LL, RH, RL};
10934 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
10937 "Ret value is a collection of constituent nodes holding result.");
10940 Lo = Ret.getOperand(0);
10941 Hi = Ret.getOperand(1);
10943 Lo = Ret.getOperand(1);
10944 Hi = Ret.getOperand(0);
10953 EVT VT =
LHS.getValueType();
10954 assert(
RHS.getValueType() == VT &&
"Mismatching operand types");
10970 forceExpandWideMUL(DAG, dl,
Signed, WideVT,
LHS, HiLHS,
RHS, HiRHS,
Lo,
Hi);
10979 "Expected a fixed point multiplication opcode");
10984 EVT VT =
LHS.getValueType();
10985 unsigned Scale = Node->getConstantOperandVal(2);
11001 SDValue Product = Result.getValue(0);
11002 SDValue Overflow = Result.getValue(1);
11013 Result = DAG.
getSelect(dl, VT, ProdNeg, SatMin, SatMax);
11014 return DAG.
getSelect(dl, VT, Overflow, Result, Product);
11018 SDValue Product = Result.getValue(0);
11019 SDValue Overflow = Result.getValue(1);
11023 return DAG.
getSelect(dl, VT, Overflow, SatMax, Product);
11028 "Expected scale to be less than the number of bits if signed or at "
11029 "most the number of bits if unsigned.");
11031 "Expected both operands to be the same type");
11043 Lo = Result.getValue(0);
11044 Hi = Result.getValue(1);
11065 if (Scale == VTSize)
11111 return DAG.
getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
11136 "Expected a fixed point division opcode");
11138 EVT VT =
LHS.getValueType();
11160 if (LHSLead + RHSTrail < Scale + (
unsigned)(Saturating &&
Signed))
11163 unsigned LHSShift = std::min(LHSLead, Scale);
11164 unsigned RHSShift = Scale - LHSShift;
11221 bool IsAdd = Node->getOpcode() ==
ISD::UADDO;
11227 SDValue NodeCarry = DAG.
getNode(OpcCarry, dl, Node->getVTList(),
11228 { LHS, RHS, CarryIn });
11237 EVT ResultType = Node->getValueType(1);
11248 DAG.
getSetCC(dl, SetCCType, Result,
11267 bool IsAdd = Node->getOpcode() ==
ISD::SADDO;
11272 EVT ResultType = Node->getValueType(1);
11298 DAG.
getNode(
ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
11299 ResultType, ResultType);
11305 EVT VT = Node->getValueType(0);
11313 const APInt &
C = RHSC->getAPIntValue();
11315 if (
C.isPowerOf2()) {
11317 bool UseArithShift =
isSigned && !
C.isMinSignedValue();
11320 Overflow = DAG.
getSetCC(dl, SetCCVT,
11322 dl, VT, Result, ShiftAmt),
11335 static const unsigned Ops[2][3] =
11358 forceExpandWideMUL(DAG, dl,
isSigned,
LHS,
RHS, BottomHalf, TopHalf);
11361 Result = BottomHalf;
11368 Overflow = DAG.
getSetCC(dl, SetCCVT, TopHalf,
11373 EVT RType = Node->getValueType(1);
11378 "Unexpected result type for S/UMULO legalization");
11386 EVT VT =
Op.getValueType();
11390 "Expanding reductions for scalable vectors is undefined.");
11401 Op = DAG.
getNode(BaseOpcode, dl, HalfVT,
Lo,
Hi, Node->getFlags());
11413 for (
unsigned i = 1; i < NumElts; i++)
11414 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
11417 if (EltVT != Node->getValueType(0))
11424 SDValue AccOp = Node->getOperand(0);
11425 SDValue VecOp = Node->getOperand(1);
11433 "Expanding reductions for scalable vectors is undefined.");
11443 for (
unsigned i = 0; i < NumElts; i++)
11444 Res = DAG.
getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
11451 EVT VT = Node->getValueType(0);
11456 SDValue Dividend = Node->getOperand(0);
11457 SDValue Divisor = Node->getOperand(1);
11460 Result = DAG.
getNode(DivRemOpc, dl, VTs, Dividend, Divisor).
getValue(1);
11465 SDValue Divide = DAG.
getNode(DivOpc, dl, VT, Dividend, Divisor);
11477 SDValue Src = Node->getOperand(0);
11480 EVT SrcVT = Src.getValueType();
11481 EVT DstVT = Node->getValueType(0);
11483 EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
11486 assert(SatWidth <= DstWidth &&
11487 "Expected saturation width smaller than result width");
11491 APInt MinInt, MaxInt;
11502 if (SrcVT == MVT::f16 || SrcVT == MVT::bf16) {
11504 SrcVT = Src.getValueType();
11526 if (AreExactFloatBounds && MinMaxLegal) {
11535 dl, DstVT, Clamped);
11547 return DAG.
getSelect(dl, DstVT, IsNan, ZeroInt, FpToInt);
11586 EVT OperandVT =
Op.getValueType();
11608 AbsWide = DAG.
getBitcast(OperandVT, ClearedSign);
11631 KeepNarrow = DAG.
getNode(
ISD::OR, dl, WideSetCCVT, KeepNarrow, AlreadyOdd);
11640 SDValue Adjust = DAG.
getSelect(dl, ResultIntVT, NarrowIsRd, One, NegativeOne);
11642 Op = DAG.
getSelect(dl, ResultIntVT, KeepNarrow, NarrowBits, Adjusted);
11654 EVT VT = Node->getValueType(0);
11657 if (Node->getConstantOperandVal(1) == 1) {
11660 EVT OperandVT =
Op.getValueType();
11672 EVT I32 =
F32.changeTypeToInteger();
11673 Op = expandRoundInexactToOdd(
F32,
Op, dl, DAG);
11698 EVT I16 = I32.isVector() ? I32.changeVectorElementType(MVT::i16) : MVT::i16;
11708 assert(Node->getValueType(0).isScalableVector() &&
11709 "Fixed length vector types expected to use SHUFFLE_VECTOR!");
11711 EVT VT = Node->getValueType(0);
11712 SDValue V1 = Node->getOperand(0);
11713 SDValue V2 = Node->getOperand(1);
11714 int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue();
11733 EVT PtrVT = StackPtr.getValueType();
11735 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11750 StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2));
11752 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr,
11775 return DAG.
getLoad(VT,
DL, StoreV2, StackPtr2,
11782 SDValue Vec = Node->getOperand(0);
11783 SDValue Mask = Node->getOperand(1);
11784 SDValue Passthru = Node->getOperand(2);
11788 EVT MaskVT = Mask.getValueType();
11797 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
11805 bool HasPassthru = !Passthru.
isUndef();
11811 Chain = DAG.
getStore(Chain,
DL, Passthru, StackPtr, PtrInfo);
11814 APInt PassthruSplatVal;
11815 bool IsSplatPassthru =
11818 if (IsSplatPassthru) {
11822 LastWriteVal = DAG.
getConstant(PassthruSplatVal,
DL, ScalarVT);
11823 }
else if (HasPassthru) {
11835 getVectorElementPointer(DAG, StackPtr, VecVT, Popcount);
11837 ScalarVT,
DL, Chain, LastElmtPtr,
11843 for (
unsigned I = 0;
I < NumElms;
I++) {
11847 SDValue OutPtr = getVectorElementPointer(DAG, StackPtr, VecVT, OutPos);
11849 Chain,
DL, ValI, OutPtr,
11862 if (HasPassthru &&
I == NumElms - 1) {
11868 OutPtr = getVectorElementPointer(DAG, StackPtr, VecVT, OutPos);
11872 LastWriteVal = DAG.
getSelect(
DL, ScalarVT, AllLanesSelected, ValI,
11875 Chain,
DL, LastWriteVal, OutPtr,
11880 return DAG.
getLoad(VecVT,
DL, Chain, StackPtr, PtrInfo);
11886 SDValue EVL,
bool &NeedInvert,
11888 bool IsSignaling)
const {
11889 MVT OpVT =
LHS.getSimpleValueType();
11891 NeedInvert =
false;
11892 assert(!EVL == !Mask &&
"VP Mask and EVL must either both be set or unset");
11893 bool IsNonVP = !EVL;
11908 bool NeedSwap =
false;
11909 InvCC = getSetCCInverse(CCCode, OpVT);
11925 if (OpVT == MVT::i1) {
11978 "If SETUE is expanded, SETOEQ or SETUNE must be legal!");
11983 "If SETO is expanded, SETOEQ must be legal!");
12000 NeedInvert = ((
unsigned)CCCode & 0x8U);
12041 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC1, Chain, IsSignaling);
12042 SetCC2 = DAG.
getSetCC(dl, VT,
LHS,
RHS, CC2, Chain, IsSignaling);
12050 SetCC1 = DAG.
getSetCC(dl, VT,
LHS,
LHS, CC1, Chain, IsSignaling);
12051 SetCC2 = DAG.
getSetCC(dl, VT,
RHS,
RHS, CC2, Chain, IsSignaling);
12061 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2);
12065 Opc = Opc ==
ISD::OR ? ISD::VP_OR : ISD::VP_AND;
12066 LHS = DAG.
getNode(Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
12078 EVT VT = Node->getValueType(0);
12090 unsigned Opcode = Node->getOpcode();
12097 for (
const SDValue &V : Node->op_values()) {
unsigned const MachineRegisterInfo * MRI
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
static bool isUndef(ArrayRef< int > Mask)
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
static bool isNonZeroModBitWidthOrUndef(const MachineRegisterInfo &MRI, Register Reg, unsigned BW)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
Function const char * Passes
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static bool lowerImmediateIfPossible(TargetLowering::ConstraintPair &P, SDValue Op, SelectionDAG *DAG, const TargetLowering &TLI)
If we have an immediate, see if we can lower it.
static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG)
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, const APInt &UndefOp0, const APInt &UndefOp1)
Given a vector binary operation and known undefined elements for each input operand,...
static SDValue BuildExactUDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact UDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, EVT VecVT, const SDLoc &dl, ElementCount SubEC)
static unsigned getConstraintPiority(TargetLowering::ConstraintType CT)
Return a number indicating our preference for chosing a type of constraint over another,...
static std::optional< bool > isFCmpEqualZero(FPClassTest Test, const fltSemantics &Semantics, const MachineFunction &MF)
Returns a true value if if this FPClassTest can be performed with an ordered fcmp to 0,...
static void turnVectorIntoSplatVector(MutableArrayRef< SDValue > Values, std::function< bool(SDValue)> Predicate, SDValue AlternativeReplacement=SDValue())
If all values in Values that don't match the predicate are same 'splat' value, then replace all value...
static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT)
static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created)
Given an exact SDIV by a constant, create a multiplication with the multiplicative inverse of the con...
static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, SDValue N0, const APInt &C1, ISD::CondCode Cond, const SDLoc &dl, SelectionDAG &DAG)
static SDValue combineShiftToAVG(SDValue Op, TargetLowering::TargetLoweringOpt &TLO, const TargetLowering &TLI, const APInt &DemandedBits, const APInt &DemandedElts, unsigned Depth)
This file describes how to lower LLVM code to machine code.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
void clearAllBits()
Set every bit to 0.
APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned countLeadingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
void setAllBits()
Set every bit to 1.
APInt multiplicativeInverse() const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void clearHighBits(unsigned hiBits)
Set top hiBits bits to 0.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void setBitVal(unsigned BitPosition, bool BitValue)
Set a given bit to a given value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool hasAttributes() const
Return true if the builder has IR-level attributes.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
ConstantFP - Floating Point Values [float, double].
This class represents a range of values.
const APInt & getAPIntValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
AttributeList getAttributes() const
Return the attribute list for this Function.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
std::vector< std::string > ConstraintCodeVector
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setAdjustsStack(bool V)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
Function & getFunction()
Return the LLVM function that this machine code represents.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const GlobalVariable * getNamedGlobal(StringRef Name) const
Return the global variable in the module with the specified name, of arbitrary type.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Class to represent pointers.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
bool willNotOverflowAdd(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the addition of 2 nodes can never overflow.
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
bool isKnownNeverSNaN(SDValue Op, unsigned Depth=0) const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
bool willNotOverflowSub(bool IsSigned, SDValue N0, SDValue N1) const
Determine if the result of the sub of 2 nodes can never overflow.
bool shouldOptForSize() const
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
std::optional< uint64_t > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
std::optional< uint64_t > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
const TargetMachine & getTargetMachine() const
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
virtual EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual bool shouldExpandCmpUsingSelects(EVT VT) const
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean ...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this ...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const
Get the CondCode that's to be used to test the result of the comparison libcall against zero.
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom for a comparison of the specified type...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions Ref: "Hacker's Delight" by Henry Warren 1...
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector eleme...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimumnum/fmaximumnum into multiple comparison with selects.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
SDValue expandVectorNaryOpBySplitting(SDNode *Node, SelectionDAG &DAG) const
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparison with selects.
SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, returns true if Op is known to never be any NaN; if SNaN is true, returns true if Op is known to never be a signaling NaN.
bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arit...
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op. At this point, we know that only the DemandedBits bits of the result of Op are demanded.
void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed, EVT WideVT, const SDValue LL, const SDValue LH, const SDValue RL, const SDValue RH, SDValue &Lo, SDValue &Hi) const
forceExpandWideMUL - Unconditionally expand a MUL into either a libcall or brute force via a wide mul...
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
TargetLowering(const TargetLowering &)=delete
bool isConstFalseVal(SDValue N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT. If the result is exact, leave it alone; if it is not exact, force the result to be odd.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const
Return if N is a True value when extended to VT.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
bool isConstTrueVal(SDValue N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
SDValue expandVectorFindLastActive(SDNode *N, SelectionDAG &DAG) const
Expand VECTOR_FIND_LAST_ACTIVE nodes.
virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively,...
SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
Primary interface to the complete machine description for the target machine.
bool isPositionIndependent() const
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
iterator_range< regclass_iterator > regclasses() const
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantSDNode predicate.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test, bool UseFCmp)
Evaluates if the specified FP class test is better performed as the inverse (i.e.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
auto find_if_not(R &&Range, UnaryPredicate P)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
constexpr unsigned BitWidth
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
int MatchingInput
MatchingInput - If this is not -1, this is an output constraint where an input constraint is required to match it (e.g. "0").
ConstraintCodeVector Codes
Code - The constraint code, either the register name (in braces) or the constraint letter/number.
SubConstraintInfoVector multipleAlternatives
multipleAlternatives - If there are multiple alternative constraints, this array will contain them.
bool isIndirect
isIndirect - True if this operand is an indirect operand.
bool hasMatchingInput() const
hasMatchingInput - Return true if this is an output constraint that has a matching input constraint.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
static std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
static std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything about the extended bits.
static std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasNoUnsignedWrap() const
bool hasNoSignedWrap() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Magic data for optimising signed division by a constant.
unsigned ShiftAmount
shift amount
static SignedDivisionByConstantInfo get(const APInt &D)
Calculate the magic numbers required to implement a signed integer division by a constant as a sequence of multiplies, adds and shifts.
This contains information for each constraint that we are lowering.
MVT ConstraintVT
The ValueType for the operand value.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
std::string ConstraintCode
This contains the actual string for the code, like "m".
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand number.
unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches.
bool isMatchingInputConstraint() const
Return true of this is an input operand that is a matching constraint like "4".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)
bool isBeforeLegalizeOps() const
void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
ArrayRef< EVT > OpsVTBeforeSoften
bool IsPostTypeLegalization
MakeLibCallOptions & setIsSigned(bool Value=true)
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)
bool LegalOperations() const
Magic data for optimising unsigned division by a constant.
unsigned PreShift
pre-shift amount
static UnsignedDivisionByConstantInfo get(const APInt &D, unsigned LeadingZeros=0, bool AllowEvenDivisorOptimization=true)
Calculate the magic numbers required to implement an unsigned integer division by a constant as a sequence of multiplies, adds and shifts.
unsigned PostShift
post-shift amount