Bug fixes and updates for CellSPU, syncing up with trunk. The most notable fixes are target-specific lowering of frame indices, corrected constants generated for the FSMBI instruction, and a fix for SPUTargetLowering::computeMaskedBitsForTargetNode().

llvm-svn: 50462
commit c3a1910a07
parent be940424b3
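For the frame-index fix in particular, the selector no longer emits AIr32 FI, 0: it converts the frame index into a byte offset from the stack pointer (register R1, printed as $sp in the debug output) and picks the add form by immediate range, AIr32 when the offset fits a signed 10-bit immediate and Ar32 with a separately materialized constant otherwise. The following is a minimal standalone sketch of that decision, not the backend code itself; stackSlotSize() and FItoStackOffset() mirror the helpers added to SPUFrameInfo in this commit, while fitsSignedInt10() is a hypothetical stand-in for the backend's isS10Constant() check.

    // Minimal standalone sketch (not the actual backend code) of the frame-index
    // lowering decision introduced by this commit. stackSlotSize() is 16 bytes,
    // per the comments in the diff; fitsSignedInt10() is a plain stand-in for
    // the backend's isS10Constant() predicate.
    #include <cstdio>

    static int stackSlotSize() { return 16; }          // one 128-bit register slot

    static int FItoStackOffset(int frameIndex) {       // mirrors the SPUFrameInfo helper
      return frameIndex * stackSlotSize();             // frame index -> byte offset
    }

    static bool fitsSignedInt10(int value) {           // AIr32 immediate range
      return value >= -(1 << 9) && value <= (1 << 9) - 1;
    }

    int main() {
      const int frameIndices[] = {0, 3, 40};
      for (int i = 0; i != 3; ++i) {
        int offs = FItoStackOffset(frameIndices[i]);
        if (fitsSignedInt10(offs))
          std::printf("FI %d -> AIr32 $sp, %d (offset fits the 10-bit immediate)\n",
                      frameIndices[i], offs);
        else
          std::printf("FI %d -> Ar32 $sp, <reg> (offset %d built with ILHU/IOHL)\n",
                      frameIndices[i], offs);
      }
      return 0;
    }

Frame index 40, for example, maps to byte offset 640, which no longer fits the signed 10-bit field and therefore takes the Ar32 path.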
@@ -57,6 +57,10 @@ namespace llvm {
     static int fullSpillSize() {
       return (SPURegisterInfo::getNumArgRegs() * stackSlotSize());
     }
+    //! Convert frame index to stack offset
+    static int FItoStackOffset(int frame_index) {
+      return frame_index * stackSlotSize();
+    }
     //! Number of instructions required to overcome hint-for-branch latency
     /*!
       HBR (hint-for-branch) instructions can be inserted when, for example,
@@ -17,6 +17,7 @@
 #include "SPUISelLowering.h"
 #include "SPUHazardRecognizers.h"
 #include "SPUFrameInfo.h"
+#include "SPURegisterNames.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineFunction.h"
@@ -411,7 +412,10 @@ SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
 bool
 SPUDAGToDAGISel::SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp,
                                   SDOperand &Base) {
-  return DFormAddressPredicate(Op, N, Disp, Base, -(1 << 7), (1 << 7) - 1);
+  const int minDForm2Offset = -(1 << 7);
+  const int maxDForm2Offset = (1 << 7) - 1;
+  return DFormAddressPredicate(Op, N, Disp, Base, minDForm2Offset,
+                               maxDForm2Offset);
 }

 /*!
@@ -443,12 +447,13 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Bas

   if (Opc == ISD::FrameIndex) {
     // Stack frame index must be less than 512 (divided by 16):
-    FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N);
+    FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N);
+    int FI = int(FIN->getIndex());
     DEBUG(cerr << "SelectDFormAddr: ISD::FrameIndex = "
-          << FI->getIndex() << "\n");
-    if (FI->getIndex() < maxOffset) {
+          << FI << "\n");
+    if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
       Base = CurDAG->getTargetConstant(0, PtrTy);
-      Index = CurDAG->getTargetFrameIndex(FI->getIndex(), PtrTy);
+      Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
       return true;
     }
   } else if (Opc == ISD::ADD) {
@@ -467,13 +472,14 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Bas
       int32_t offset = int32_t(CN->getSignExtended());

       if (Op0.getOpcode() == ISD::FrameIndex) {
-        FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op0);
+        FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op0);
+        int FI = int(FIN->getIndex());
         DEBUG(cerr << "SelectDFormAddr: ISD::ADD offset = " << offset
-              << " frame index = " << FI->getIndex() << "\n");
+              << " frame index = " << FI << "\n");

-        if (FI->getIndex() < maxOffset) {
+        if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
           Base = CurDAG->getTargetConstant(offset, PtrTy);
-          Index = CurDAG->getTargetFrameIndex(FI->getIndex(), PtrTy);
+          Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
           return true;
         }
       } else if (offset > minOffset && offset < maxOffset) {
@@ -487,13 +493,14 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Bas
       int32_t offset = int32_t(CN->getSignExtended());

       if (Op1.getOpcode() == ISD::FrameIndex) {
-        FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op1);
+        FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op1);
+        int FI = int(FIN->getIndex());
         DEBUG(cerr << "SelectDFormAddr: ISD::ADD offset = " << offset
-              << " frame index = " << FI->getIndex() << "\n");
+              << " frame index = " << FI << "\n");

-        if (FI->getIndex() < maxOffset) {
+        if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
           Base = CurDAG->getTargetConstant(offset, PtrTy);
-          Index = CurDAG->getTargetFrameIndex(FI->getIndex(), PtrTy);
+          Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
           return true;
         }
       } else if (offset > minOffset && offset < maxOffset) {
@@ -583,17 +590,31 @@ SPUDAGToDAGISel::Select(SDOperand Op) {
   if (Opc >= ISD::BUILTIN_OP_END && Opc < SPUISD::FIRST_NUMBER) {
     return NULL;   // Already selected.
   } else if (Opc == ISD::FrameIndex) {
-    // Selects to AIr32 FI, 0 which in turn will become AIr32 SP, imm.
-    int FI = cast<FrameIndexSDNode>(N)->getIndex();
+    // Selects to (add $sp, FI * stackSlotSize)
+    int FI =
+      SPUFrameInfo::FItoStackOffset(cast<FrameIndexSDNode>(N)->getIndex());
     MVT::ValueType PtrVT = SPUtli.getPointerTy();
-    SDOperand Zero = CurDAG->getTargetConstant(0, PtrVT);
-    SDOperand TFI = CurDAG->getTargetFrameIndex(FI, PtrVT);

-    DEBUG(cerr << "SPUDAGToDAGISel: Replacing FrameIndex with AI32 <FI>, 0\n");
-    NewOpc = SPU::AIr32;
-    Ops[0] = TFI;
-    Ops[1] = Zero;
-    n_ops = 2;
+    // Adjust stack slot to actual offset in frame:
+    if (isS10Constant(FI)) {
+      DEBUG(cerr << "SPUDAGToDAGISel: Replacing FrameIndex with AIr32 $sp, "
+            << FI
+            << "\n");
+      NewOpc = SPU::AIr32;
+      Ops[0] = CurDAG->getRegister(SPU::R1, PtrVT);
+      Ops[1] = CurDAG->getTargetConstant(FI, PtrVT);
+      n_ops = 2;
+    } else {
+      DEBUG(cerr << "SPUDAGToDAGISel: Replacing FrameIndex with Ar32 $sp, "
+            << FI
+            << "\n");
+      NewOpc = SPU::Ar32;
+      Ops[0] = CurDAG->getRegister(SPU::R1, PtrVT);
+      Ops[1] = CurDAG->getConstant(FI, PtrVT);
+      n_ops = 2;
+
+      AddToISelQueue(Ops[1]);
+    }
   } else if (Opc == ISD::ZERO_EXTEND) {
     // (zero_extend:i16 (and:i8 <arg>, <const>))
     const SDOperand &Op1 = N->getOperand(0);
@@ -14,6 +14,7 @@
 #include "SPURegisterNames.h"
 #include "SPUISelLowering.h"
 #include "SPUTargetMachine.h"
+#include "SPUFrameInfo.h"
 #include "llvm/ADT/VectorExtras.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/CodeGen/CallingConvLower.h"
@@ -514,6 +515,12 @@ AlignedLoad(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST,
       alignOffs = 0;
       prefSlotOffs = -vtm->prefslot_byte;
     }
+  } else if (basePtr.getOpcode() == ISD::FrameIndex) {
+    FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(basePtr);
+    alignOffs = int(FIN->getIndex() * SPUFrameInfo::stackSlotSize());
+    prefSlotOffs = (int) (alignOffs & 0xf);
+    prefSlotOffs -= vtm->prefslot_byte;
+    basePtr = DAG.getRegister(SPU::R1, VT);
   } else {
     alignOffs = 0;
     prefSlotOffs = -vtm->prefslot_byte;
@@ -1912,7 +1919,7 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) {
     SDOperand FSMBOp =
       DAG.getCopyToReg(Chain, FSMBIreg,
                        DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
-                                   DAG.getConstant(0xcccc, MVT::i32)));
+                                   DAG.getConstant(0xcccc, MVT::i16)));

     SDOperand HHProd =
       DAG.getCopyToReg(FSMBOp, HiProdReg,
@@ -1956,7 +1963,7 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) {
                   DAG.getNode(SPUISD::MPY, MVT::v8i16, rALH, rBLH), c8);

     SDOperand FSMBmask = DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
-                                     DAG.getConstant(0x2222, MVT::i32));
+                                     DAG.getConstant(0x2222, MVT::i16));

     SDOperand LoProdParts =
       DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32,
@@ -2271,6 +2278,7 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc)

     assert(Op0VT == MVT::i32
            && "CellSPU: Zero/sign extending something other than i32");
+    DEBUG(cerr << "CellSPU: LowerI64Math custom lowering zero/sign/any extend\n");

     unsigned NewOpc = (Opc == ISD::SIGN_EXTEND
                        ? SPUISD::ROTBYTES_RIGHT_S
@@ -2764,7 +2772,7 @@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
     }
   }
   // Otherwise, return unchanged.
-#if 0
+#if 1
   if (Result.Val) {
     DEBUG(cerr << "\nReplace.SPU: ");
     DEBUG(N->dump(&DAG));
@@ -2833,7 +2841,9 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                   APInt &KnownOne,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth ) const {
+#if 0
   const uint64_t uint64_sizebits = sizeof(uint64_t) * 8;
+#endif

   switch (Op.getOpcode()) {
   default:
@@ -2849,18 +2859,22 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,

   case SPUISD::PROMOTE_SCALAR: {
     SDOperand Op0 = Op.getOperand(0);
-    uint64_t InMask = MVT::getIntVTBitMask(Op0.getValueType());
-    KnownZero |= APInt(uint64_sizebits, ~InMask, false);
-    KnownOne |= APInt(uint64_sizebits, InMask, false);
+    MVT::ValueType Op0VT = Op0.getValueType();
+    unsigned Op0VTBits = MVT::getSizeInBits(Op0VT);
+    uint64_t InMask = MVT::getIntVTBitMask(Op0VT);
+    KnownZero |= APInt(Op0VTBits, ~InMask, false);
+    KnownOne |= APInt(Op0VTBits, InMask, false);
     break;
   }

   case SPUISD::LDRESULT:
   case SPUISD::EXTRACT_ELT0:
   case SPUISD::EXTRACT_ELT0_CHAINED: {
-    uint64_t InMask = MVT::getIntVTBitMask(Op.getValueType());
-    KnownZero |= APInt(uint64_sizebits, ~InMask, false);
-    KnownOne |= APInt(uint64_sizebits, InMask, false);
+    MVT::ValueType OpVT = Op.getValueType();
+    unsigned OpVTBits = MVT::getSizeInBits(OpVT);
+    uint64_t InMask = MVT::getIntVTBitMask(OpVT);
+    KnownZero |= APInt(OpVTBits, ~InMask, false);
+    KnownOne |= APInt(OpVTBits, InMask, false);
     break;
   }

@@ -2873,21 +2887,20 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
   case MPYU:
   case MPYH:
   case MPYHH:
-  case SHLQUAD_L_BITS:
-  case SHLQUAD_L_BYTES:
-  case VEC_SHL:
-  case VEC_SRL:
-  case VEC_SRA:
-  case VEC_ROTL:
-  case VEC_ROTR:
-  case ROTQUAD_RZ_BYTES:
-  case ROTQUAD_RZ_BITS:
-  case ROTBYTES_RIGHT_S:
-  case ROTBYTES_LEFT:
-  case ROTBYTES_LEFT_CHAINED:
+  case SPUISD::SHLQUAD_L_BITS:
+  case SPUISD::SHLQUAD_L_BYTES:
+  case SPUISD::VEC_SHL:
+  case SPUISD::VEC_SRL:
+  case SPUISD::VEC_SRA:
+  case SPUISD::VEC_ROTL:
+  case SPUISD::VEC_ROTR:
+  case SPUISD::ROTQUAD_RZ_BYTES:
+  case SPUISD::ROTQUAD_RZ_BITS:
+  case SPUISD::ROTBYTES_RIGHT_S:
+  case SPUISD::ROTBYTES_LEFT:
+  case SPUISD::ROTBYTES_LEFT_CHAINED:
   case FSMBI:
   case SELB:
-  case SFPConstant:
   case FPInterp:
   case FPRecipEst:
   case SEXT32TO64:
@@ -2895,6 +2908,16 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
   }
 }

+// LowerAsmOperandForConstraint
+void
+SPUTargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
+                                                char ConstraintLetter,
+                                                std::vector<SDOperand> &Ops,
+                                                SelectionDAG &DAG) const {
+  // Default, for the time being, to the base class handler
+  TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG);
+}
+
 /// isLegalAddressImmediate - Return true if the integer value can be used
 /// as the offset of the target addressing mode.
 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V, const Type *Ty) const {
@@ -125,6 +125,10 @@ namespace llvm {
     getRegForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const;

+    void LowerAsmOperandForConstraint(SDOperand Op, char ConstraintLetter,
+                                      std::vector<SDOperand> &Ops,
+                                      SelectionDAG &DAG) const;
+
     /// isLegalAddressImmediate - Return true if the integer value can be used
     /// as the offset of the target addressing mode.
     virtual bool isLegalAddressImmediate(int64_t V, const Type *Ty) const;
@@ -22,10 +22,10 @@
 //===----------------------------------------------------------------------===//

 let hasCtrlDep = 1, Defs = [R1], Uses = [R1] in {
-  def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt),
+  def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm_i32:$amt),
                                 "${:comment} ADJCALLSTACKDOWN",
                                 [(callseq_start imm:$amt)]>;
-  def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt),
+  def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm_i32:$amt),
                               "${:comment} ADJCALLSTACKUP",
                               [(callseq_end imm:$amt)]>;
 }
@@ -290,18 +290,34 @@ def CWD : RI7Form<0b01101111100, (outs VECREG:$rT), (ins memri7:$src),
                   "cwd\t$rT, $src", ShuffleOp,
                   [(set (v4i32 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

+def CWDf32 : RI7Form<0b01101111100, (outs VECREG:$rT), (ins memri7:$src),
+                     "cwd\t$rT, $src", ShuffleOp,
+                     [(set (v4f32 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
+
 def CWX : RRForm<0b01101011100, (outs VECREG:$rT), (ins memrr:$src),
                  "cwx\t$rT, $src", ShuffleOp,
                  [(set (v4i32 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

+def CWXf32 : RRForm<0b01101011100, (outs VECREG:$rT), (ins memrr:$src),
+                    "cwx\t$rT, $src", ShuffleOp,
+                    [(set (v4f32 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+
 def CDD : RI7Form<0b11101111100, (outs VECREG:$rT), (ins memri7:$src),
                   "cdd\t$rT, $src", ShuffleOp,
                   [(set (v2i64 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

+def CDDf64 : RI7Form<0b11101111100, (outs VECREG:$rT), (ins memri7:$src),
+                     "cdd\t$rT, $src", ShuffleOp,
+                     [(set (v2f64 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
+
 def CDX : RRForm<0b11101011100, (outs VECREG:$rT), (ins memrr:$src),
                  "cdx\t$rT, $src", ShuffleOp,
                  [(set (v2i64 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

+def CDXf64 : RRForm<0b11101011100, (outs VECREG:$rT), (ins memrr:$src),
+                    "cdx\t$rT, $src", ShuffleOp,
+                    [(set (v2f64 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+
 //===----------------------------------------------------------------------===//
 // Constant formation:
 //===----------------------------------------------------------------------===//
@@ -367,10 +383,10 @@ class ILHURegInst<RegisterClass rclass, Operand immtype, PatLeaf xform>:
 multiclass ImmLoadHalfwordUpper
 {
   def v2i64: ILHUVecInst<v2i64, u16imm_i64, immILHUvec_i64>;
-  def v4i32: ILHUVecInst<v4i32, u16imm, immILHUvec>;
+  def v4i32: ILHUVecInst<v4i32, u16imm_i32, immILHUvec>;

   def r64: ILHURegInst<R64C, u16imm_i64, hi16>;
-  def r32: ILHURegInst<R32C, u16imm, hi16>;
+  def r32: ILHURegInst<R32C, u16imm_i32, hi16>;

   // Loads the high portion of an address
   def hi: ILHURegInst<R32C, symbolHi, hi16>;
@@ -436,7 +452,7 @@ class IOHLRegInst<RegisterClass rclass, Operand immtype /* , PatLeaf xform */>:
 multiclass ImmOrHalfwordLower
 {
   def v2i64: IOHLVecInst<v2i64, u16imm_i64>;
-  def v4i32: IOHLVecInst<v4i32, u16imm>;
+  def v4i32: IOHLVecInst<v4i32, u16imm_i32>;

   def r32: IOHLRegInst<R32C, i32imm>;
   def f32: IOHLRegInst<R32FP, f32imm>;
@@ -453,7 +469,7 @@ class FSMBIVec<ValueType vectype>:
   RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
            "fsmbi\t$rT, $val",
            SelectOp,
-           [(set (vectype VECREG:$rT), (SPUfsmbi (i32 immU16:$val)))]>;
+           [(set (vectype VECREG:$rT), (SPUfsmbi (i16 immU16:$val)))]>;

 multiclass FormSelectMaskBytesImm
 {
@@ -3873,6 +3889,13 @@ def : Pat<(SPUindirect (SPUhi tconstpool:$in, 0),
                        (SPUlo tconstpool:$in, 0)),
           (IOHLlo (ILHUhi tconstpool:$in), tconstpool:$in)>;

+def : Pat<(SPUindirect R32C:$sp, i32ImmSExt10:$imm),
+          (AIr32 R32C:$sp, i32ImmSExt10:$imm)>;
+
+def : Pat<(SPUindirect R32C:$sp, imm:$imm),
+          (Ar32 R32C:$sp,
+                (IOHLr32 (ILHUr32 (HI16 imm:$imm)), (LO16 imm:$imm)))>;
+
 def : Pat<(add (SPUhi tglobaladdr:$in, 0), (SPUlo tglobaladdr:$in, 0)),
           (IOHLlo (ILHUhi tglobaladdr:$in), tglobaladdr:$in)>;

@@ -59,7 +59,8 @@ def SPUv4i32_binop: SDTypeProfile<1, 2, [
 // FSMBI type constraints: There are several variations for the various
 // vector types (this avoids having to bit_convert all over the place.)
 def SPUfsmbi_type: SDTypeProfile<1, 1, [
-  /* SDTCisVT<1, i32> */ SDTCisInt<1>]>;
+  SDTCisInt<1>
+]>;

 // SELB type constraints:
 def SPUselb_type: SDTypeProfile<1, 3, [
@@ -509,7 +509,11 @@ def u16imm_i64 : Operand<i64> {
   let PrintMethod = "printU16ImmOperand";
 }

-def u16imm : Operand<i32> {
+def u16imm_i32 : Operand<i32> {
+  let PrintMethod = "printU16ImmOperand";
+}
+
+def u16imm : Operand<i16> {
   let PrintMethod = "printU16ImmOperand";
 }
