@@ -1265,7 +1265,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i16, Legal);
     setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);
     setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);
-
+
     setOperationAction(ISD::BR_CC, MVT::i1, Expand);
     setOperationAction(ISD::SETCC, MVT::i1, Custom);
     setOperationAction(ISD::XOR, MVT::i1, Legal);
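The three setLoadExtAction calls in this hunk tell the legalizer that AVX-512 can fold the widening step into the load itself. A minimal sketch of how later DAG code could query that declaration — the helper name is illustrative and TLI is an assumed in-scope const TargetLowering &, not part of this commit:

// Illustrative only: query the legality declared by setLoadExtAction above.
// canFoldSExtLoadToV8i64 is a hypothetical helper; TLI is assumed in scope.
static bool canFoldSExtLoadToV8i64(const TargetLowering &TLI) {
  // Legal means (sext (load <8 x i32>)) stays a single extending load,
  // which AVX-512 selects to VPMOVSXDQ with a 256-bit memory operand.
  return TLI.isLoadExtLegal(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i32);
}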
@@ -5281,7 +5281,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
     SDValue In = Op.getOperand(idx);
     if (In.getOpcode() == ISD::UNDEF)
       continue;
-    if (!isa<ConstantSDNode>(In))
+    if (!isa<ConstantSDNode>(In))
       NonConstIdx.push_back(idx);
     else {
       Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
@@ -5308,7 +5308,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
     }
     else if (HasConstElts)
       Imm = DAG.getConstant(0, dl, VT);
-    else
+    else
       Imm = DAG.getUNDEF(VT);
     if (Imm.getValueSizeInBits() == VT.getSizeInBits())
       DstVec = DAG.getBitcast(VT, Imm);
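The two LowerBUILD_VECTORvXi1 hunks above sit in the path that splits a vXi1 build_vector into a packed constant immediate plus individually inserted variable lanes. A standalone sketch of that packing step, with illustrative names, assuming the element operands were collected into an array first:

// Standalone sketch (names illustrative) of the packing logic above:
// constant i1 lanes become bits of an integer immediate; the rest are
// recorded so the caller can insert them individually afterwards.
static uint64_t buildMaskImmediate(ArrayRef<SDValue> Elts,
                                   SmallVectorImpl<unsigned> &NonConstIdx) {
  uint64_t Immediate = 0;
  for (unsigned idx = 0, e = Elts.size(); idx != e; ++idx) {
    SDValue In = Elts[idx];
    if (In.getOpcode() == ISD::UNDEF)
      continue; // undef lanes may keep whatever bit the immediate has
    if (auto *C = dyn_cast<ConstantSDNode>(In))
      Immediate |= C->getZExtValue() << idx; // pack the lane into bit idx
    else
      NonConstIdx.push_back(idx); // materialized later, lane by lane
  }
  return Immediate;
}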
@@ -12169,14 +12169,14 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
   if (InVT.is512BitVector() && InVT.getScalarSizeInBits() <= 16 &&
       Subtarget->hasBWI())
     return Op; // legal, will go to VPMOVB2M, VPMOVW2M
-  if ((InVT.is256BitVector() || InVT.is128BitVector())
+  if ((InVT.is256BitVector() || InVT.is128BitVector())
       && InVT.getScalarSizeInBits() <= 16 &&
       Subtarget->hasBWI() && Subtarget->hasVLX())
     return Op; // legal, will go to VPMOVB2M, VPMOVW2M
   if (InVT.is512BitVector() && InVT.getScalarSizeInBits() >= 32 &&
       Subtarget->hasDQI())
     return Op; // legal, will go to VPMOVD2M, VPMOVQ2M
-  if ((InVT.is256BitVector() || InVT.is128BitVector())
+  if ((InVT.is256BitVector() || InVT.is128BitVector())
       && InVT.getScalarSizeInBits() >= 32 &&
       Subtarget->hasDQI() && Subtarget->hasVLX())
     return Op; // legal, will go to VPMOVD2M, VPMOVQ2M
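The four early returns above encode a single feature table for truncation to a k-mask. The same rules condensed into one predicate for readability (illustrative only, not code from the patch):

// Condensed sketch of the legality rules encoded by the returns above.
static bool isLegalTruncateToMask(MVT InVT, const X86Subtarget *Subtarget) {
  unsigned EltBits = InVT.getScalarSizeInBits();
  bool Wide = InVT.is512BitVector();
  bool Narrow = InVT.is256BitVector() || InVT.is128BitVector();
  if (EltBits <= 16) // i8/i16 lanes: VPMOVB2M / VPMOVW2M need BWI
    return Subtarget->hasBWI() && (Wide || (Narrow && Subtarget->hasVLX()));
  if (EltBits >= 32) // i32/i64 lanes: VPMOVD2M / VPMOVQ2M need DQI
    return Subtarget->hasDQI() && (Wide || (Narrow && Subtarget->hasVLX()));
  return false;
}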
@@ -13665,7 +13665,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
   else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
     Op2Scalar = Op2.getOperand(0);
   if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
-    SDValue newSelect = DAG.getNode(ISD::SELECT, DL,
+    SDValue newSelect = DAG.getNode(ISD::SELECT, DL,
                                     Op1Scalar.getValueType(),
                                     Cond, Op1Scalar, Op2Scalar);
    if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
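For reference, the newSelect above realizes the fold (select Cond, (bitcast A), (bitcast B)) -> (bitcast (select Cond, A, B)). An illustrative helper capturing just the bitcast case — the name and signature are assumptions, not part of this commit:

// Sketch of the fold above: when both select operands are bitcasts of
// same-type scalars whose width matches VT, select the scalars first and
// bitcast the result once.
static SDValue foldSelectOfBitcasts(SelectionDAG &DAG, const SDLoc &DL,
                                    SDValue Cond, SDValue Op1, SDValue Op2,
                                    EVT VT) {
  if (Op1.getOpcode() != ISD::BITCAST || Op2.getOpcode() != ISD::BITCAST)
    return SDValue();
  SDValue A = Op1.getOperand(0), B = Op2.getOperand(0);
  if (A.getValueType() != B.getValueType() ||
      A.getValueSizeInBits() != VT.getSizeInBits())
    return SDValue();
  SDValue Sel = DAG.getNode(ISD::SELECT, DL, A.getValueType(), Cond, A, B);
  return DAG.getBitcast(VT, Sel);
}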
@@ -16474,16 +16474,16 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
 
 // Return true if the required (according to Opcode) shift-imm form is natively
 // supported by the Subtarget
-static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget *Subtarget,
+static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget *Subtarget,
                                         unsigned Opcode) {
   if (VT.getScalarSizeInBits() < 16)
     return false;
-
+
   if (VT.is512BitVector() &&
       (VT.getScalarSizeInBits() > 16 || Subtarget->hasBWI()))
     return true;
 
   bool LShift = VT.is128BitVector() ||
     (VT.is256BitVector() && Subtarget->hasInt256());
 
   bool AShift = LShift && (Subtarget->hasVLX() ||
@@ -16493,15 +16493,15 @@ static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget *Subtarget,
 
 // The shift amount is a variable, but it is the same for all vector lanes.
 // These instructions are defined together with shift-immediate.
-static
-bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget *Subtarget,
+static
+bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget *Subtarget,
                                       unsigned Opcode) {
   return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
 }
 
 // Return true if the required (according to Opcode) variable-shift form is
 // natively supported by the Subtarget
-static bool SupportedVectorVarShift(MVT VT, const X86Subtarget *Subtarget,
+static bool SupportedVectorVarShift(MVT VT, const X86Subtarget *Subtarget,
                                     unsigned Opcode) {
 
   if (!Subtarget->hasInt256() || VT.getScalarSizeInBits() < 16)
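Together, these three predicates form a small capability-query layer that shift lowering consults before picking a strategy. A hedged usage sketch — the surrounding caller is assumed and not shown in this diff:

// Usage sketch only: how LowerShift-style code consults the predicates.
// VT, Op, and Subtarget are assumed in scope as in the real callers.
if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) {
  // Emit the immediate form directly, e.g. X86ISD::VSHLI for ISD::SHL.
} else if (SupportedVectorVarShift(VT, Subtarget, Op.getOpcode())) {
  // Keep the generic node; it selects to a per-lane shift like VPSLLVD.
}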