@@ -6569,13 +6569,12 @@ SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
   // The number of bytes being extracted.
   unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();

-  for (;;) {
+  while (canTreatAsByteVector(Op.getValueType())) {
     unsigned Opcode = Op.getOpcode();
     if (Opcode == ISD::BITCAST)
       // Look through bitcasts.
       Op = Op.getOperand(0);
-    else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
-             canTreatAsByteVector(Op.getValueType())) {
+    else if (Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) {
       // Get a VPERM-like permute mask and see whether the bytes covered
       // by the extracted element are a contiguous sequence from one
       // source operand.
@@ -6597,8 +6596,7 @@ SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
       Index = Byte / BytesPerElement;
       Op = Op.getOperand(unsigned(First) / Bytes.size());
       Force = true;
-    } else if (Opcode == ISD::BUILD_VECTOR &&
-               canTreatAsByteVector(Op.getValueType())) {
+    } else if (Opcode == ISD::BUILD_VECTOR) {
       // We can only optimize this case if the BUILD_VECTOR elements are
       // at least as wide as the extracted value.
       EVT OpVT = Op.getValueType();
@@ -6627,7 +6625,6 @@ SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
     } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
                 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
                 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
-               canTreatAsByteVector(Op.getValueType()) &&
                canTreatAsByteVector(Op.getOperand(0).getValueType())) {
       // Make sure that only the unextended bits are significant.
       EVT ExtVT = Op.getValueType();
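To see the refactor in isolation, here is a minimal, self-contained C++ sketch of the same pattern: a guard that several branches of a `for (;;)` loop repeat (in combineExtract, `canTreatAsByteVector(Op.getValueType())`) is hoisted into a `while` condition. All names in the sketch are hypothetical stand-ins rather than LLVM APIs, and the rewrite is only behavior-preserving if the branch that previously ran without the guard (the BITCAST look-through above) should also stop once the guard fails.

```cpp
// Hypothetical stand-ins only; not the LLVM code or APIs.
#include <iostream>

namespace {

bool canTreatAsBytes(int Value) { return Value % 2 == 0; } // stand-in guard

// Before: infinite loop, with the guard repeated in most branches.
int walkBefore(int Value) {
  for (;;) {
    if (Value > 100)
      Value -= 100;                                  // branch without the guard
    else if (Value > 10 && canTreatAsBytes(Value))
      Value /= 2;                                    // guard repeated here
    else if (Value > 1 && canTreatAsBytes(Value))
      Value -= 2;                                    // ...and here
    else
      return Value;
  }
}

// After: the guard is checked once per iteration in the loop header.
// Equivalent only when the previously guard-free branch should also
// stop making progress once the guard fails.
int walkAfter(int Value) {
  while (canTreatAsBytes(Value)) {
    if (Value > 100)
      Value -= 100;
    else if (Value > 10)
      Value /= 2;
    else if (Value > 1)
      Value -= 2;
    else
      break;
  }
  return Value;
}

} // end anonymous namespace

int main() {
  std::cout << walkBefore(104) << ' ' << walkAfter(104) << '\n'; // both print 0
}
```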