@@ -6586,13 +6586,12 @@ SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
   // The number of bytes being extracted.
   unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
 
-  for (;;) {
+  while (canTreatAsByteVector(Op.getValueType())) {
     unsigned Opcode = Op.getOpcode();
     if (Opcode == ISD::BITCAST)
       // Look through bitcasts.
       Op = Op.getOperand(0);
-    else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
-             canTreatAsByteVector(Op.getValueType())) {
+    else if (Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) {
       // Get a VPERM-like permute mask and see whether the bytes covered
       // by the extracted element are a contiguous sequence from one
       // source operand.
@@ -6614,8 +6613,7 @@ SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
       Index = Byte / BytesPerElement;
       Op = Op.getOperand(unsigned(First) / Bytes.size());
       Force = true;
-    } else if (Opcode == ISD::BUILD_VECTOR &&
-               canTreatAsByteVector(Op.getValueType())) {
+    } else if (Opcode == ISD::BUILD_VECTOR) {
       // We can only optimize this case if the BUILD_VECTOR elements are
       // at least as wide as the extracted value.
       EVT OpVT = Op.getValueType();
@@ -6644,7 +6642,6 @@ SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
     } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
                 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
                 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
-               canTreatAsByteVector(Op.getValueType()) &&
                canTreatAsByteVector(Op.getOperand(0).getValueType())) {
       // Make sure that only the unextended bits are significant.
       EVT ExtVT = Op.getValueType();
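For readers skimming the diff: the change hoists the canTreatAsByteVector(Op.getValueType()) check, previously repeated in several else-if branches, into the loop condition, turning for (;;) into a guarded while. Below is a minimal standalone sketch of that refactoring pattern in plain C++ (not LLVM code; isHandled, simplifyBefore, and simplifyAfter are invented names for illustration).

// sketch.cpp - illustrative only; compile with: c++ -std=c++17 sketch.cpp
#include <cstdio>

namespace {

// Stand-in for canTreatAsByteVector(): here, "the value is even".
bool isHandled(int V) { return V % 2 == 0; }

// Before: an unbounded loop in which two branches repeat the shared
// predicate, while the first ("look-through") branch does not.
int simplifyBefore(int V) {
  for (;;) {
    if (V > 100)
      V -= 100;                      // unconditional look-through step
    else if (V > 10 && isHandled(V))
      V /= 2;                        // branch guarded by the predicate
    else if (V > 2 && isHandled(V))
      V -= 2;                        // another branch with the same guard
    else
      break;                         // nothing applies: stop simplifying
  }
  return V;
}

// After: the shared predicate is hoisted into the loop condition, so the
// individual branches drop their copies of it, mirroring the change from
// "for (;;)" to "while (canTreatAsByteVector(Op.getValueType()))".
int simplifyAfter(int V) {
  while (isHandled(V)) {
    if (V > 100)
      V -= 100;
    else if (V > 10)
      V /= 2;
    else if (V > 2)
      V -= 2;
    else
      break;
  }
  return V;
}

} // namespace

int main() {
  const int Tests[] = {4, 104, 210, 103};
  for (int V : Tests)
    std::printf("%3d -> before: %d, after: %d\n", V, simplifyBefore(V),
                simplifyAfter(V));
  return 0;
}

The sketch also makes the caveat of this pattern visible: once the predicate becomes the loop condition, it gates branches that previously ran without it (the value 103 above; the bitcast look-through in the diff), so the two forms are not automatically equivalent and the newly guarded branch has to be checked when applying this refactor.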