@@ -3099,7 +3099,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
 
   // fold (and c1, c2) -> c1&c2
   ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
-  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+  ConstantSDNode *N1C = isConstOrConstSplat(N1);
   if (N0C && N1C && !N1C->isOpaque())
     return DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, N0C, N1C);
   // canonicalize constant to RHS
@@ -3119,14 +3119,14 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
     return RAND;
   // fold (and (or x, C), D) -> D if (C & D) == D
   if (N1C && N0.getOpcode() == ISD::OR)
-    if (ConstantSDNode *ORI = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
+    if (ConstantSDNode *ORI = isConstOrConstSplat(N0.getOperand(1)))
       if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue())
         return N1;
   // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
   if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
     SDValue N0Op0 = N0.getOperand(0);
     APInt Mask = ~N1C->getAPIntValue();
-    Mask = Mask.trunc(N0Op0.getValueSizeInBits());
+    Mask = Mask.trunc(N0Op0.getScalarValueSizeInBits());
     if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
       SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
                                  N0.getValueType(), N0Op0);
@@ -3177,7 +3177,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
     // that will apply equally to all members of the vector, so AND all the
     // lanes of the constant together.
     EVT VT = Vector->getValueType(0);
-    unsigned BitWidth = VT.getVectorElementType().getSizeInBits();
+    unsigned BitWidth = VT.getScalarType().getSizeInBits();
 
     // If the splat value has been compressed to a bitlength lower
     // than the size of the vector lane, we need to re-expand it to
@@ -3251,9 +3251,9 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
   // fold (and (load x), 255) -> (zextload x, i8)
   // fold (and (extload x, i16), 255) -> (zextload x, i8)
   // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8)
-  if (N1C && (N0.getOpcode() == ISD::LOAD ||
-              (N0.getOpcode() == ISD::ANY_EXTEND &&
-               N0.getOperand(0).getOpcode() == ISD::LOAD))) {
+  if (!VT.isVector() && N1C && (N0.getOpcode() == ISD::LOAD ||
+                                (N0.getOpcode() == ISD::ANY_EXTEND &&
+                                 N0.getOperand(0).getOpcode() == ISD::LOAD))) {
   bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND;
   LoadSDNode *LN0 = HasAnyExt
                     ? cast<LoadSDNode>(N0.getOperand(0))
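
The common thread in these hunks: every fold in visitAND that previously matched only a scalar ConstantSDNode on the RHS now goes through isConstOrConstSplat, which also accepts a BUILD_VECTOR whose lanes all carry the same constant, so the same folds fire on vector splats. A minimal sketch of the behavior that helper must provide, assuming the BuildVectorSDNode::getConstantSplatNode API; the in-tree helper may differ in details:

// Sketch: accept a scalar constant directly, or a constant splat
// vector, returning the ConstantSDNode shared by every lane.
static ConstantSDNode *isConstOrConstSplat(SDValue N) {
  if (auto *CN = dyn_cast<ConstantSDNode>(N))
    return CN;
  if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
    // Insist on no undef lanes and a full-width lane constant so the
    // APInt queries in the folds above (getAPIntValue, isOpaque) are
    // sound for the whole vector.
    if (CN && UndefElements.none() &&
        CN->getValueType(0) == N.getValueType().getScalarType())
      return CN;
  }
  return nullptr;
}

The !VT.isVector() guard added to the zextload fold in the last hunk is the flip side of the same change: once N1C can match a vector splat, that fold has to stay scalar-only, since it would otherwise try to rewrite a vector AND as a scalar zextload.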