Commit e55f7de

[X86][SSE] combineReductionToHorizontal - don't rely on widenSubVector to handle illegal vector types.
Thanks to @asbirlea for reporting the bug.
1 parent: 9f80ab1

2 files changed: 113 additions, 1 deletion

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 5 additions & 1 deletion
@@ -40123,7 +40123,11 @@ static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
         Rdx = DAG.getNode(Opc, DL, Lo.getValueType(), Lo, Hi);
       }
     } else {
-      Rdx = widenSubVector(Rdx, false, Subtarget, DAG, DL, 128);
+      if (VecVT == MVT::v4i8)
+        Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
+                          DAG.getUNDEF(MVT::v4i8));
+      Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
+                        DAG.getUNDEF(MVT::v8i8));
       Rdx = getUnpackl(DAG, DL, MVT::v16i8, Rdx, DAG.getUNDEF(MVT::v16i8));
       Rdx = DAG.getBitcast(MVT::v8i16, Rdx);
     }
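
For context, this is the sub-128-bit path: a reduction over an illegal narrow vector such as <4 x i8> is now widened to v16i8 with explicit CONCAT_VECTORS + UNDEF operands instead of relying on widenSubVector. A minimal IR input that exercises it, modeled on the illegal_v4i8 test added below, might look like this (the function name and the llc invocation in the comment are illustrative assumptions, not part of the commit):

; Hypothetical reproducer (not part of the commit), e.g. fed to:
;   llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < reduce-v4i8.ll
declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>)

define i8 @reduce_mul_v4i8(<4 x i8>* %p) {
  ; <4 x i8> is only 32 bits wide, so combineReductionToHorizontal must first
  ; widen it (v4i8 -> v8i8 -> v16i8) before the byte reduction sequence runs.
  %v = load <4 x i8>, <4 x i8>* %p, align 4
  %r = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %v)
  ret i8 %r
}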

llvm/test/CodeGen/X86/vector-reduce-mul.ll

Lines changed: 108 additions & 0 deletions
@@ -2236,6 +2236,114 @@ define i8 @test_v128i8(<128 x i8> %a0) {
   ret i8 %1
 }

+;
+; Legalization
+;
+
+define i8 @illegal_v4i8(i8 %a0, <4 x i8>* %a1) {
+; SSE2-LABEL: illegal_v4i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movl %edi, %eax
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE2-NEXT: pmullw %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pmullw %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: mulb %cl
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: illegal_v4i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movl %edi, %eax
+; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE41-NEXT: pmullw %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pmullw %xmm1, %xmm0
+; SSE41-NEXT: movd %xmm0, %ecx
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: illegal_v4i8:
+; AVX: # %bb.0:
+; AVX-NEXT: movl %edi, %eax
+; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %ecx
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: retq
+  %ld = load <4 x i8>, <4 x i8>* %a1, align 4
+  %rdx = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %ld)
+  %mul = mul i8 %a0, %rdx
+  ret i8 %mul
+}
+
+define i8 @illegal_v8i8(i8 %a0, <8 x i8>* %a1) {
+; SSE2-LABEL: illegal_v8i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movl %edi, %eax
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: pmullw %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
+; SSE2-NEXT: pmullw %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pmullw %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: mulb %cl
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: illegal_v8i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movl %edi, %eax
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE41-NEXT: pmullw %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
+; SSE41-NEXT: pmullw %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pmullw %xmm0, %xmm1
+; SSE41-NEXT: movd %xmm1, %ecx
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: illegal_v8i8:
+; AVX: # %bb.0:
+; AVX-NEXT: movl %edi, %eax
+; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %ecx
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: retq
+  %ld = load <8 x i8>, <8 x i8>* %a1, align 4
+  %rdx = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %ld)
+  %mul = mul i8 %a0, %rdx
+  ret i8 %mul
+}
+
 declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>)
 declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>)
 declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>)
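
The CHECK lines above follow the utils/update_llc_test_checks.py style of autogenerated assertions. A typical way to regenerate them and to run just this test, assuming an in-tree build directory (exact paths may differ), would be:

; Regenerate the CHECK lines (run from the build directory; paths assumed):
;   ../llvm/utils/update_llc_test_checks.py --llc-binary ./bin/llc \
;       ../llvm/test/CodeGen/X86/vector-reduce-mul.ll
; Run only this test through lit:
;   ./bin/llvm-lit -v ../llvm/test/CodeGen/X86/vector-reduce-mul.ll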
