@@ -2218,7 +2218,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64>, <8 x i16>, i8)
define <8 x i64> @test_mm512_ternarylogic_epi32(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C) {
; CHECK-LABEL: test_mm512_ternarylogic_epi32:
; CHECK: # %bb.0: # %entry
- ; CHECK-NEXT: vpternlogd $4, %zmm2, %zmm1, %zmm0
+ ; CHECK-NEXT: vpternlogd {{.*#+}} zmm0 = zmm1 & ~(zmm0 | zmm2)
; CHECK-NEXT: ret{{[l|q]}}
entry:
%0 = bitcast <8 x i64> %__A to <16 x i32>
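(Note on the new CHECK pattern: `{{.*#+}}` matches an assembler comment, so the test now checks the printer's decoding of the ternlog immediate into a boolean expression. For `vpternlogd $4, %zmm2, %zmm1, %zmm0`, the immediate is an eight-entry truth table indexed by the three source bits, with the destination bit highest; only entry (0,1,0) is set, which is `zmm1 & ~(zmm0 | zmm2)`. A minimal C sketch of that decoding, for illustration only and not code from this patch:

#include <stdint.h>
#include <stdio.h>

/* Evaluate a vpternlog-style immediate bit by bit: for each bit position,
   the three source bits (a = destination operand, b and c = the other two
   sources) form an index a<<2 | b<<1 | c into the 8-bit truth table imm. */
static uint32_t ternlog(uint8_t imm, uint32_t a, uint32_t b, uint32_t c) {
    uint32_t r = 0;
    for (int i = 0; i < 32; i++) {
        int idx = (((a >> i) & 1) << 2) | (((b >> i) & 1) << 1) | ((c >> i) & 1);
        r |= (uint32_t)((imm >> idx) & 1) << i;
    }
    return r;
}

int main(void) {
    uint32_t a = 0xF0F0F0F0, b = 0xCCCCCCCC, c = 0xAAAAAAAA;
    /* imm 4 = 0b00000100: only entry (a,b,c) = (0,1,0) is set, i.e.
       b & ~(a | c), the expression the new CHECK lines spell out. */
    printf("%s\n", ternlog(4, a, b, c) == (b & ~(a | c)) ? "match" : "mismatch");
    return 0;
}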
@@ -2236,13 +2236,13 @@ define <8 x i64> @test_mm512_mask_ternarylogic_epi32(<8 x i64> %__A, i16 zeroext
; X86: # %bb.0: # %entry
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
- ; X86-NEXT: vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1}
+ ; X86-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} = zmm1 & ~(zmm0 | zmm2)
; X86-NEXT: retl
;
; X64-LABEL: test_mm512_mask_ternarylogic_epi32:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
- ; X64-NEXT: vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1}
+ ; X64-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} = zmm1 & ~(zmm0 | zmm2)
; X64-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__A to <16 x i32>
@@ -2260,13 +2260,13 @@ define <8 x i64> @test_mm512_maskz_ternarylogic_epi32(i16 zeroext %__U, <8 x i64
; X86: # %bb.0: # %entry
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
- ; X86-NEXT: vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1} {z}
+ ; X86-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = zmm1 & ~(zmm0 | zmm2)
; X86-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_ternarylogic_epi32:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
- ; X64-NEXT: vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1} {z}
+ ; X64-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = zmm1 & ~(zmm0 | zmm2)
; X64-NEXT: retq
entry:
%0 = bitcast <8 x i64> %__A to <16 x i32>
@@ -2282,7 +2282,7 @@ entry:
define <8 x i64> @test_mm512_ternarylogic_epi64(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C) {
; CHECK-LABEL: test_mm512_ternarylogic_epi64:
; CHECK: # %bb.0: # %entry
- ; CHECK-NEXT: vpternlogq $4, %zmm2, %zmm1, %zmm0
+ ; CHECK-NEXT: vpternlogq {{.*#+}} zmm0 = zmm1 & ~(zmm0 | zmm2)
; CHECK-NEXT: ret{{[l|q]}}
entry:
%0 = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C, i32 4)
@@ -2296,13 +2296,13 @@ define <8 x i64> @test_mm512_mask_ternarylogic_epi64(<8 x i64> %__A, i8 zeroext
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
- ; X86-NEXT: vpternlogq $4, %zmm2, %zmm1, %zmm0 {%k1}
+ ; X86-NEXT: vpternlogq {{.*#+}} zmm0 {%k1} = zmm1 & ~(zmm0 | zmm2)
; X86-NEXT: retl
;
; X64-LABEL: test_mm512_mask_ternarylogic_epi64:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
- ; X64-NEXT: vpternlogq $4, %zmm2, %zmm1, %zmm0 {%k1}
+ ; X64-NEXT: vpternlogq {{.*#+}} zmm0 {%k1} = zmm1 & ~(zmm0 | zmm2)
; X64-NEXT: retq
entry:
%0 = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C, i32 4)
@@ -2316,13 +2316,13 @@ define <8 x i64> @test_mm512_maskz_ternarylogic_epi64(i8 zeroext %__U, <8 x i64>
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
- ; X86-NEXT: vpternlogq $4, %zmm2, %zmm1, %zmm0 {%k1} {z}
+ ; X86-NEXT: vpternlogq {{.*#+}} zmm0 {%k1} {z} = zmm1 & ~(zmm0 | zmm2)
; X86-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_ternarylogic_epi64:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
- ; X64-NEXT: vpternlogq $4, %zmm2, %zmm1, %zmm0 {%k1} {z}
+ ; X64-NEXT: vpternlogq {{.*#+}} zmm0 {%k1} {z} = zmm1 & ~(zmm0 | zmm2)
; X64-NEXT: retq
entry:
%0 = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C, i32 4)
@@ -6864,7 +6864,7 @@ define i64 @test_mm512_mask_reduce_and_epi64(i8 zeroext %__M, <8 x i64> %__W) {
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
- ; X86-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+ ; X86-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; X86-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; X86-NEXT: vpand %ymm0, %ymm1, %ymm0
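(The reduce tests here and below use immediate 255: all eight truth-table entries are set, so the destination becomes all-ones regardless of what the sources hold, which is why `vpternlogd $255, %zmm1, %zmm1, %zmm1` now prints as `zmm1 = -1`. A standalone C check of that reasoning, again illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint8_t imm = 255;
    /* Whatever the three source bits are, the indexed truth-table entry
       is 1, so every destination bit becomes 1 and the register ends up
       as all-ones, i.e. -1. */
    int always_one = 1;
    for (int idx = 0; idx < 8; idx++)
        always_one &= (imm >> idx) & 1;
    printf("%s\n", always_one ? "zmm1 = -1 for any inputs" : "input-dependent");
    return 0;
}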
@@ -6880,7 +6880,7 @@ define i64 @test_mm512_mask_reduce_and_epi64(i8 zeroext %__M, <8 x i64> %__W) {
; X64-LABEL: test_mm512_mask_reduce_and_epi64:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
- ; X64-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+ ; X64-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; X64-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; X64-NEXT: vpand %ymm0, %ymm1, %ymm0
@@ -7200,7 +7200,7 @@ define i32 @test_mm512_mask_reduce_and_epi32(i16 zeroext %__M, <8 x i64> %__W) {
; X86: # %bb.0: # %entry
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
- ; X86-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+ ; X86-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; X86-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; X86-NEXT: vpand %ymm0, %ymm1, %ymm0
@@ -7217,7 +7217,7 @@ define i32 @test_mm512_mask_reduce_and_epi32(i16 zeroext %__M, <8 x i64> %__W) {
; X64-LABEL: test_mm512_mask_reduce_and_epi32:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
- ; X64-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+ ; X64-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; X64-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; X64-NEXT: vpand %ymm0, %ymm1, %ymm0
@@ -8176,7 +8176,7 @@ define i64 @test_mm512_mask_reduce_min_epu64(i8 zeroext %__M, <8 x i64> %__W) {
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
- ; X86-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+ ; X86-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; X86-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; X86-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[4,5,6,7,0,1,2,3]
; X86-NEXT: vpminuq %zmm0, %zmm1, %zmm0
@@ -8192,7 +8192,7 @@ define i64 @test_mm512_mask_reduce_min_epu64(i8 zeroext %__M, <8 x i64> %__W) {
; X64-LABEL: test_mm512_mask_reduce_min_epu64:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
- ; X64-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+ ; X64-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; X64-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; X64-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[4,5,6,7,0,1,2,3]
; X64-NEXT: vpminuq %zmm0, %zmm1, %zmm0
@@ -8778,7 +8778,7 @@ define i32 @test_mm512_mask_reduce_min_epu32(i16 zeroext %__M, <8 x i64> %__W) {
; X86: # %bb.0: # %entry
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
- ; X86-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+ ; X86-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; X86-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; X86-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; X86-NEXT: vpminud %ymm0, %ymm1, %ymm0
@@ -8795,7 +8795,7 @@ define i32 @test_mm512_mask_reduce_min_epu32(i16 zeroext %__M, <8 x i64> %__W) {
; X64-LABEL: test_mm512_mask_reduce_min_epu32:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1
- ; X64-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+ ; X64-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; X64-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; X64-NEXT: vextracti64x4 $1, %zmm1, %ymm0
; X64-NEXT: vpminud %ymm0, %ymm1, %ymm0