+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -instcombine -S < %s | FileCheck %s
+; RUN: opt -passes=instcombine -S < %s | FileCheck %s
+
+; Prohibit pointer cast for amx: the vector load and the bitcast to x86_amx must not be folded into a load through an x86_amx* pointer cast.
+define dso_local void @test_amx_load_store(<256 x i32>* %src, i8* %dst) {
+; CHECK-LABEL: @test_amx_load_store(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VEC:%.*]] = load <256 x i32>, <256 x i32>* [[SRC:%.*]], align 64
+; CHECK-NEXT:    [[BC:%.*]] = bitcast <256 x i32> [[VEC]] to x86_amx
+; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 16, i16 16, i8* [[DST:%.*]], i64 64, x86_amx [[BC]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %vec = load <256 x i32>, <256 x i32>* %src, align 64
+  %bc = bitcast <256 x i32> %vec to x86_amx
+  tail call void @llvm.x86.tilestored64.internal(i16 16, i16 16, i8* %dst, i64 64, x86_amx %bc)
+  ret void
+}
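+
+; For reference, a sketch of the fold this test guards against (hypothetical
+; IR that must NOT be produced; %src.amx is an illustrative name):
+;   %src.amx = bitcast <256 x i32>* %src to x86_amx*
+;   %bc = load x86_amx, x86_amx* %src.amx, align 64
+; x86_amx values are meant to be produced and consumed only by AMX
+; intrinsics, so rewriting the vector load this way cannot be lowered
+; correctly; the CHECK lines above assert the load and bitcast are kept.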
+
+; Prohibit pointer cast for amx: the x86_amx result and its bitcast must not be folded into a store through an x86_amx* pointer cast.
+define dso_local void @test_amx_load_store2(<256 x i32>* %dst, i8* %src) {
+; CHECK-LABEL: @test_amx_load_store2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[AMX:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, i8* [[SRC:%.*]], i64 64)
+; CHECK-NEXT:    [[BC:%.*]] = bitcast x86_amx [[AMX]] to <256 x i32>
+; CHECK-NEXT:    store <256 x i32> [[BC]], <256 x i32>* [[DST:%.*]], align 1024
+; CHECK-NEXT:    ret void
+;
+entry:
+  %amx = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, i8* %src, i64 64)
+  %bc = bitcast x86_amx %amx to <256 x i32>
+  store <256 x i32> %bc, <256 x i32>* %dst
+  ret void
+}
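+
+; Likewise, a sketch of the prohibited fold in the store direction
+; (%dst.amx is an illustrative name, not IR this test produces):
+;   %dst.amx = bitcast <256 x i32>* %dst to x86_amx*
+;   store x86_amx %amx, x86_amx* %dst.amx, align 64
+; The bitcast back to <256 x i32> and the vector store must be preserved.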
+
+declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
+declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)