1
1
// Test host codegen.
2
- // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
3
- // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
4
- // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu
2
+ // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s
3
+ // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -verify -emit-pch -o %t %s
4
+ // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -include-pch %t %s -emit-llvm -o - | FileCheck %s
5
5
6
- // expected-no-diagnostics
7
6
#ifndef HEADER
8
7
#define HEADER
9
8
@@ -39,6 +38,71 @@ void fie(void) {
39
38
{}
40
39
#pragma omp target uses_allocators(omp_pteam_mem_alloc) allocate(omp_pteam_mem_alloc: x) firstprivate(x)
41
40
{}
41
+ #pragma omp target uses_allocators(omp_thread_mem_alloc) allocate(omp_thread_mem_alloc: x) firstprivate(x) // expected-warning {{allocator with the 'thread' trait access has unspecified behavior on 'target' directive}}
42
+ {}
42
43
}
43
44
44
45
#endif
46
+
47
+ // CHECK: %[[#R0:]] = call i32 @__kmpc_global_thread_num(ptr @1)
48
+ // CHECK-NEXT: store i64 %x, ptr %x.addr, align 8
49
+ // CHECK-NEXT: %.x..void.addr = call ptr @__kmpc_alloc(i32 %[[#R0]], i64 4, ptr null)
50
+ // CHECK-NEXT: %[[#R1:]] = load i32, ptr %x.addr, align 4
51
+ // CHECK-NEXT: store i32 %[[#R1]], ptr %.x..void.addr, align 4
52
+ // CHECK-NEXT: call void @__kmpc_free(i32 %[[#R0]], ptr %.x..void.addr, ptr null)
53
+
54
+ // CHECK: %[[#R0:]] = call i32 @__kmpc_global_thread_num(ptr @1)
55
+ // CHECK-NEXT: store i64 %x, ptr %x.addr, align 8
56
+ // CHECK-NEXT: %.x..void.addr = call ptr @__kmpc_alloc(i32 %[[#R0]], i64 4, ptr inttoptr (i64 1 to ptr))
57
+ // CHECK-NEXT: %[[#R1:]] = load i32, ptr %x.addr, align 4
58
+ // CHECK-NEXT: store i32 %[[#R1]], ptr %.x..void.addr, align 4
59
+ // CHECK-NEXT: call void @__kmpc_free(i32 %[[#R0]], ptr %.x..void.addr, ptr inttoptr (i64 1 to ptr))
60
+
61
+ // CHECK: %[[#R0:]] = call i32 @__kmpc_global_thread_num(ptr @1)
62
+ // CHECK-NEXT: store i64 %x, ptr %x.addr, align 8
63
+ // CHECK-NEXT: %.x..void.addr = call ptr @__kmpc_alloc(i32 %[[#R0]], i64 4, ptr inttoptr (i64 2 to ptr))
64
+ // CHECK-NEXT: %[[#R1:]] = load i32, ptr %x.addr, align 4
65
+ // CHECK-NEXT: store i32 %[[#R1]], ptr %.x..void.addr, align 4
66
+ // CHECK-NEXT: call void @__kmpc_free(i32 %[[#R0]], ptr %.x..void.addr, ptr inttoptr (i64 2 to ptr))
67
+
68
+ // CHECK: %[[#R0:]] = call i32 @__kmpc_global_thread_num(ptr @1)
69
+ // CHECK-NEXT: store i64 %x, ptr %x.addr, align 8
70
+ // CHECK-NEXT: %.x..void.addr = call ptr @__kmpc_alloc(i32 %[[#R0]], i64 4, ptr inttoptr (i64 3 to ptr))
71
+ // CHECK-NEXT: %[[#R1:]] = load i32, ptr %x.addr, align 4
72
+ // CHECK-NEXT: store i32 %[[#R1]], ptr %.x..void.addr, align 4
73
+ // CHECK-NEXT: call void @__kmpc_free(i32 %[[#R0]], ptr %.x..void.addr, ptr inttoptr (i64 3 to ptr))
74
+
75
+ // CHECK: %[[#R0:]] = call i32 @__kmpc_global_thread_num(ptr @1)
76
+ // CHECK-NEXT: store i64 %x, ptr %x.addr, align 8
77
+ // CHECK-NEXT: %.x..void.addr = call ptr @__kmpc_alloc(i32 %[[#R0]], i64 4, ptr inttoptr (i64 4 to ptr))
78
+ // CHECK-NEXT: %[[#R1:]] = load i32, ptr %x.addr, align 4
79
+ // CHECK-NEXT: store i32 %[[#R1]], ptr %.x..void.addr, align 4
80
+ // CHECK-NEXT: call void @__kmpc_free(i32 %[[#R0]], ptr %.x..void.addr, ptr inttoptr (i64 4 to ptr))
81
+
82
+ // CHECK: %[[#R0:]] = call i32 @__kmpc_global_thread_num(ptr @1)
83
+ // CHECK-NEXT: store i64 %x, ptr %x.addr, align 8
84
+ // CHECK-NEXT: %.x..void.addr = call ptr @__kmpc_alloc(i32 %[[#R0]], i64 4, ptr inttoptr (i64 5 to ptr))
85
+ // CHECK-NEXT: %[[#R1:]] = load i32, ptr %x.addr, align 4
86
+ // CHECK-NEXT: store i32 %[[#R1]], ptr %.x..void.addr, align 4
87
+ // CHECK-NEXT: call void @__kmpc_free(i32 %[[#R0]], ptr %.x..void.addr, ptr inttoptr (i64 5 to ptr))
88
+
89
+ // CHECK: %[[#R0:]] = call i32 @__kmpc_global_thread_num(ptr @1)
90
+ // CHECK-NEXT: store i64 %x, ptr %x.addr, align 8
91
+ // CHECK-NEXT: %.x..void.addr = call ptr @__kmpc_alloc(i32 %[[#R0]], i64 4, ptr inttoptr (i64 6 to ptr))
92
+ // CHECK-NEXT: %[[#R1:]] = load i32, ptr %x.addr, align 4
93
+ // CHECK-NEXT: store i32 %[[#R1]], ptr %.x..void.addr, align 4
94
+ // CHECK-NEXT: call void @__kmpc_free(i32 %[[#R0]], ptr %.x..void.addr, ptr inttoptr (i64 6 to ptr))
95
+
96
+ // CHECK: %[[#R0:]] = call i32 @__kmpc_global_thread_num(ptr @1)
97
+ // CHECK-NEXT: store i64 %x, ptr %x.addr, align 8
98
+ // CHECK-NEXT: %.x..void.addr = call ptr @__kmpc_alloc(i32 %[[#R0]], i64 4, ptr inttoptr (i64 7 to ptr))
99
+ // CHECK-NEXT: %[[#R1:]] = load i32, ptr %x.addr, align 4
100
+ // CHECK-NEXT: store i32 %[[#R1]], ptr %.x..void.addr, align 4
101
+ // CHECK-NEXT: call void @__kmpc_free(i32 %[[#R0]], ptr %.x..void.addr, ptr inttoptr (i64 7 to ptr))
102
+
103
+ // CHECK: %[[#R0:]] = call i32 @__kmpc_global_thread_num(ptr @1)
104
+ // CHECK-NEXT: store i64 %x, ptr %x.addr, align 8
105
+ // CHECK-NEXT: %.x..void.addr = call ptr @__kmpc_alloc(i32 %[[#R0]], i64 4, ptr inttoptr (i64 8 to ptr))
106
+ // CHECK-NEXT: %[[#R1:]] = load i32, ptr %x.addr, align 4
107
+ // CHECK-NEXT: store i32 %[[#R1]], ptr %.x..void.addr, align 4
108
+ // CHECK-NEXT: call void @__kmpc_free(i32 %[[#R0]], ptr %.x..void.addr, ptr inttoptr (i64 8 to ptr))
0 commit comments