This repository was archived by the owner on Mar 28, 2023. It is now read-only.

Commit 5b8908a

[SYCL][CUDA] Reduce required sm version for atomic_ref tests (#985)
Reduces the sm version requirement for the atomic_ref tests. Testing this properly would require duplicating all of the atomic_ref tests for sm_50 (as in this PR) and again for sm_70 (for system-scope atomics), which would duplicate all of the slowest tests. To avoid a significant increase in test running time, I am instead removing the tests for system-scope atomics, which have relatively simple implementations. Tests intel/llvm#5998
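
For context, here is a minimal self-contained sketch of the scope-dispatch pattern the *_test_scopes helpers in this diff follow after the change: query the device's supported atomic memory scopes and run tests only for scopes it reports, with memory_scope::system no longer dispatched. The run_for_scope callable and the memory_scope::device branch are illustrative assumptions, not part of this diff.

#include <sycl/sycl.hpp>

#include <algorithm>
#include <vector>

int main() {
  sycl::queue q;

  // Same capability query used by the *_test_scopes helpers in this diff.
  std::vector<sycl::memory_scope> scopes =
      q.get_device()
          .get_info<sycl::info::device::atomic_memory_scope_capabilities>();

  auto supported = [&](sycl::memory_scope s) {
    return std::find(scopes.begin(), scopes.end(), s) != scopes.end();
  };

  // Hypothetical stand-in for the per-scope entry points (add_test, and_test, ...).
  auto run_for_scope = [](sycl::memory_scope) { /* per-scope test body */ };

  // memory_scope::system is intentionally not dispatched after this commit;
  // narrower scopes are still exercised when the device reports them.
  if (supported(sycl::memory_scope::work_group))
    run_for_scope(sycl::memory_scope::work_group);
  if (supported(sycl::memory_scope::device)) // assumed, mirroring work_group
    run_for_scope(sycl::memory_scope::device);
}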
1 parent: 2a19f3b


70 files changed: +60 -100 lines (only a subset of the changed files is shown below)

SYCL/AtomicRef/add.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 // See https://github.com/intel/llvm-test-suite/issues/867 for detailed status
 // UNSUPPORTED: hip

-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-device-code-split=per_kernel -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-device-code-split=per_kernel -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add.h

Lines changed: 0 additions & 4 deletions
@@ -287,10 +287,6 @@ template <access::address_space space, typename T, typename Difference = T,
 void add_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    add_test<space, T, Difference, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     add_test<space, T, Difference, order, memory_scope::work_group>(q, N);
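
A hypothetical call site for the helper above, assuming the elided template parameters end with a memory_order named order (consistent with the add_test instantiations visible in this hunk):

  // Illustrative only; the actual parameter list is truncated in this hunk.
  add_test_scopes<access::address_space::global_space, int, int,
                  memory_order::relaxed>(q, N);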

SYCL/AtomicRef/add_generic.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_generic_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_generic_local_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_generic_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_local_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and.h

Lines changed: 0 additions & 4 deletions
@@ -136,10 +136,6 @@ template <access::address_space space, typename T,
 void and_test_scopes(queue q) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    and_test<space, T, order, memory_scope::system>(q);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     and_test<space, T, order, memory_scope::work_group>(q);

SYCL/AtomicRef/and_generic.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and_generic_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/compare_exchange.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/compare_exchange.h

Lines changed: 0 additions & 4 deletions
@@ -155,10 +155,6 @@ template <access::address_space space, typename T,
 void compare_exchange_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    compare_exchange_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     compare_exchange_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/compare_exchange_generic.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/compare_exchange_generic_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/compare_exchange_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange.h

Lines changed: 0 additions & 4 deletions
@@ -138,10 +138,6 @@ template <access::address_space space, typename T,
 void exchange_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    exchange_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     exchange_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/exchange_generic.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange_generic_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load.h

Lines changed: 0 additions & 4 deletions
@@ -120,10 +120,6 @@ template <access::address_space space, typename T,
 void load_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    load_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     load_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/load_generic.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load_generic_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 // See https://github.com/intel/llvm-test-suite/issues/867 for detailed status
 // UNSUPPORTED: hip

-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max.h

Lines changed: 0 additions & 4 deletions
@@ -147,10 +147,6 @@ template <access::address_space space, typename T,
 void max_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    max_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     max_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/max_generic.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_generic_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_generic_local_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_generic_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_local_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 // See https://github.com/intel/llvm-test-suite/issues/867 for detailed status
 // UNSUPPORTED: hip

-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min.h

Lines changed: 0 additions & 4 deletions
@@ -145,10 +145,6 @@ template <access::address_space space, typename T,
 void min_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    min_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
      scopes.end()) {
     min_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/min_generic.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_generic_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_generic_local_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_generic_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_local.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_local_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_native_fp.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/or.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/or.h

Lines changed: 0 additions & 4 deletions
@@ -135,10 +135,6 @@ template <access::address_space space, typename T,
 void or_test_scopes(queue q) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    or_test<space, T, order, memory_scope::system>(q);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
      scopes.end()) {
     or_test<space, T, order, memory_scope::work_group>(q);

SYCL/AtomicRef/or_generic.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
