
[SYCL] Remove tests added to intel/llvm-test-suite #2753

Merged: 7 commits merged on Nov 12, 2020

60 changes: 60 additions & 0 deletions sycl/test/basic_tests/built-ins.cpp
@@ -0,0 +1,60 @@
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out

// CUDA does not support printf.
// UNSUPPORTED: cuda
#include <CL/sycl.hpp>

#include <cassert>

namespace s = cl::sycl;

// According to OpenCL C spec, the format string must be in constant address
// space
#ifdef __SYCL_DEVICE_ONLY__
#define CONSTANT __attribute__((opencl_constant))
#else
#define CONSTANT
#endif

static const CONSTANT char format[] = "Hello, World! %d %f\n";

int main() {
s::queue q{};

// Test printf
q.submit([&](s::handler &CGH) {
CGH.single_task<class printf>([=]() {
s::ONEAPI::experimental::printf(format, 123, 1.23);
// CHECK: {{(Hello, World! 123 1.23)?}}
});
}).wait();

s::ONEAPI::experimental::printf(format, 321, 3.21);
// CHECK: {{(Hello, World! 321 3.21)?}}

// Test common
{
s::buffer<s::cl_float, 1> BufMin(s::range<1>(1));
s::buffer<s::cl_float2, 1> BufMax(s::range<1>(1));
q.submit([&](s::handler &cgh) {
auto AccMin = BufMin.get_access<s::access::mode::write>(cgh);
auto AccMax = BufMax.get_access<s::access::mode::write>(cgh);
cgh.single_task<class common>([=]() {
AccMax[0] = s::max(s::cl_float2{0.5f, 2.5}, s::cl_float2{2.3f, 2.3});
AccMin[0] = s::min(s::cl_float{0.5f}, s::cl_float{2.3f});
});
});

auto AccMin = BufMin.template get_access<s::access::mode::read>();
auto AccMax = BufMax.template get_access<s::access::mode::read>();

assert(AccMin[0] == 0.5);
assert(AccMax[0].x() == 2.3f && AccMax[0].y() == 2.5f);
assert(s::min(0.5f, 2.3f) == 0.5);
auto Res = s::max(s::int4{5, 2, 1, 5}, s::int4{3, 3, 4, 2});
assert(Res.x() == 5 && Res.y() == 3 && Res.z() == 4 && Res.w() == 5);
}

return 0;
}
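
The opencl_constant attribute on the format string is the one non-obvious requirement for device-side printf, so here is a minimal standalone sketch of just that pattern. It is not part of this PR; the kernel name hello_kernel and the format string are illustrative only.

#include <CL/sycl.hpp>

// On device, the printf format string must live in the OpenCL constant
// address space; on host the attribute is not needed.
#ifdef __SYCL_DEVICE_ONLY__
#define CONSTANT __attribute__((opencl_constant))
#else
#define CONSTANT
#endif

static const CONSTANT char Fmt[] = "value: %d\n";

int main() {
  cl::sycl::queue Q;
  Q.submit([&](cl::sycl::handler &CGH) {
     CGH.single_task<class hello_kernel>(
         [=]() { cl::sycl::ONEAPI::experimental::printf(Fmt, 7); });
   }).wait();
  return 0;
}
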
99 changes: 99 additions & 0 deletions sycl/test/extensions/fpga.cpp
@@ -0,0 +1,99 @@
// RUN: %clangxx -fsycl %s -o %t.out

#include <CL/sycl.hpp>
#include <CL/sycl/INTEL/fpga_extensions.hpp>
namespace intelfpga {
template <unsigned ID> struct ethernet_pipe_id {
static constexpr unsigned id = ID;
};

using ethernet_read_pipe =
sycl::INTEL::kernel_readable_io_pipe<ethernet_pipe_id<0>, int, 0>;
using ethernet_write_pipe =
sycl::INTEL::kernel_writeable_io_pipe<ethernet_pipe_id<1>, int, 0>;
} // namespace intelfpga

int main() {
sycl::queue Queue;
/* Check buffer_location property */
sycl::buffer<int, 1> Buf{sycl::range{1}};
Queue.submit([&](sycl::handler &CGH) {
sycl::ONEAPI::accessor_property_list PL{sycl::INTEL::buffer_location<1>};
sycl::accessor Acc(Buf, CGH, sycl::write_only, PL);
CGH.single_task<class Test>([=]() { Acc[0] = 42; });
});
Queue.wait();

auto Acc = Buf.template get_access<sycl::access::mode::read_write>();
assert(Acc[0] == 42 && "Value mismatch");

/*Check FPGA-related device parameters*/
if (!Queue.get_device()
.get_info<cl::sycl::info::device::kernel_kernel_pipe_support>()) {
std::cout << "SYCL_INTEL_data_flow_pipes not supported, skipping"
<< std::endl;
return 0;
}

/*Check pipes interfaces*/
Queue.submit([&](cl::sycl::handler &cgh) {
auto write_acc = Buf.get_access<cl::sycl::access::mode::write>(cgh);

cgh.single_task<class bl_io_transfer>([=]() {
write_acc[0] = intelfpga::ethernet_read_pipe::read();
intelfpga::ethernet_write_pipe::write(write_acc[0]);
});
});

using Pipe = cl::sycl::INTEL::pipe<class PipeName, int>;
cl::sycl::buffer<int, 1> readBuf(1);
Queue.submit([&](cl::sycl::handler &cgh) {
cgh.single_task<class writer>([=]() {
bool SuccessCode = false;
do {
Pipe::write(42, SuccessCode);
} while (!SuccessCode);
});
});

/*Check LSU interface*/
{
cl::sycl::buffer<int, 1> output_buffer(1);
cl::sycl::buffer<int, 1> input_buffer(1);

Queue.submit([&](cl::sycl::handler &cgh) {
auto output_accessor =
output_buffer.get_access<cl::sycl::access::mode::write>(cgh);
auto input_accessor =
input_buffer.get_access<cl::sycl::access::mode::read>(cgh);

cgh.single_task<class kernel>([=] {
auto input_ptr = input_accessor.get_pointer();
auto output_ptr = output_accessor.get_pointer();

using PrefetchingLSU =
cl::sycl::INTEL::lsu<cl::sycl::INTEL::prefetch<true>,
cl::sycl::INTEL::statically_coalesce<false>>;

using BurstCoalescedLSU =
cl::sycl::INTEL::lsu<cl::sycl::INTEL::burst_coalesce<true>,
cl::sycl::INTEL::statically_coalesce<false>>;

using CachingLSU =
cl::sycl::INTEL::lsu<cl::sycl::INTEL::burst_coalesce<true>,
cl::sycl::INTEL::cache<1024>,
cl::sycl::INTEL::statically_coalesce<false>>;

using PipelinedLSU = cl::sycl::INTEL::lsu<>;

int X = PrefetchingLSU::load(input_ptr); // int X = input_ptr[0]
int Y = CachingLSU::load(input_ptr + 1); // int Y = input_ptr[1]

BurstCoalescedLSU::store(output_ptr, X); // output_ptr[0] = X
PipelinedLSU::store(output_ptr + 1, Y); // output_ptr[1] = Y
});
});
}

return 0;
}
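
For reference, the writer kernel above only exercises the non-blocking write; the same pipes extension also provides a non-blocking read that reports success through a bool out-parameter. A hedged sketch of a matching reader, reusing Queue, Pipe, and readBuf from the test above (the kernel name reader is illustrative and this code is not part of the PR):

  Queue.submit([&](cl::sycl::handler &cgh) {
    auto read_acc = readBuf.get_access<cl::sycl::access::mode::write>(cgh);
    cgh.single_task<class reader>([=]() {
      bool SuccessCode = false;
      int Value = 0;
      do {
        // Non-blocking read: SuccessCode reports whether a value was consumed.
        Value = Pipe::read(SuccessCode);
      } while (!SuccessCode);
      read_acc[0] = Value;
    });
  });
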
@@ -1,8 +1,6 @@
// RUN: %clangxx -fsycl -fsycl-targets=%sycl_triple %s -I . -o %t.out
// RUN: env SYCL_DEVICE_TYPE=HOST %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// RUN: %ACC_RUN_PLACEHOLDER %t.out
// Group operations are not supported on host device. The test checks that
// compilation succeeded.

// TODO: enable compile+runtime checks for operations defined in SPIR-V 1.3.
// That requires either adding a switch to clang (-spirv-max-version=1.3) or
@@ -13,7 +11,6 @@
// RUNx: %clangxx -fsycl -fsycl-targets=%sycl_triple -DSPIRV_1_3 %s -I . -o \
%t13.out

#include "support.h"
#include <CL/sycl.hpp>
#include <algorithm>
#include <cassert>
@@ -22,11 +19,23 @@
using namespace sycl;
using namespace sycl::ONEAPI;

template <class Predicate> class none_of_kernel;

struct GeZero {
bool operator()(int i) const { return i >= 0; }
};
struct IsEven {
bool operator()(int i) const { return (i % 2) == 0; }
};
struct LtZero {
bool operator()(int i) const { return i < 0; }
};

template <typename SpecializationKernelName, typename InputContainer,
typename OutputContainer, class BinaryOperation>
typename OutputContainer, class BinaryOperation, class Predicate>
void test(queue q, InputContainer input, OutputContainer output,
BinaryOperation binary_op,
typename OutputContainer::value_type identity) {
typename OutputContainer::value_type identity, Predicate pred) {
typedef typename InputContainer::value_type InputT;
typedef typename OutputContainer::value_type OutputT;
OutputT init = 42;
@@ -44,56 +53,36 @@ void test(queue q, InputContainer input, OutputContainer output,
group<1> g = it.get_group();
int lid = it.get_local_id(0);
out[0] = reduce(g, in[lid], binary_op);
out[1] = reduce(g, in[lid], init, binary_op);
out[2] =
reduce(g, in.get_pointer(), in.get_pointer() + N, binary_op);
out[3] = reduce(g, in.get_pointer(), in.get_pointer() + N, init,
binary_op);
out[1] = none_of(g, in[lid], pred);
out[2] = inclusive_scan(g, in[lid], binary_op);
out[3] = exclusive_scan(g, in[lid], binary_op);
out[4] = broadcast(g, in[lid]);
out[5] = any_of(g, in.get_pointer(), in.get_pointer() + N, pred);
out[6] = all_of(g, pred(in[lid]));
if (leader(g)) {
out[7]++;
}
});
});
}
// std::reduce is not implemented yet, so use std::accumulate instead
assert(output[0] == std::accumulate(input.begin(), input.begin() + G,
identity, binary_op));
assert(output[1] ==
std::accumulate(input.begin(), input.begin() + G, init, binary_op));
assert(output[2] ==
std::accumulate(input.begin(), input.end(), identity, binary_op));
assert(output[3] ==
std::accumulate(input.begin(), input.end(), init, binary_op));
}

int main() {
queue q;
if (!isSupportedDevice(q.get_device())) {
std::cout << "Skipping test\n";
return 0;
}

constexpr int N = 128;
std::array<int, N> input;
std::array<int, 4> output;
std::array<int, 8> output;
std::iota(input.begin(), input.end(), 0);
std::fill(output.begin(), output.end(), 0);

test<class KernelNamePlusV>(q, input, output, plus<>(), 0);
test<class KernelNamePlusV>(q, input, output, plus<>(), 0, GeZero());
test<class KernelNameMinimumV>(q, input, output, minimum<>(),
std::numeric_limits<int>::max());
test<class KernelNameMaximumV>(q, input, output, maximum<>(),
std::numeric_limits<int>::lowest());

test<class KernelNamePlusI>(q, input, output, plus<int>(), 0);
test<class KernelNameMinimumI>(q, input, output, minimum<int>(),
std::numeric_limits<int>::max());
test<class KernelNameMaximumI>(q, input, output, maximum<int>(),
std::numeric_limits<int>::lowest());
std::numeric_limits<int>::max(), IsEven());

#ifdef SPIRV_1_3
test<class KernelName_WonwuUVPUPOTKRKIBtT>(q, input, output,
multiplies<int>(), 1);
test<class KernelName_qYBaJDZTMGkdIwD>(q, input, output, bit_or<int>(), 0);
test<class KernelName_eLSFt>(q, input, output, bit_xor<int>(), 0);
test<class KernelName_uFhJnxSVhNAiFPTG>(q, input, output, bit_and<int>(), ~0);
multiplies<int>(), 1, LtZero());
#endif // SPIRV_1_3

std::cout << "Test passed." << std::endl;