
Update kernels for non-fatal (part 1) #2115


Closed · wants to merge 2 commits
17 changes: 13 additions & 4 deletions kernels/optimized/cpu/op_add.cpp
@@ -36,11 +36,17 @@ Tensor& opt_add_out(
a_type != ScalarType::Half) {
// Resize for dynamic shape
auto error = resize_tensor(out, a.sizes());
ET_CHECK_MSG(error == Error::Ok, "Failed to resize output tensor.");
ET_KERNEL_CHECK_MSG(
ctx,
error == Error::Ok,
InvalidArgument,
out,
"Failed to resize output tensor.");

ET_SWITCH_REALB_TYPES(a_type, ctx, "add.out", CTYPE, [&]() {
CTYPE alpha_val;
ET_EXTRACT_SCALAR(alpha, alpha_val);
ET_KERNEL_CHECK(
ctx, utils::extract_scalar(alpha, &alpha_val), InvalidArgument, );

using Vec = executorch::vec::Vectorized<CTYPE>;
executorch::vec::map2<CTYPE>(
@@ -53,7 +59,7 @@ Tensor& opt_add_out(
} else {
ScalarType common_type =
promoteTypes(a_type, b_type, /*half_to_float*/ true);
ET_CHECK(canCast(common_type, out_type));
ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out);

ET_KERNEL_CHECK(
ctx,
@@ -66,7 +72,10 @@ Tensor& opt_add_out(
ET_SWITCH_REALB_TYPES(common_type, ctx, "add.out", CTYPE_IN, [&]() {
ET_SWITCH_REALHB_TYPES(out_type, ctx, "add.out", CTYPE_OUT, [&]() {
CTYPE_IN alpha_val;
ET_EXTRACT_SCALAR(alpha, alpha_val);
ET_KERNEL_CHECK(
ctx,
utils::extract_scalar(alpha, &alpha_val),
InvalidArgument, );

apply_binary_elementwise_fn<CTYPE_A, CTYPE_B, CTYPE_OUT>(
[alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) {
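
Note: the core of this change is replacing the fatal `ET_CHECK_MSG`, which aborts the process, with `ET_KERNEL_CHECK_MSG`, which records the failure on the kernel's runtime context and early-returns the given value. A minimal sketch of the assumed semantics (not the actual macro definition, which also captures source location):

```cpp
// Sketch only: assumes KernelRuntimeContext exposes fail(), as in the
// ExecuTorch runtime. On a failed condition: log, flag the context with the
// given error code, and return `retval` instead of calling abort().
#define SKETCH_KERNEL_CHECK_MSG(ctx, cond, error, retval, msg) \
  do {                                                         \
    if (!(cond)) {                                             \
      ET_LOG(Error, "%s", msg);                                \
      (ctx).fail(::torch::executor::Error::error);             \
      return retval;                                           \
    }                                                          \
  } while (0)
```
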
4 changes: 4 additions & 0 deletions kernels/portable/cpu/op_add.cpp
@@ -29,6 +29,8 @@ Tensor& add_out(
InvalidArgument,
out);

ET_KERNEL_CHECK(ctx, tensor_is_realhb_type(out), InvalidArgument, out);

ScalarType a_type = a.scalar_type();
ScalarType b_type = b.scalar_type();
ScalarType alpha_type = utils::get_scalar_dtype(alpha);
@@ -81,6 +83,8 @@ Tensor& add_scalar_out(
out,
"Failed to resize output tensor.");

ET_KERNEL_CHECK(ctx, tensor_is_realhb_type(out), InvalidArgument, out);

ScalarType a_type = a.scalar_type();
ScalarType b_type = utils::get_scalar_dtype(b);
ScalarType alpha_type = utils::get_scalar_dtype(alpha);
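
With the realhb guard in place, an unsupported `out` dtype is rejected up front and no longer brings the process down. A hypothetical call site observing the failure (the `failure_state()` accessor and the `native` namespace follow my reading of the ExecuTorch runtime API; treat both as assumptions):

```cpp
// Hypothetical: API names assumed, not verified against this revision.
torch::executor::KernelRuntimeContext ctx;
torch::executor::native::add_out(ctx, a, b, /*alpha=*/1, out);
if (ctx.failure_state() != torch::executor::Error::Ok) {
  // The kernel rejected its arguments (e.g. a non-real out dtype).
}
```
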
@@ -23,6 +23,8 @@ Tensor& unary_ufunc_realhb_to_floath(
Tensor& out) {
(void)ctx;

ET_KERNEL_CHECK(ctx, tensor_is_floating_type(out), InvalidArgument, out);

// Resize for dynamic shape
ET_KERNEL_CHECK_MSG(
ctx,
1 change: 1 addition & 0 deletions kernels/portable/cpu/util/activation_ops_util.cpp
@@ -15,6 +15,7 @@ namespace executor {

bool check_gelu_args(const Tensor& in, string_view approximate, Tensor& out) {
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
ET_LOG_AND_RETURN_IF_FALSE(in.scalar_type() != ScalarType::Bool);
ET_LOG_MSG_AND_RETURN_IF_FALSE(
approximate == "tanh" || approximate == "none",
"Invalid approximation format: %.*s for gelu",
5 changes: 4 additions & 1 deletion kernels/portable/cpu/util/broadcast_util.cpp
@@ -198,7 +198,10 @@ Tensor broadcast_tensor(
repeats[i] = 1;
}
}
repeat_tensor(broadcast_from, makeArrayRef(repeats, ndim), out);

ET_CHECK(
repeat_tensor(broadcast_from, makeArrayRef(repeats, ndim), out) ==
Error::Ok);

free(repeats);

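
Note that `broadcast_tensor` itself still fails fatally through `ET_CHECK`: it is marked `__ET_DEPRECATED` (see broadcast_util.h below) and returns a `Tensor` rather than an `Error`, so it has no way to report failure to its caller. Wrapping the new status from `repeat_tensor` in a fatal check preserves the old contract here while letting `repeat_tensor` become non-fatal everywhere else.
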
8 changes: 4 additions & 4 deletions kernels/portable/cpu/util/broadcast_util.h
@@ -97,7 +97,7 @@ __ET_DEPRECATED exec_aten::Tensor broadcast_tensor(
* @param[out] out_dim The dimension of the broadcasted target
* tensor
*/
[[nodiscard]] Error get_broadcast_target_size(
__ET_NODISCARD Error get_broadcast_target_size(
const exec_aten::ArrayRef<Tensor::SizesType> a_size,
const exec_aten::ArrayRef<Tensor::SizesType> b_size,
Tensor::SizesType* out_sizes,
@@ -115,7 +115,7 @@ __ET_DEPRECATED exec_aten::Tensor broadcast_tensor(
* @param[out] out_dim The dimension of the broadcasted target
* tensor
*/
[[nodiscard]] Error get_broadcast_target_size(
__ET_NODISCARD Error get_broadcast_target_size(
const Tensor& a,
const Tensor& b,
Tensor::SizesType* out_sizes,
@@ -130,7 +130,7 @@ __ET_DEPRECATED exec_aten::Tensor broadcast_tensor(
* @param[in] b The second tensor going to be broadcasted.
* @param[out] out The output tensor that will be resized.
*/
[[nodiscard]] inline Error
__ET_NODISCARD inline Error
resize_to_broadcast_target_size(const Tensor& a, const Tensor& b, Tensor& out) {
Tensor::SizesType expected_output_size[kTensorDimensionLimit];
size_t expected_output_dim = 0;
@@ -156,7 +156,7 @@ resize_to_broadcast_target_size(const Tensor& a, const Tensor& b, Tensor& out) {
* @param[in] c The third tensor going to be broadcasted.
* @param[out] out The output tensor that will be resized.
*/
[[nodiscard]] inline Error resize_to_broadcast_target_size(
__ET_NODISCARD inline Error resize_to_broadcast_target_size(
const Tensor& a,
const Tensor& b,
const Tensor& c,
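
Swapping the bare C++17 attribute for `__ET_NODISCARD` keeps these declarations compiling on toolchains without `[[nodiscard]]` support. A plausible definition, offered as an assumption about what the ExecuTorch compatibility header does:

```cpp
// Sketch: one common way to define a portable nodiscard macro; the real
// definition in ExecuTorch's compiler header may differ.
#if defined(__cplusplus) && __cplusplus >= 201703L
#define __ET_NODISCARD [[nodiscard]]
#elif defined(__GNUC__) || defined(__clang__)
#define __ET_NODISCARD __attribute__((warn_unused_result))
#else
#define __ET_NODISCARD
#endif
```
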
4 changes: 4 additions & 0 deletions kernels/portable/cpu/util/copy_ops_util.cpp
@@ -114,6 +114,7 @@ bool check_cat_args(
// Ensure dim is in range.
ET_LOG_AND_RETURN_IF_FALSE(
tensors[ref_i].numel() == 0 || tensors[ref_i].dim() > dim);
ET_LOG_AND_RETURN_IF_FALSE(dim >= 0);

return true;
}
@@ -378,6 +379,7 @@ bool check_slice_copy_args(
int64_t dim,
int64_t step,
Tensor& out) {
ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0);
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));
ET_LOG_MSG_AND_RETURN_IF_FALSE(
@@ -737,6 +739,8 @@ bool check_unsqueeze_copy_args(
const Tensor input,
int64_t dim,
const Tensor out) {
ET_LOG_AND_RETURN_IF_FALSE(dim >= 0);

// The input and out shall share same dtype
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(input, out));

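
These `check_*` helpers (here and in kernel_ops_util.cpp below) all report through the same log-and-return idiom, which is what lets the calling kernels wrap them in `ET_KERNEL_CHECK` and surface `InvalidArgument` without aborting. A minimal sketch of the assumed macro behavior:

```cpp
// Sketch: assumed semantics of ET_LOG_AND_RETURN_IF_FALSE; the real macro in
// the ExecuTorch tensor utilities also reports the source location.
#define SKETCH_LOG_AND_RETURN_IF_FALSE(cond)    \
  do {                                          \
    if (!(cond)) {                              \
      ET_LOG(Error, "Check failed: %s", #cond); \
      return false;                             \
    }                                           \
  } while (0)
```
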
2 changes: 2 additions & 0 deletions kernels/portable/cpu/util/kernel_ops_util.cpp
@@ -462,6 +462,8 @@ bool check_slice_scatter_args(
int64_t num_values,
int64_t step,
Tensor output) {
ET_LOG_AND_RETURN_IF_FALSE(input.dim() > 0);

// Check dim. The dim planed to be selected on shall exist in input
ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, input.dim()));

31 changes: 18 additions & 13 deletions kernels/portable/cpu/util/repeat_util.cpp
@@ -20,12 +20,12 @@ using Tensor = exec_aten::Tensor;

namespace {

void check_repeat_args(
bool check_repeat_args(
Tensor self,
exec_aten::ArrayRef<int64_t> repeats,
Tensor& out) {
// Ensure the self tensors list is non-empty.
ET_CHECK_MSG(
ET_LOG_MSG_AND_RETURN_IF_FALSE(
repeats.size() >= self.dim(),
"Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor");

@@ -34,11 +34,11 @@ void check_repeat_args(
for (auto repeat : repeats) {
all_non_negative = all_non_negative && (repeat >= 0);
}
ET_CHECK_MSG(
ET_LOG_MSG_AND_RETURN_IF_FALSE(
all_non_negative, "Trying to create tensor with negative dimension");

/// Check if out.size() is legal.
ET_CHECK_MSG(
ET_LOG_MSG_AND_RETURN_IF_FALSE(
out.dim() == repeats.size(),
"The dimension of out shall equal size of repeats, but now is %zd and %zd",
out.dim(),
@@ -47,12 +47,12 @@ void check_repeat_args(
// Right now we only support the tensors whose dimension is no greater than
// kTensorDimensionLimit. Only check out tensor because the number of
// dimension of out tensor shall have more than or equal to self tensor
ET_CHECK_MSG(
ET_LOG_MSG_AND_RETURN_IF_FALSE(
out.dim() <= kTensorDimensionLimit,
"The dimension of input and output should not be larger than %zd",
kTensorDimensionLimit);

ET_CHECK_SAME_DTYPE2(out, self);
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(out, self));

// We pad one to the beginning of self.size() to make its length equal
// repeats, and called it reformat_self_size. We then make point-to-point mul
@@ -66,13 +66,15 @@ void check_repeat_args(
reformat_self_size[out.dim() - 1 - i] = self.size(self.dim() - 1 - i);
}
for (size_t i = 0; i < repeats.size(); i++) {
ET_CHECK_MSG(
ET_LOG_MSG_AND_RETURN_IF_FALSE(
reformat_self_size[i] * repeats[i] == out.size(i),
"Expect out size at dimension %zu is %" PRId64 ", but now is %zd",
i,
reformat_self_size[i] * repeats[i],
out.size(i));
}

return true;
}

// Given the indices to a point in an n-D tensor, and the stride (in bytes)
@@ -163,16 +165,19 @@

// TODO(gasoonjia): dynamic allocate array to support tensor dimension larger
// than kTensorDimensionLimit.
Tensor& repeat_tensor(
Error repeat_tensor(
const Tensor& self,
exec_aten::ArrayRef<int64_t> repeats,
Tensor& out) {
// Assert that the args are valid.
check_repeat_args(self, repeats, out);
// Verify that the args are valid.
ET_CHECK_OR_RETURN_ERROR(
check_repeat_args(self, repeats, out),
InvalidArgument,
"Repeat arguments are invalid.");

// Returns out if out.numel == 0, nothing needs to be repeated.
if (out.numel() == 0) {
return out;
return Error::Ok;
}

ssize_t element_size = out.element_size();
@@ -183,7 +188,7 @@
const char* src = self.const_data_ptr<char>();
char* dest = out.mutable_data_ptr<char>();
memcpy(dest, src, element_size);
return out;
return Error::Ok;
}

// Treats zero-dim self as one-dim tensor with size {1}.
@@ -274,7 +279,7 @@ Tensor& repeat_tensor(
accum_offset *= out.size(i);
}

return out;
return Error::Ok;
}

} // namespace executor
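
Since `repeat_tensor` now returns `Error` instead of `Tensor&`, every call site must consume the status, as `broadcast_tensor` does above with `ET_CHECK`. A sketch of a non-fatal caller under the new signature (include path mirrors the header touched below):

```cpp
#include <executorch/kernels/portable/cpu/util/repeat_util.h>

using exec_aten::Tensor;
using torch::executor::Error;

// Sketch: propagate the status to the caller instead of crashing on bad args.
Error repeat_or_report(
    const Tensor& self,
    exec_aten::ArrayRef<int64_t> repeats,
    Tensor& out) {
  Error err = torch::executor::repeat_tensor(self, repeats, out);
  if (err != Error::Ok) {
    return err; // invalid repeat arguments; the caller decides how to recover
  }
  // ... use `out` ...
  return Error::Ok;
}
```
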
4 changes: 2 additions & 2 deletions kernels/portable/cpu/util/repeat_util.h
@@ -20,9 +20,9 @@ namespace executor {
* @param[in] The number of times to repeat this tensor along each dimension
* @param[in] Output tensor to write to.
*
* @returns Repeated tensor.
* @returns The status of the repeat operation.
*/
exec_aten::Tensor& repeat_tensor(
Error repeat_tensor(
const exec_aten::Tensor& in,
exec_aten::ArrayRef<int64_t> repeats,
exec_aten::Tensor& out);
1 change: 1 addition & 0 deletions kernels/portable/cpu/util/targets.bzl
@@ -27,6 +27,7 @@ def define_common_targets():
],
exported_headers = ["repeat_util.h"],
deps = [
"//executorch/runtime/kernel:kernel_includes",
"//executorch/runtime/core/exec_aten/util:scalar_type_util",
"//executorch/runtime/core/exec_aten/util:tensor_util",
],
73 changes: 41 additions & 32 deletions kernels/portable/test/op_allclose_test.cpp
@@ -11,6 +11,7 @@
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/test/utils/DeathTest.h>

#include <gtest/gtest.h>
#include <cmath>
@@ -188,14 +189,16 @@ TEST(OpAllCloseTest, MismatchedInputShapesDeath) {
TensorFactory<ScalarType::Bool> tf_bool;
Tensor out = tf_bool.zeros(/*sizes=*/{1});

ET_EXPECT_KERNEL_FAILURE(allclose_out(
a,
b,
default_rtol,
default_atol,
/*equal_nan=*/false,
/*dummy_param=*/false,
out));
ET_EXPECT_DEATH(
allclose_out(
a,
b,
default_rtol,
default_atol,
/*equal_nan=*/false,
/*dummy_param=*/false,
out),
"");
}

TEST(OpAllCloseTest, MismatchedInputDtypesDeath) {
@@ -208,14 +211,16 @@ TEST(OpAllCloseTest, MismatchedInputDtypesDeath) {
TensorFactory<ScalarType::Bool> tf_bool;
Tensor out = tf_bool.zeros(/*sizes=*/{1});

ET_EXPECT_KERNEL_FAILURE(allclose_out(
a,
b,
default_rtol,
default_atol,
/*equal_nan=*/false,
/*dummy_param=*/false,
out));
ET_EXPECT_DEATH(
allclose_out(
a,
b,
default_rtol,
default_atol,
/*equal_nan=*/false,
/*dummy_param=*/false,
out),
"");
}

TEST(OpAllCloseTest, IncorrectOutputDtypeDeath) {
@@ -224,14 +229,16 @@ TEST(OpAllCloseTest, IncorrectOutputDtypeDeath) {
Tensor b = tf_float.ones(/*sizes=*/{2, 2});
Tensor out = tf_float.zeros(/*sizes=*/{1});

ET_EXPECT_KERNEL_FAILURE(allclose_out(
a,
b,
default_rtol,
default_atol,
/*equal_nan=*/false,
/*dummy_param=*/false,
out));
ET_EXPECT_DEATH(
allclose_out(
a,
b,
default_rtol,
default_atol,
/*equal_nan=*/false,
/*dummy_param=*/false,
out),
"");
}

TEST(OpAllCloseTest, IncorrectOutputShapeDeath) {
@@ -241,14 +248,16 @@ TEST(OpAllCloseTest, IncorrectOutputShapeDeath) {
TensorFactory<ScalarType::Bool> tf_bool;
Tensor out = tf_bool.zeros(/*sizes=*/{2, 2});

ET_EXPECT_KERNEL_FAILURE(allclose_out(
a,
b,
default_rtol,
default_atol,
/*equal_nan=*/false,
/*dummy_param=*/false,
out));
ET_EXPECT_DEATH(
allclose_out(
a,
b,
default_rtol,
default_atol,
/*equal_nan=*/false,
/*dummy_param=*/false,
out),
"");
}

TEST(OpAllCloseTest, FloatTensorsVaryWithinRelativeTolerance) {
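
`allclose_out` keeps its fatal checks for now, so these failure tests assert process death directly instead of going through `ET_EXPECT_KERNEL_FAILURE`. Assuming `ET_EXPECT_DEATH` (from the newly included DeathTest.h) forwards to gtest's death-test machinery, each assertion is roughly equivalent to:

```cpp
// Sketch: assumed expansion; the empty matcher accepts any abort message.
EXPECT_DEATH(
    allclose_out(
        a,
        b,
        default_rtol,
        default_atol,
        /*equal_nan=*/false,
        /*dummy_param=*/false,
        out),
    "");
```
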