Remove fatal ET_CHECK for unknown instruction #1515

Closed
wants to merge 6 commits
42 changes: 10 additions & 32 deletions runtime/core/exec_aten/util/scalar_type_util.h
@@ -253,6 +253,16 @@ ET_FORALL_SCALAR_TYPES(SPECIALIZE_CppTypeToScalarType)
// Utility functions to retrieve metadata for a given ScalarType
//

/**
* Returns true if the parameter is one of the values covered by
* ET_FORALL_SCALAR_TYPES.
*/
inline bool isValid(exec_aten::ScalarType type) {
return static_cast<int8_t>(type) >= 0 &&
type < exec_aten::ScalarType::NumOptions &&
type != exec_aten::ScalarType::Undefined;
}

/**
* Returns the name of a ScalarType as a C string.
*
@@ -541,38 +551,6 @@ inline exec_aten::ScalarType promoteTypes(
return _promoteTypesLookup[static_cast<int>(a)][static_cast<int>(b)];
}

/**
* Return the size of corresponding ctype given ScalarType.
*/
inline size_t sizeof_scalar_type(exec_aten::ScalarType type) {
// Reject types that are not yet supported or are out of bounds.
ET_CHECK_MSG(
type != exec_aten::ScalarType::Half &&
type != exec_aten::ScalarType::ComplexHalf &&
type != exec_aten::ScalarType::ComplexFloat &&
type != exec_aten::ScalarType::ComplexDouble &&
type != exec_aten::ScalarType::BFloat16 &&
type != exec_aten::ScalarType::Undefined,
"Invalid or unsupported ScalarType %" PRId8,
static_cast<int8_t>(type));

size_t type_size = 0;
#define SCALAR_TYPE_SIZE(ctype, dtype) \
case exec_aten::ScalarType::dtype: \
type_size = sizeof(ctype); \
break;

switch (type) {
ET_FORALL_SCALAR_TYPES(SCALAR_TYPE_SIZE)
default:
ET_CHECK_MSG(
false, "Invalid input ScalarType %" PRId8, static_cast<int8_t>(type));
}
#undef SCALAR_TYPE_SIZE

return type_size;
}

//
// Helper macros for switch case macros (see below)
//
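For orientation, here is a minimal usage sketch of the new isValid() helper added above. isValid(), exec_aten::ScalarType, and Error are the real ExecuTorch types from this repository; check_serialized_dtype() and the error.h include path are illustrative assumptions, not code from this PR.

```cpp
// Sketch only: reject a dtype read from serialized data before using it,
// returning a recoverable error instead of aborting via ET_CHECK.
// check_serialized_dtype() is a hypothetical caller added for illustration.
#include <cstdint>

#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

namespace torch {
namespace executor {

Error check_serialized_dtype(int8_t raw_dtype) {
  const auto type = static_cast<exec_aten::ScalarType>(raw_dtype);
  if (!isValid(type)) {
    // Undefined, NumOptions, and out-of-range values all land here.
    return Error::InvalidProgram;
  }
  return Error::Ok;
}

} // namespace executor
} // namespace torch
```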
20 changes: 20 additions & 0 deletions runtime/core/exec_aten/util/test/scalar_type_util_test.cpp
@@ -70,6 +70,26 @@ TEST(ScalarTypeUtilTest, ElementSize) {
}
}

TEST(ScalarTypeUtilTest, IsValidTrue) {
// Some valid types.
EXPECT_TRUE(torch::executor::isValid(ScalarType::Byte));
EXPECT_TRUE(torch::executor::isValid(ScalarType::Float));
EXPECT_TRUE(torch::executor::isValid(ScalarType::ComplexFloat));
EXPECT_TRUE(torch::executor::isValid(ScalarType::Bits16));
}

TEST(ScalarTypeUtilTest, IsValidFalse) {
// Undefined, which is sort of a special case since it's not part of the
// iteration macros but is still a part of the enum.
EXPECT_FALSE(torch::executor::isValid(ScalarType::Undefined));

// Some out-of-range types, also demonstrating that NumOptions is not really a
// scalar type.
EXPECT_FALSE(torch::executor::isValid(ScalarType::NumOptions));
EXPECT_FALSE(torch::executor::isValid(static_cast<ScalarType>(127)));
EXPECT_FALSE(torch::executor::isValid(static_cast<ScalarType>(-1)));
}

TEST(ScalarTypeUtilTest, UnknownTypeElementSizeDies) {
// Undefined, which is sort of a special case since it's not part of the
// iteration macros but is still a part of the enum.
8 changes: 4 additions & 4 deletions runtime/core/portable_type/tensor_impl.cpp
@@ -47,12 +47,12 @@ TensorImpl::TensorImpl(
data_(data),
dim_(dim),
numel_(compute_numel(sizes, dim)),
capacity_(numel_ * sizeof_scalar_type(type)),
capacity_(numel_ * elementSize(type)),
type_(type),
shape_dynamism_(dynamism) {}

size_t TensorImpl::nbytes() const {
return numel_ * sizeof_scalar_type(type_);
return numel_ * elementSize(type_);
}

ssize_t TensorImpl::size(ssize_t dim) const {
@@ -78,7 +78,7 @@ ScalarType TensorImpl::scalar_type() const {

// Return the size of one element of the tensor
ssize_t TensorImpl::element_size() const {
return sizeof_scalar_type(type_);
return elementSize(type_);
}

const ArrayRef<TensorImpl::SizesType> TensorImpl::sizes() const {
@@ -145,7 +145,7 @@ Error TensorImpl::internal_resize_contiguous(ArrayRef<SizesType> new_sizes) {

// Upper bounded tensors can be reshaped but not beyond upper bound
if (shape_dynamism_ == TensorShapeDynamism::DYNAMIC_BOUND) {
auto new_nbytes = new_numel * sizeof_scalar_type(type_);
auto new_nbytes = new_numel * elementSize(type_);
ET_CHECK_OR_RETURN_ERROR(
new_nbytes <= capacity_,
NotSupported,
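As a quick illustration of the substitution above, a standalone sketch of the nbytes arithmetic follows. elementSize() and exec_aten::ScalarType come from scalar_type_util.h as used in this diff; the 2x3 Float tensor is an arbitrary example, not taken from the PR.

```cpp
// Sketch only: nbytes = numel * elementSize(dtype), the expression that
// replaces numel * sizeof_scalar_type(dtype) throughout this PR.
#include <cstddef>
#include <cstdio>

#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

int main() {
  using exec_aten::ScalarType;
  using torch::executor::elementSize;

  const size_t numel = 2 * 3;  // e.g. a 2x3 tensor
  const size_t nbytes = numel * elementSize(ScalarType::Float);
  std::printf("nbytes = %zu\n", nbytes);  // 6 elements * sizeof(float), typically 24
  return 0;
}
```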
11 changes: 7 additions & 4 deletions runtime/core/portable_type/test/executor_tensor_test.cpp
@@ -15,14 +15,17 @@ namespace executor {

TEST(TensorTest, InvalidScalarType) {
TensorImpl::SizesType sizes[1] = {1};
// A type that executorch doesn't support yet.
ET_EXPECT_DEATH({ TensorImpl x(ScalarType::BFloat16, 1, sizes); }, "");

// The literal Undefined type.
// Undefined, which is sort of a special case since it's not part of the
// iteration macros but is still a part of the enum.
ET_EXPECT_DEATH({ TensorImpl y(ScalarType::Undefined, 1, sizes); }, "");

// An int value that doesn't map to a valid enum value
// Some out-of-range types, also demonstrating that NumOptions is not really a
// scalar type.
ET_EXPECT_DEATH({ TensorImpl y(ScalarType::NumOptions, 1, sizes); }, "");
ET_EXPECT_DEATH(
{ TensorImpl y(static_cast<ScalarType>(127), 1, sizes); }, "");
ET_EXPECT_DEATH({ TensorImpl y(static_cast<ScalarType>(-1), 1, sizes); }, "");
}

TEST(TensorTest, SetData) {
67 changes: 49 additions & 18 deletions runtime/executor/method.cpp
@@ -215,13 +215,22 @@ namespace {

Result<InstructionArgs> gen_instruction_arguments(
MemoryAllocator* method_allocator,
size_t num_values,
EValue* values,
size_t num_args,
const int32_t* arg_idxs) {
EValue** arg_list =
ET_ALLOCATE_LIST_OR_RETURN_ERROR(method_allocator, EValue*, num_args);
for (size_t i = 0; i < num_args; ++i) {
arg_list[i] = &values[arg_idxs[i]];
int32_t arg_idx = arg_idxs[i];
ET_LOG(Error, "Argument index %d vs num_values %zu", arg_idx, num_values);
ET_CHECK_OR_RETURN_ERROR(
arg_idx < num_values,
InvalidProgram,
"Arg index %d >= %zu",
arg_idx,
num_values);
arg_list[i] = &values[arg_idx];
}
return InstructionArgs(arg_list, num_args);
}
@@ -388,12 +397,11 @@ Error Method::parse_values() {
// subtract one to keep the output in 0 based indexing for a
// disgruntled debugger seeing this error message and checking
// schema.fbs
ET_CHECK_MSG(
false,
"Enum KernelTypes type: %" PRIu32
" not supported. Please look in executorch/schema/program.fbs "
"to see which type this is.",
ET_LOG(
Error,
"Unknown KernelTypes value %" PRIu32,
static_cast<uint32_t>(serialization_value->val_type()) - 1);
return Error::InvalidProgram;
}

// ~Method() will try to clean up n_value_ entries in the values_ array.
@@ -449,6 +457,11 @@ Error Method::resolve_operator(
constexpr size_t kTempBufferSizeForName = 100;
char operator_name[kTempBufferSizeForName];
const auto ops = serialization_plan_->operators();
ET_CHECK_OR_RETURN_ERROR(
ops != nullptr && op_index < ops->size(),
InvalidProgram,
"Op index %" PRIu32 " out of range",
op_index);
const auto& op = ops->Get(op_index);

populateOperatorName(op, kTempBufferSizeForName, operator_name);
@@ -559,11 +572,18 @@ Error Method::init(executorch_flatbuffer::ExecutionPlan* s_plan) {
{
// Load chains
const auto chains = serialization_plan_->chains();
ET_CHECK(chains != nullptr);
n_chains_ = chains->size();
if (chains == nullptr) {
n_chains_ = 0;
chains_ = nullptr;
} else {
n_chains_ = chains->size();
chains_ =
ET_ALLOCATE_LIST_OR_RETURN_ERROR(method_allocator, Chain, n_chains_);
}

chains_ =
ET_ALLOCATE_LIST_OR_RETURN_ERROR(method_allocator, Chain, n_chains_);
// Try resolving all operators before failing, to make it easier to debug
// multiple problems at once.
Error delayed_error = Error::Ok;
int32_t num_instructions_missing_op = 0;
for (size_t i = 0; i < n_chains_; ++i) {
auto s_chain = chains->Get(i);
@@ -583,7 +603,11 @@ Error Method::init(executorch_flatbuffer::ExecutionPlan* s_plan) {
const auto arg_idxs =
instruction->instr_args_as_KernelCall()->args();
auto res = gen_instruction_arguments(
method_allocator, values_, arg_idxs->size(), arg_idxs->data());
method_allocator,
n_value_,
values_,
arg_idxs->size(),
arg_idxs->data());
if (!res.ok()) {
return res.error();
}
@@ -598,13 +622,19 @@ Error Method::init(executorch_flatbuffer::ExecutionPlan* s_plan) {
num_instructions_missing_op++;
} else if (err == Error::MemoryAllocationFailed) {
return err;
} else {
delayed_error = err;
}
} break;
case executorch_flatbuffer::InstructionArguments::DelegateCall: {
const auto arg_idxs =
instruction->instr_args_as_DelegateCall()->args();
auto res = gen_instruction_arguments(
method_allocator, values_, arg_idxs->size(), arg_idxs->data());
method_allocator,
n_value_,
values_,
arg_idxs->size(),
arg_idxs->data());
if (!res.ok()) {
return res.error();
}
@@ -628,6 +658,9 @@ Error Method::init(executorch_flatbuffer::ExecutionPlan* s_plan) {
"There are %d instructions don't have corresponding operator registered. "
"See logs for details",
num_instructions_missing_op);
if (delayed_error != Error::Ok) {
return delayed_error;
}
}

pre_allocated_input_ = false;
@@ -890,9 +923,6 @@ Method::get_outputs(EValue* output_evalues, size_t length) {
}

Error Method::execute_instruction() {
// TODO(jakeszwe): remove all the ET_CHECKS in this function and properly
// return the error instead

auto& chain = chains_[step_state_.chain_idx];
auto instructions = chain.s_chain_->instructions();

@@ -1007,10 +1037,11 @@ Error Method::execute_instruction() {
internal::reset_data_ptr(t);
} break;
default:
ET_CHECK_MSG(
false,
"Instruction is not supported. %hhu",
ET_LOG(
Error,
"Unknown instruction: %hhu",
static_cast<uint8_t>(instruction->instr_args_type()));
err = Error::InvalidProgram;
}
// Reset the temp allocator for every instruction.
if (memory_manager_->temp_allocator() != nullptr) {
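The control-flow change in execute_instruction() is easier to see in isolation. The sketch below uses stand-in enums rather than the real flatbuffer-generated types; only the pattern (log the unknown value, set a recoverable error, keep the process alive) mirrors the diff.

```cpp
// Sketch only: non-fatal handling of an unknown instruction. InstrArgs and
// Error are stand-ins, not ExecuTorch or flatbuffer types; the default branch
// shows the log-and-return pattern that replaces the fatal ET_CHECK_MSG.
#include <cstdint>
#include <cstdio>

enum class InstrArgs : uint8_t { KernelCall, DelegateCall };
enum class Error { Ok, InvalidProgram };

Error execute_one(InstrArgs args_type) {
  Error err = Error::Ok;
  switch (args_type) {
    case InstrArgs::KernelCall:
      // ... dispatch the kernel ...
      break;
    case InstrArgs::DelegateCall:
      // ... call into the delegate ...
      break;
    default:
      // Previously this branch was ET_CHECK_MSG(false, ...), which aborted.
      std::fprintf(stderr, "Unknown instruction: %hhu\n",
                   static_cast<uint8_t>(args_type));
      err = Error::InvalidProgram;  // the caller decides how to recover
  }
  return err;
}

int main() {
  // A value outside the known enumerators exercises the default branch.
  return execute_one(static_cast<InstrArgs>(0x7F)) == Error::InvalidProgram ? 0 : 1;
}
```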
2 changes: 1 addition & 1 deletion runtime/executor/method_meta.cpp
@@ -59,7 +59,7 @@ size_t calculate_nbytes(
for (ssize_t i = 0; i < sizes.size(); i++) {
n *= sizes[i];
}
return n * sizeof_scalar_type(scalar_type);
return n * torch::executor::elementSize(scalar_type);
}

} // namespace
36 changes: 22 additions & 14 deletions runtime/executor/program.cpp
@@ -146,30 +146,33 @@ Result<executorch_flatbuffer::ExecutionPlan*> get_execution_plan(

// Constant data may live inside the flatbuffer data (constant_buffer) or in a
// separate segment (constant_segment). It should not be in both.
const auto& constant_buffer = flatbuffer_program->constant_buffer();
const auto& constant_segment = flatbuffer_program->constant_segment();

// Check if the constant data is inside a separate segment.
if (constant_segment != nullptr && constant_segment->offsets()->size() > 0) {
const auto* constant_segment = flatbuffer_program->constant_segment();
if (constant_segment != nullptr && constant_segment->offsets() != nullptr &&
constant_segment->offsets()->size() > 0) {
// The constant data is inside a separate segment.
const auto* constant_buffer = flatbuffer_program->constant_buffer();
ET_CHECK_OR_RETURN_ERROR(
constant_buffer->size() == 0,
InvalidState,
"constant_buffer contains %u items, constant_segment.offsets contains %u items. Only one should be used.",
constant_buffer == nullptr || constant_buffer->size() == 0,
InvalidProgram,
"constant_buffer contains %u items, "
"constant_segment.offsets contains %u items. Only one should be used.",
constant_buffer->size(),
constant_segment->offsets()->size());
const auto* segments = flatbuffer_program->segments();
ET_CHECK_OR_RETURN_ERROR(
segments != nullptr, InvalidProgram, "No segments in program");

// Load constant segment.
// TODO(T171839323): Add test for segment_index > num available segments.
ET_CHECK_OR_RETURN_ERROR(
constant_segment->segment_index() <
flatbuffer_program->segments()->size(),
InvalidArgument,
constant_segment->segment_index() < segments->size(),
InvalidProgram,
"Constant segment index %d invalid for program segments range %d",
constant_segment->segment_index(),
flatbuffer_program->segments()->size());
segments->size());

const executorch_flatbuffer::DataSegment* data_segment =
flatbuffer_program->segments()->Get(constant_segment->segment_index());
segments->Get(constant_segment->segment_index());
Result<FreeableBuffer> constant_segment_data = loader->Load(
segment_base_offset + data_segment->offset(), data_segment->size());
if (!constant_segment_data.ok()) {
@@ -199,7 +202,12 @@ Result<executorch_flatbuffer::ExecutionPlan*> get_execution_plan(
size_t Program::num_methods() const {
auto internal_program =
static_cast<const executorch_flatbuffer::Program*>(internal_program_);
return internal_program->execution_plan()->size();
const auto execution_plan = internal_program->execution_plan();
if (execution_plan != nullptr) {
return execution_plan->size();
} else {
return 0;
}
}

Result<const char*> Program::get_method_name(size_t plan_index) const {
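The num_methods() change above follows the same defensive idea: a flatbuffer accessor that can legally return null is checked before use. A standalone sketch of that guard is below; PlanVector and Program are stand-ins for the generated flatbuffer types, not real ExecuTorch APIs.

```cpp
// Sketch only: treat a missing flatbuffer table as empty instead of
// dereferencing it, as num_methods() now does for execution_plan().
#include <cstddef>

struct PlanVector {                       // stand-in for the flatbuffers vector
  size_t count = 0;
  size_t size() const { return count; }
};

struct Program {                          // stand-in for the generated Program table
  const PlanVector* plans = nullptr;      // may legally be null in a malformed file
  const PlanVector* execution_plan() const { return plans; }
};

size_t num_methods(const Program& program) {
  const auto* plan = program.execution_plan();
  // Before this PR the pointer was dereferenced unconditionally.
  return plan != nullptr ? plan->size() : 0;
}
```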
6 changes: 6 additions & 0 deletions runtime/executor/tensor_parser_aten.cpp
@@ -9,6 +9,7 @@
#include <executorch/runtime/executor/tensor_parser.h>

#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/runtime/executor/memory_manager.h>
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/profiler.h>
@@ -43,6 +44,11 @@ Result<at::Tensor> parseTensor(

// get metadata
at::ScalarType type = static_cast<at::ScalarType>(s_tensor->scalar_type());
ET_CHECK_OR_RETURN_ERROR(
isValid(type),
InvalidProgram,
"Invalid ScalarType %" PRId8,
static_cast<int8_t>(type));
auto options = at::CPU(type).options();

// convert int32 in serialization to int64 for aten