
Fix build breaks from llvm-17 #461

Closed · wants to merge 1 commit
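This PR is one mechanical change applied across the kernel sources: wherever a `ScalarType` value is passed to the printf-style `ET_CHECK_MSG` / `ET_LOG_MSG_AND_RETURN_IF_FALSE` checkers for a `%hhd` specifier, the argument is first cast to the enum's underlying integer type. Clang 17 tightened its `-Wformat` checking, and passing an enum class where `%hhd` expects a char-sized integer now trips the diagnostic (a build break under `-Werror`). Below is a minimal sketch of the pattern, assuming a hypothetical `check_msg` with printf-style format checking in place of the real `ET_CHECK_MSG`, and an illustrative `ScalarType` definition rather than the actual ExecuTorch one:

#include <cstdarg>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins; the real ScalarType and ET_CHECK_MSG live in
// the ExecuTorch runtime.
enum class ScalarType : int8_t { Long = 4, Float = 6 };

// printf-style format checking, like the checker macros rely on.
__attribute__((format(printf, 1, 2))) void check_msg(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  std::vfprintf(stderr, fmt, args);
  va_end(args);
}

void report(ScalarType dtype) {
  // Rejected by clang 17's -Wformat: the argument is a ScalarType, not
  // the char-sized integer that %hhd expects.
  // check_msg("Unhandled dtype %hhd\n", dtype);

  // Accepted: cast to the enum's underlying type first.
  check_msg("Unhandled dtype %hhd\n", static_cast<int8_t>(dtype));
}

Most call sites below cast to `int8_t` to match `%hhd`; a few messages use `%hdd` and cast to `short` instead (see the note after the op_nonzero.cpp hunk).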
2 changes: 1 addition & 1 deletion backends/xnnpack/runtime/utils/utils.cpp
@@ -171,7 +171,7 @@ std::pair<float, float> GetMinMax(const Tensor& ft) {
   ET_CHECK_MSG(
       ft.scalar_type() == ScalarType::Float,
       "Expected float tensor but got %hhd",
-      ft.scalar_type());
+      static_cast<int8_t>(ft.scalar_type()));
   const float* d = ft.const_data_ptr<float>();
   for (int i = 0; i < ft.numel(); ++i) {
     min = (d[i] < min) ? d[i] : min;
3 changes: 2 additions & 1 deletion kernels/optimized/cpu/op_bmm.cpp
@@ -148,7 +148,8 @@ Tensor& opt_bmm_out(
   switch (scalar_type) {
     ET_FORALL_REAL_TYPES(BMM_TENSOR)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", scalar_type);
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(scalar_type));
   }
 #undef BMM_TENSOR
 
5 changes: 4 additions & 1 deletion kernels/optimized/cpu/op_gelu.cpp
@@ -120,7 +120,10 @@ Tensor& opt_gelu_out(
     // TODO support Double as well
     GELU(float, Float)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", input.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled dtype %hhd",
+          static_cast<int8_t>(input.scalar_type()));
   }
 #undef GELU
 
14 changes: 10 additions & 4 deletions kernels/optimized/cpu/op_log_softmax.cpp
@@ -108,7 +108,10 @@ void log_softmax_wrapper(const Tensor& X, int64_t dim, Tensor& out) {
       log_softmax_kernel<float, OUT_T>(X, dim, out);
       break;
     default:
-      ET_CHECK_MSG(false, "Unhandled input dtype %hhd", input_scalar_type);
+      ET_CHECK_MSG(
+          false,
+          "Unhandled input dtype %hhd",
+          static_cast<int8_t>(input_scalar_type));
   }
 }
 } // namespace
@@ -138,12 +141,12 @@ void opt_log_soft_max_check_preconditions(
   ET_CHECK_MSG(
       out_scalar_type == ScalarType::Float,
       "out.scalar_type() %hhd is not Float",
-      out_scalar_type);
+      static_cast<int8_t>(out_scalar_type));
   auto input_scalar_type = self.scalar_type();
   ET_CHECK_MSG(
       input_scalar_type == ScalarType::Float,
       "self.scalar_type() %hhd is not Float",
-      input_scalar_type);
+      static_cast<int8_t>(input_scalar_type));
 }
 
 // _log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out)
@@ -172,7 +175,10 @@ Tensor& opt_log_softmax_out(
       log_softmax_wrapper<float>(self, dim, out);
       break;
     default:
-      ET_CHECK_MSG(false, "Unhandled out dtype %hhd", out_scalar_type);
+      ET_CHECK_MSG(
+          false,
+          "Unhandled out dtype %hhd",
+          static_cast<int8_t>(out_scalar_type));
   }
   return out;
 }
5 changes: 4 additions & 1 deletion kernels/optimized/cpu/op_native_layer_norm.cpp
@@ -160,7 +160,10 @@ std::tuple<Tensor&, Tensor&, Tensor&> opt_native_layer_norm_out(
     // TODO support bfloat16
     ET_FORALL_FLOAT_TYPES(LAYER_NORM)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", input.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled dtype %hhd",
+          static_cast<int8_t>(input.scalar_type()));
   }
 #undef LAYER_NORM
   return {out, mean_out, rstd_out};
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_allclose.cpp
@@ -103,7 +103,7 @@ Tensor& allclose_out(
   ET_CHECK_MSG(
       out.scalar_type() == ScalarType::Bool,
       "Out tensor must be type Bool; saw type %hhd",
-      out.scalar_type());
+      static_cast<int8_t>(out.scalar_type()));
   ET_CHECK_MSG(
       out.numel() == 1,
       "Out tensor must be a single element; saw %zu elements",
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_argmax.cpp
@@ -40,7 +40,7 @@ void check_preconditions(
   ET_CHECK_MSG(
       out.scalar_type() == ScalarType::Long,
       "Expected out tensor to have dtype Long, but got %hhd instead",
-      out.scalar_type());
+      static_cast<int8_t>(out.scalar_type()));
   ET_CHECK_MSG(
       out.dim() == compute_reduced_out_dim(in, dim, keepdim),
       "Number of dims of out tensor is not compatible with inputs and params");
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_argmin.cpp
@@ -40,7 +40,7 @@ void check_preconditions(
   ET_CHECK_MSG(
       out.scalar_type() == ScalarType::Long,
       "Expected out tensor to have dtype Long, but got %hhd instead",
-      out.scalar_type());
+      static_cast<int8_t>(out.scalar_type()));
   ET_CHECK_MSG(
       out.dim() == compute_reduced_out_dim(in, dim, keepdim),
       "Number of dims of out tensor is not compatible with inputs and params");
5 changes: 4 additions & 1 deletion kernels/portable/cpu/op_as_strided_copy.cpp
@@ -145,7 +145,10 @@ Tensor& as_strided_copy_out(
   switch (self.scalar_type()) {
     ET_FORALL_SCALAR_TYPES(AS_STRIDED_COPY_TENSOR)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", self.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled dtype %hhd",
+          static_cast<int8_t>(self.scalar_type()));
   }
 #undef AS_STRIDED_COPY_TENSOR
   return out;
5 changes: 4 additions & 1 deletion kernels/portable/cpu/op_bitwise_not.cpp
@@ -44,7 +44,10 @@ Tensor& bitwise_not_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
           in.numel());
     });
   } else {
-    ET_CHECK_MSG(false, "Unsupported input dtype %hhd", in.scalar_type());
+    ET_CHECK_MSG(
+        false,
+        "Unsupported input dtype %hhd",
+        static_cast<int8_t>(in.scalar_type()));
   }
 
   return out;
22 changes: 14 additions & 8 deletions kernels/portable/cpu/op_cumsum.cpp
@@ -116,19 +116,25 @@ Tensor& cumsum_out(
     cumsum_tensors<SELF_CTYPE, OUT_CTYPE>(self, dim, out); \
     break;
 
-#define CUMSUM_TENSORS(SELF_CTYPE, self_dtype)                                 \
-  case ScalarType::self_dtype:                                                 \
-    switch (out.scalar_type()) {                                               \
-      ET_FORALL_REAL_TYPES_WITH(SELF_CTYPE, CUMSUM_IMPL)                       \
-      default:                                                                 \
-        ET_CHECK_MSG(false, "Unhandled output dtype %hhd", out.scalar_type()); \
-    }                                                                          \
+#define CUMSUM_TENSORS(SELF_CTYPE, self_dtype)           \
+  case ScalarType::self_dtype:                           \
+    switch (out.scalar_type()) {                         \
+      ET_FORALL_REAL_TYPES_WITH(SELF_CTYPE, CUMSUM_IMPL) \
+      default:                                           \
+        ET_CHECK_MSG(                                    \
+            false,                                       \
+            "Unhandled output dtype %hhd",               \
+            static_cast<int8_t>(out.scalar_type()));     \
+    }                                                    \
     break;
 
   switch (self.scalar_type()) {
     ET_FORALL_REAL_TYPES_AND(Bool, CUMSUM_TENSORS)
     default:
-      ET_CHECK_MSG(false, "Unhandled input dtype %hhd", self.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled input dtype %hhd",
+          static_cast<int8_t>(self.scalar_type()));
   }
 
 #undef CUMSUM_TENSORS
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_glu.cpp
@@ -207,7 +207,8 @@ glu_out(RuntimeContext& ctx, const Tensor& self, int64_t dim, Tensor& out) {
   switch (in_dtype) {
     ET_FORALL_FLOAT_TYPES(GLU_TENSOR)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", in_dtype);
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(in_dtype));
   }
 #undef GLU_TENSOR
   return out;
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_nonzero.cpp
@@ -39,7 +39,7 @@ void check_preconditions(const Tensor& input, const Tensor& output) {
   ET_CHECK_MSG(
       output.scalar_type() == ScalarType::Long,
       "Expected out to be a Long tensor but received %hdd",
-      output.scalar_type());
+      static_cast<short>(output.scalar_type()));
   ET_CHECK_MSG(
       output.dim() == 2,
       "Expected out to be a 2d tensor received %zd",
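Aside: this file (and the quantized kernels further down) formats the dtype with `%hdd` rather than `%hhd`. printf parses that as `%hd` (a `short` conversion) followed by a literal `d`, which is why these call sites cast to `short` instead of `int8_t`. A small self-contained illustration, reusing the hypothetical `ScalarType` stand-in from the sketch above:

#include <cstdint>
#include <cstdio>

enum class ScalarType : int8_t { Long = 4 }; // illustrative stand-in

int main() {
  // Prints "received: 4d": the trailing 'd' is literal text, which
  // suggests these format strings were probably meant to be "%hhd".
  std::printf("received: %hdd\n", static_cast<short>(ScalarType::Long));
}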
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_ones.cpp
@@ -66,7 +66,7 @@ Tensor& ones_out(RuntimeContext& ctx, IntArrayRef size, Tensor& out) {
       ET_CHECK_MSG(
           false,
           "out tensor should be a real or bool dtype, but got %hhd",
-          out.scalar_type());
+          static_cast<int8_t>(out.scalar_type()));
   }
 #undef ONES_OUT
 
4 changes: 2 additions & 2 deletions kernels/portable/cpu/op_split_copy.cpp
@@ -92,8 +92,8 @@ void check_args(
         out[i].scalar_type() == out[0].scalar_type(),
         "out[%zu] dtype %hhd != out[0] dtype %hhd",
         i,
-        out[i].scalar_type(),
-        out[0].scalar_type());
+        static_cast<int8_t>(out[i].scalar_type()),
+        static_cast<int8_t>(out[0].scalar_type()));
 
     // All outputs must have the same number of dimensions as the input.
     ET_CHECK_MSG(
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_t_copy.cpp
@@ -62,7 +62,8 @@ Tensor& t_copy_out(RuntimeContext& ctx, const Tensor& a, Tensor& out) {
   switch (a.scalar_type()) {
     ET_FORALL_SCALAR_TYPES(TRANSPOSE_TENSORS)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", a.scalar_type());
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(a.scalar_type()));
   }
 
 #undef TRANSPOSE_TENSORS
22 changes: 14 additions & 8 deletions kernels/portable/cpu/op_to_copy.cpp
@@ -54,19 +54,25 @@ Tensor& to_copy_out(
     _to_impl<SELF_CTYPE, OUT_CTYPE>(self, out); \
     break;
 
-#define CASE_TENSOR_DTYPE(SELF_CTYPE, self_dtype)                              \
-  case ScalarType::self_dtype:                                                 \
-    switch (out.scalar_type()) {                                               \
-      ET_FORALL_REAL_TYPES_AND_WITH(Bool, SELF_CTYPE, TO_IMPL)                 \
-      default:                                                                 \
-        ET_CHECK_MSG(false, "Unhandled output dtype %hhd", out.scalar_type()); \
-    }                                                                          \
+#define CASE_TENSOR_DTYPE(SELF_CTYPE, self_dtype)              \
+  case ScalarType::self_dtype:                                 \
+    switch (out.scalar_type()) {                               \
+      ET_FORALL_REAL_TYPES_AND_WITH(Bool, SELF_CTYPE, TO_IMPL) \
+      default:                                                 \
+        ET_CHECK_MSG(                                          \
+            false,                                             \
+            "Unhandled output dtype %hhd",                     \
+            static_cast<int8_t>(out.scalar_type()));           \
+    }                                                          \
     break;
 
   switch (self.scalar_type()) {
     ET_FORALL_REAL_TYPES_AND(Bool, CASE_TENSOR_DTYPE);
     default:
-      ET_CHECK_MSG(false, "Unhandled input dtype %hhd", self.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled input dtype %hhd",
+          static_cast<int8_t>(self.scalar_type()));
   }
 
 #undef CASE_TENSOR_DTYPE
3 changes: 2 additions & 1 deletion kernels/portable/cpu/op_transpose_copy.cpp
@@ -90,7 +90,8 @@ Tensor& transpose_copy_int_out(
   switch (a.scalar_type()) {
     ET_FORALL_SCALAR_TYPES(TRANSPOSE_TENSORS)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", a.scalar_type());
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(a.scalar_type()));
   }
 
 #undef TRANSPOSE_TENSORS
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_tril.cpp
@@ -151,7 +151,7 @@ Tensor& tril_out(
       ET_CHECK_MSG(
           false,
           "out tensor should be a real or bool dtype, but got %hhd",
-          out.scalar_type());
+          static_cast<int8_t>(out.scalar_type()));
   }
 #undef TRIL_OUT
 
Expand Down
4 changes: 2 additions & 2 deletions kernels/portable/cpu/op_unbind_copy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,8 @@ void check_args(const Tensor& input, int64_t dim, TensorList out) {
out[i].scalar_type() == out[0].scalar_type(),
"out[%zu] dtype %hhd != out[0] dtype %hhd",
i,
out[i].scalar_type(),
out[0].scalar_type());
static_cast<int8_t>(out[i].scalar_type()),
static_cast<int8_t>(out[0].scalar_type()));

// output tensor must have # of dims = input.dim() -1
ET_CHECK_MSG(
Expand Down
@@ -30,7 +30,7 @@ Tensor& unary_ufunc_realb_to_bool(
   ET_CHECK_MSG(
       out.scalar_type() == exec_aten::ScalarType::Bool,
       "Expected out tensor to have dtype Bool, but got %hhd instead.",
-      out.scalar_type());
+      static_cast<int8_t>(out.scalar_type()));
 
   const auto in_type = in.scalar_type();
 
2 changes: 1 addition & 1 deletion kernels/portable/cpu/util/index_util.cpp
@@ -122,7 +122,7 @@ bool indices_list_is_valid(
       ET_LOG_MSG_AND_RETURN_IF_FALSE(
           false,
           "%hhd scalar type is not supported for indices",
-          index.scalar_type());
+          static_cast<int8_t>(index.scalar_type()));
     }
   }
   return true;
3 changes: 2 additions & 1 deletion kernels/quantized/cpu/op_add.cpp
@@ -151,7 +151,8 @@ Tensor& quantized_add_out(
   switch (a.scalar_type()) {
     ET_FORALL_INT_TYPES(ADD_TENSORS)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", a.scalar_type());
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(a.scalar_type()));
   }
 
 #undef ADD_TENSORS
6 changes: 3 additions & 3 deletions kernels/quantized/cpu/op_choose_qparams.cpp
@@ -47,15 +47,15 @@ void check_quantize_per_tensor_args(
   ET_CHECK_MSG(
       input.scalar_type() == ScalarType::Float,
       "Expected input to be Float tensor received: %hdd",
-      input.scalar_type());
+      static_cast<short>(input.scalar_type()));
   ET_CHECK_MSG(
       scale_out.scalar_type() == ScalarType::Double,
       "Expected scale to be Double tensor received: %hdd",
-      scale_out.scalar_type());
+      static_cast<short>(scale_out.scalar_type()));
   ET_CHECK_MSG(
       zero_point_out.scalar_type() == ScalarType::Long,
       "Expected scale to be Long tensor received: %hdd",
-      zero_point_out.scalar_type());
+      static_cast<short>(zero_point_out.scalar_type()));
   ET_CHECK_MSG(
       scale_out.numel() == 1,
       "Exepcted scale to only have one element received: %zd",
32 changes: 19 additions & 13 deletions kernels/quantized/cpu/op_dequantize.cpp
@@ -41,17 +41,17 @@ void check_dequantize_per_tensor_args(
       input.scalar_type() == ScalarType::Short ||
           input.scalar_type() == ScalarType::Int,
       "input.scalar_type() %hdd is not supported:",
-      input.scalar_type());
+      static_cast<short>(input.scalar_type()));
 
   ET_CHECK_MSG(
       input.scalar_type() == dtype,
       "input.scalar_type() %hdd is not matching dtype argumenta:",
-      input.scalar_type());
+      static_cast<short>(input.scalar_type()));
 
   ET_CHECK_MSG(
       out.scalar_type() == ScalarType::Float,
       "out.scalar_type() %hdd is not supported:",
-      out.scalar_type());
+      static_cast<short>(out.scalar_type()));
 
   ET_CHECK_MSG(
       quant_min <= quant_max,
@@ -100,19 +100,25 @@ Tensor& dequantize_per_tensor_out(
           static_cast<float>(scale));                        \
     }                                                        \
     break;
-#define CALCULATE_INT_TYPE(IN_CTYPE, in_dtype)                                 \
-  case ScalarType::in_dtype:                                                   \
-    switch (out.scalar_type()) {                                               \
-      ET_FORALL_FLOAT_TYPES_WITH(IN_CTYPE, DEQUANTIZE_IMPL);                   \
-      default:                                                                 \
-        ET_CHECK_MSG(false, "Unhandled output dtype %hhd", out.scalar_type()); \
-    }                                                                          \
+#define CALCULATE_INT_TYPE(IN_CTYPE, in_dtype)               \
+  case ScalarType::in_dtype:                                 \
+    switch (out.scalar_type()) {                             \
+      ET_FORALL_FLOAT_TYPES_WITH(IN_CTYPE, DEQUANTIZE_IMPL); \
+      default:                                               \
+        ET_CHECK_MSG(                                        \
+            false,                                           \
+            "Unhandled output dtype %hhd",                   \
+            static_cast<int8_t>(out.scalar_type()));         \
+    }                                                        \
     break;
 
   switch (input.scalar_type()) {
     ET_FORALL_INT_TYPES(CALCULATE_INT_TYPE);
     default:
-      ET_CHECK_MSG(false, "Unhandled input dtype %hhd", input.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled input dtype %hhd",
+          static_cast<int8_t>(input.scalar_type()));
   }
 
 #undef CALCULATE_FLOAT_TYPE
@@ -131,11 +137,11 @@ Tensor& dequantize_per_tensor_tensor_args_out(
   ET_CHECK_MSG(
       scale.scalar_type() == ScalarType::Double,
       "Expected scale to be Double tensor received: %hdd",
-      scale.scalar_type());
+      static_cast<short>(scale.scalar_type()));
   ET_CHECK_MSG(
       zero_point.scalar_type() == ScalarType::Long,
       "Expected scale to be Long tensor received: %hdd",
-      zero_point.scalar_type());
+      static_cast<short>(zero_point.scalar_type()));
   ET_CHECK_MSG(
       scale.numel() == 1,
       "Exepcted scale to only have one element received: %zd",