
Commit a3e6480

thevinster authored and facebook-github-bot committed
Fix build breaks from llvm-17 (#461)
Summary:
Pull Request resolved: #461

Clang-17 is stricter around format strings and ambiguous headers.

* Cast the enums to the right format type
* Namespace `format_to` methods with `fmt` since libc++ 17 introduces its own std::format

Reviewed By: lanza

Differential Revision: D49556599

fbshipit-source-id: eb9ebed3aa1bac844dac83dc5b0118106978a839
1 parent 89b7e89 commit a3e6480

32 files changed: +175 / -117 lines changed
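
Both fixes described in the summary are mechanical: Clang 17's stricter format-string checking rejects passing a `ScalarType` enum straight to a `%hhd`/`%hdd` specifier, so each call site now casts to the matching integer width, and unqualified `format_to` calls are spelled `fmt::format_to` so they cannot collide with the `std::format_to` that libc++ 17 ships. A minimal standalone sketch of both patterns follows; it is not code from this commit — the `ScalarType` stand-in and the fmt call site are illustrative, and it assumes the {fmt} library is available.

#include <cstdint>
#include <cstdio>
#include <iterator>

#include <fmt/format.h>  // assumption: the {fmt} library is on the include path

// Hypothetical stand-in for the repo's ScalarType enum; values are illustrative.
enum class ScalarType : int8_t { Float = 6, Bool = 11 };

int main() {
  ScalarType dtype = ScalarType::Bool;

  // Format-string fix: %hhd expects a signed-char-sized integer, so the enum
  // is cast explicitly instead of being passed through as an enum class value.
  std::printf("Unhandled dtype %hhd\n", static_cast<int8_t>(dtype));

  // Header-ambiguity fix: qualify the call as fmt::format_to so it cannot be
  // confused with the std::format_to that libc++ 17 provides via <format>.
  fmt::memory_buffer buf;
  fmt::format_to(
      std::back_inserter(buf), "dtype = {}", static_cast<int8_t>(dtype));
  std::printf("%s\n", fmt::to_string(buf).c_str());
  return 0;
}

The explicit casts and the qualified calls compile the same way under earlier Clang versions, so the change is backward compatible.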

backends/xnnpack/runtime/utils/utils.cpp

Lines changed: 1 addition & 1 deletion
@@ -171,7 +171,7 @@ std::pair<float, float> GetMinMax(const Tensor& ft) {
   ET_CHECK_MSG(
       ft.scalar_type() == ScalarType::Float,
       "Expected float tensor but got %hhd",
-      ft.scalar_type());
+      static_cast<int8_t>(ft.scalar_type()));
   const float* d = ft.const_data_ptr<float>();
   for (int i = 0; i < ft.numel(); ++i) {
     min = (d[i] < min) ? d[i] : min;

kernels/optimized/cpu/op_bmm.cpp

Lines changed: 2 additions & 1 deletion
@@ -148,7 +148,8 @@ Tensor& opt_bmm_out(
   switch (scalar_type) {
     ET_FORALL_REAL_TYPES(BMM_TENSOR)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", scalar_type);
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(scalar_type));
   }
 #undef BMM_TENSOR

kernels/optimized/cpu/op_gelu.cpp

Lines changed: 4 additions & 1 deletion
@@ -120,7 +120,10 @@ Tensor& opt_gelu_out(
     // TODO support Double as well
     GELU(float, Float)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", input.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled dtype %hhd",
+          static_cast<int8_t>(input.scalar_type()));
   }
 #undef GELU

kernels/optimized/cpu/op_log_softmax.cpp

Lines changed: 10 additions & 4 deletions
@@ -108,7 +108,10 @@ void log_softmax_wrapper(const Tensor& X, int64_t dim, Tensor& out) {
       log_softmax_kernel<float, OUT_T>(X, dim, out);
       break;
     default:
-      ET_CHECK_MSG(false, "Unhandled input dtype %hhd", input_scalar_type);
+      ET_CHECK_MSG(
+          false,
+          "Unhandled input dtype %hhd",
+          static_cast<int8_t>(input_scalar_type));
   }
 }
 } // namespace
@@ -138,12 +141,12 @@ void opt_log_soft_max_check_preconditions(
   ET_CHECK_MSG(
       out_scalar_type == ScalarType::Float,
       "out.scalar_type() %hhd is not Float",
-      out_scalar_type);
+      static_cast<int8_t>(out_scalar_type));
   auto input_scalar_type = self.scalar_type();
   ET_CHECK_MSG(
       input_scalar_type == ScalarType::Float,
       "self.scalar_type() %hhd is not Float",
-      input_scalar_type);
+      static_cast<int8_t>(input_scalar_type));
 }
 
 // _log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out)
@@ -172,7 +175,10 @@ Tensor& opt_log_softmax_out(
       log_softmax_wrapper<float>(self, dim, out);
       break;
     default:
-      ET_CHECK_MSG(false, "Unhandled out dtype %hhd", out_scalar_type);
+      ET_CHECK_MSG(
+          false,
+          "Unhandled out dtype %hhd",
+          static_cast<int8_t>(out_scalar_type));
   }
   return out;
 }

kernels/optimized/cpu/op_native_layer_norm.cpp

Lines changed: 4 additions & 1 deletion
@@ -160,7 +160,10 @@ std::tuple<Tensor&, Tensor&, Tensor&> opt_native_layer_norm_out(
     // TODO support bfloat16
     ET_FORALL_FLOAT_TYPES(LAYER_NORM)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", input.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled dtype %hhd",
+          static_cast<int8_t>(input.scalar_type()));
   }
 #undef LAYER_NORM
   return {out, mean_out, rstd_out};

kernels/portable/cpu/op_allclose.cpp

Lines changed: 1 addition & 1 deletion
@@ -103,7 +103,7 @@ Tensor& allclose_out(
   ET_CHECK_MSG(
       out.scalar_type() == ScalarType::Bool,
       "Out tensor must be type Bool; saw type %hhd",
-      out.scalar_type());
+      static_cast<int8_t>(out.scalar_type()));
   ET_CHECK_MSG(
       out.numel() == 1,
       "Out tensor must be a single element; saw %zu elements",

kernels/portable/cpu/op_argmax.cpp

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ void check_preconditions(
   ET_CHECK_MSG(
       out.scalar_type() == ScalarType::Long,
       "Expected out tensor to have dtype Long, but got %hhd instead",
-      out.scalar_type());
+      static_cast<int8_t>(out.scalar_type()));
   ET_CHECK_MSG(
       out.dim() == compute_reduced_out_dim(in, dim, keepdim),
       "Number of dims of out tensor is not compatible with inputs and params");

kernels/portable/cpu/op_argmin.cpp

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ void check_preconditions(
   ET_CHECK_MSG(
       out.scalar_type() == ScalarType::Long,
       "Expected out tensor to have dtype Long, but got %hhd instead",
-      out.scalar_type());
+      static_cast<int8_t>(out.scalar_type()));
   ET_CHECK_MSG(
       out.dim() == compute_reduced_out_dim(in, dim, keepdim),
       "Number of dims of out tensor is not compatible with inputs and params");

kernels/portable/cpu/op_as_strided_copy.cpp

Lines changed: 4 additions & 1 deletion
@@ -145,7 +145,10 @@ Tensor& as_strided_copy_out(
   switch (self.scalar_type()) {
     ET_FORALL_SCALAR_TYPES(AS_STRIDED_COPY_TENSOR)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", self.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled dtype %hhd",
+          static_cast<int8_t>(self.scalar_type()));
   }
 #undef AS_STRIDED_COPY_TENSOR
   return out;

kernels/portable/cpu/op_bitwise_not.cpp

Lines changed: 4 additions & 1 deletion
@@ -44,7 +44,10 @@ Tensor& bitwise_not_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
           in.numel());
     });
   } else {
-    ET_CHECK_MSG(false, "Unsupported input dtype %hhd", in.scalar_type());
+    ET_CHECK_MSG(
+        false,
+        "Unsupported input dtype %hhd",
+        static_cast<int8_t>(in.scalar_type()));
   }
 
   return out;

kernels/portable/cpu/op_cumsum.cpp

Lines changed: 14 additions & 8 deletions
@@ -116,19 +116,25 @@ Tensor& cumsum_out(
     cumsum_tensors<SELF_CTYPE, OUT_CTYPE>(self, dim, out); \
     break;
 
-#define CUMSUM_TENSORS(SELF_CTYPE, self_dtype) \
-  case ScalarType::self_dtype: \
-    switch (out.scalar_type()) { \
-      ET_FORALL_REAL_TYPES_WITH(SELF_CTYPE, CUMSUM_IMPL) \
-      default: \
-        ET_CHECK_MSG(false, "Unhandled output dtype %hhd", out.scalar_type()); \
-    } \
+#define CUMSUM_TENSORS(SELF_CTYPE, self_dtype) \
+  case ScalarType::self_dtype: \
+    switch (out.scalar_type()) { \
+      ET_FORALL_REAL_TYPES_WITH(SELF_CTYPE, CUMSUM_IMPL) \
+      default: \
+        ET_CHECK_MSG( \
+            false, \
+            "Unhandled output dtype %hhd", \
+            static_cast<int8_t>(out.scalar_type())); \
+    } \
     break;
 
   switch (self.scalar_type()) {
     ET_FORALL_REAL_TYPES_AND(Bool, CUMSUM_TENSORS)
     default:
-      ET_CHECK_MSG(false, "Unhandled input dtype %hhd", self.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled input dtype %hhd",
+          static_cast<int8_t>(self.scalar_type()));
   }
 
 #undef CUMSUM_TENSORS

kernels/portable/cpu/op_glu.cpp

Lines changed: 2 additions & 1 deletion
@@ -207,7 +207,8 @@ glu_out(RuntimeContext& ctx, const Tensor& self, int64_t dim, Tensor& out) {
   switch (in_dtype) {
     ET_FORALL_FLOAT_TYPES(GLU_TENSOR)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", in_dtype);
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(in_dtype));
   }
 #undef GLU_TENSOR
   return out;

kernels/portable/cpu/op_nonzero.cpp

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ void check_preconditions(const Tensor& input, const Tensor& output) {
   ET_CHECK_MSG(
       output.scalar_type() == ScalarType::Long,
       "Expected out to be a Long tensor but received %hdd",
-      output.scalar_type());
+      static_cast<short>(output.scalar_type()));
   ET_CHECK_MSG(
       output.dim() == 2,
       "Expected out to be a 2d tensor received %zd",

kernels/portable/cpu/op_ones.cpp

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ Tensor& ones_out(RuntimeContext& ctx, IntArrayRef size, Tensor& out) {
       ET_CHECK_MSG(
           false,
           "out tensor should be a real or bool dtype, but got %hhd",
-          out.scalar_type());
+          static_cast<int8_t>(out.scalar_type()));
   }
 #undef ONES_OUT

kernels/portable/cpu/op_split_copy.cpp

Lines changed: 2 additions & 2 deletions
@@ -92,8 +92,8 @@ void check_args(
         out[i].scalar_type() == out[0].scalar_type(),
         "out[%zu] dtype %hhd != out[0] dtype %hhd",
         i,
-        out[i].scalar_type(),
-        out[0].scalar_type());
+        static_cast<int8_t>(out[i].scalar_type()),
+        static_cast<int8_t>(out[0].scalar_type()));
 
     // All outputs must have the same number of dimensions as the input.
     ET_CHECK_MSG(

kernels/portable/cpu/op_t_copy.cpp

Lines changed: 2 additions & 1 deletion
@@ -62,7 +62,8 @@ Tensor& t_copy_out(RuntimeContext& ctx, const Tensor& a, Tensor& out) {
   switch (a.scalar_type()) {
     ET_FORALL_SCALAR_TYPES(TRANSPOSE_TENSORS)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", a.scalar_type());
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(a.scalar_type()));
   }
 
 #undef TRANSPOSE_TENSORS

kernels/portable/cpu/op_to_copy.cpp

Lines changed: 14 additions & 8 deletions
@@ -54,19 +54,25 @@ Tensor& to_copy_out(
     _to_impl<SELF_CTYPE, OUT_CTYPE>(self, out); \
     break;
 
-#define CASE_TENSOR_DTYPE(SELF_CTYPE, self_dtype) \
-  case ScalarType::self_dtype: \
-    switch (out.scalar_type()) { \
-      ET_FORALL_REAL_TYPES_AND_WITH(Bool, SELF_CTYPE, TO_IMPL) \
-      default: \
-        ET_CHECK_MSG(false, "Unhandled output dtype %hhd", out.scalar_type()); \
-    } \
+#define CASE_TENSOR_DTYPE(SELF_CTYPE, self_dtype) \
+  case ScalarType::self_dtype: \
+    switch (out.scalar_type()) { \
+      ET_FORALL_REAL_TYPES_AND_WITH(Bool, SELF_CTYPE, TO_IMPL) \
+      default: \
+        ET_CHECK_MSG( \
+            false, \
+            "Unhandled output dtype %hhd", \
+            static_cast<int8_t>(out.scalar_type())); \
+    } \
     break;
 
   switch (self.scalar_type()) {
     ET_FORALL_REAL_TYPES_AND(Bool, CASE_TENSOR_DTYPE);
     default:
-      ET_CHECK_MSG(false, "Unhandled input dtype %hhd", self.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled input dtype %hhd",
+          static_cast<int8_t>(self.scalar_type()));
   }
 
 #undef CASE_TENSOR_DTYPE

kernels/portable/cpu/op_transpose_copy.cpp

Lines changed: 2 additions & 1 deletion
@@ -90,7 +90,8 @@ Tensor& transpose_copy_int_out(
   switch (a.scalar_type()) {
     ET_FORALL_SCALAR_TYPES(TRANSPOSE_TENSORS)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", a.scalar_type());
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(a.scalar_type()));
   }
 
 #undef TRANSPOSE_TENSORS

kernels/portable/cpu/op_tril.cpp

Lines changed: 1 addition & 1 deletion
@@ -151,7 +151,7 @@ Tensor& tril_out(
       ET_CHECK_MSG(
           false,
           "out tensor should be a real or bool dtype, but got %hhd",
-          out.scalar_type());
+          static_cast<int8_t>(out.scalar_type()));
   }
 #undef TRIL_OUT

kernels/portable/cpu/op_unbind_copy.cpp

Lines changed: 2 additions & 2 deletions
@@ -46,8 +46,8 @@ void check_args(const Tensor& input, int64_t dim, TensorList out) {
        out[i].scalar_type() == out[0].scalar_type(),
        "out[%zu] dtype %hhd != out[0] dtype %hhd",
        i,
-       out[i].scalar_type(),
-       out[0].scalar_type());
+       static_cast<int8_t>(out[i].scalar_type()),
+       static_cast<int8_t>(out[0].scalar_type()));
 
     // output tensor must have # of dims = input.dim() -1
     ET_CHECK_MSG(

kernels/portable/cpu/pattern/unary_ufunc_realb_to_bool.cpp

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ Tensor& unary_ufunc_realb_to_bool(
   ET_CHECK_MSG(
       out.scalar_type() == exec_aten::ScalarType::Bool,
       "Expected out tensor to have dtype Bool, but got %hhd instead.",
-      out.scalar_type());
+      static_cast<int8_t>(out.scalar_type()));
 
   const auto in_type = in.scalar_type();

kernels/portable/cpu/util/index_util.cpp

Lines changed: 1 addition & 1 deletion
@@ -122,7 +122,7 @@ bool indices_list_is_valid(
       ET_LOG_MSG_AND_RETURN_IF_FALSE(
           false,
           "%hhd scalar type is not supported for indices",
-          index.scalar_type());
+          static_cast<int8_t>(index.scalar_type()));
     }
   }
   return true;

kernels/quantized/cpu/op_add.cpp

Lines changed: 2 additions & 1 deletion
@@ -151,7 +151,8 @@ Tensor& quantized_add_out(
   switch (a.scalar_type()) {
     ET_FORALL_INT_TYPES(ADD_TENSORS)
     default:
-      ET_CHECK_MSG(false, "Unhandled dtype %hhd", a.scalar_type());
+      ET_CHECK_MSG(
+          false, "Unhandled dtype %hhd", static_cast<int8_t>(a.scalar_type()));
   }
 
 #undef ADD_TENSORS

kernels/quantized/cpu/op_choose_qparams.cpp

Lines changed: 3 additions & 3 deletions
@@ -47,15 +47,15 @@ void check_quantize_per_tensor_args(
   ET_CHECK_MSG(
       input.scalar_type() == ScalarType::Float,
       "Expected input to be Float tensor received: %hdd",
-      input.scalar_type());
+      static_cast<short>(input.scalar_type()));
   ET_CHECK_MSG(
       scale_out.scalar_type() == ScalarType::Double,
       "Expected scale to be Double tensor received: %hdd",
-      scale_out.scalar_type());
+      static_cast<short>(scale_out.scalar_type()));
   ET_CHECK_MSG(
       zero_point_out.scalar_type() == ScalarType::Long,
       "Expected scale to be Long tensor received: %hdd",
-      zero_point_out.scalar_type());
+      static_cast<short>(zero_point_out.scalar_type()));
   ET_CHECK_MSG(
       scale_out.numel() == 1,
       "Exepcted scale to only have one element received: %zd",

kernels/quantized/cpu/op_dequantize.cpp

Lines changed: 19 additions & 13 deletions
@@ -41,17 +41,17 @@ void check_dequantize_per_tensor_args(
           input.scalar_type() == ScalarType::Short ||
           input.scalar_type() == ScalarType::Int,
       "input.scalar_type() %hdd is not supported:",
-      input.scalar_type());
+      static_cast<short>(input.scalar_type()));
 
   ET_CHECK_MSG(
       input.scalar_type() == dtype,
       "input.scalar_type() %hdd is not matching dtype argumenta:",
-      input.scalar_type());
+      static_cast<short>(input.scalar_type()));
 
   ET_CHECK_MSG(
       out.scalar_type() == ScalarType::Float,
       "out.scalar_type() %hdd is not supported:",
-      out.scalar_type());
+      static_cast<short>(out.scalar_type()));
 
   ET_CHECK_MSG(
       quant_min <= quant_max,
@@ -100,19 +100,25 @@ Tensor& dequantize_per_tensor_out(
             static_cast<float>(scale)); \
       } \
       break;
-#define CALCULATE_INT_TYPE(IN_CTYPE, in_dtype) \
-  case ScalarType::in_dtype: \
-    switch (out.scalar_type()) { \
-      ET_FORALL_FLOAT_TYPES_WITH(IN_CTYPE, DEQUANTIZE_IMPL); \
-      default: \
-        ET_CHECK_MSG(false, "Unhandled output dtype %hhd", out.scalar_type()); \
-    } \
+#define CALCULATE_INT_TYPE(IN_CTYPE, in_dtype) \
+  case ScalarType::in_dtype: \
+    switch (out.scalar_type()) { \
+      ET_FORALL_FLOAT_TYPES_WITH(IN_CTYPE, DEQUANTIZE_IMPL); \
+      default: \
+        ET_CHECK_MSG( \
+            false, \
+            "Unhandled output dtype %hhd", \
+            static_cast<int8_t>(out.scalar_type())); \
+    } \
     break;
 
   switch (input.scalar_type()) {
     ET_FORALL_INT_TYPES(CALCULATE_INT_TYPE);
     default:
-      ET_CHECK_MSG(false, "Unhandled input dtype %hhd", input.scalar_type());
+      ET_CHECK_MSG(
+          false,
+          "Unhandled input dtype %hhd",
+          static_cast<int8_t>(input.scalar_type()));
   }
 
 #undef CALCULATE_FLOAT_TYPE
@@ -131,11 +137,11 @@ Tensor& dequantize_per_tensor_tensor_args_out(
   ET_CHECK_MSG(
       scale.scalar_type() == ScalarType::Double,
       "Expected scale to be Double tensor received: %hdd",
-      scale.scalar_type());
+      static_cast<short>(scale.scalar_type()));
   ET_CHECK_MSG(
       zero_point.scalar_type() == ScalarType::Long,
       "Expected scale to be Long tensor received: %hdd",
-      zero_point.scalar_type());
+      static_cast<short>(zero_point.scalar_type()));
   ET_CHECK_MSG(
       scale.numel() == 1,
       "Exepcted scale to only have one element received: %zd",
