Commit d99d26e

c10::optional -> std::optional
Differential Revision: D65439045
Pull Request resolved: #6642
Parent: 363505f
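The edits below are mechanical: every kernel signature that previously took the unqualified optional<Tensor> (or optional<ScalarType>) now spells the type as exec_aten::optional<...>, so these files stop relying on an unqualified name that used to resolve to c10::optional and instead track whatever exec_aten::optional aliases once the runtime moves to std::optional. A minimal before/after sketch of the pattern follows; the function name example_op_out is illustrative and not taken from this commit.

// Before: "optional" is unqualified and used to resolve to c10::optional.
Tensor& example_op_out(
    const Tensor& weight,
    const Tensor& weight_scales,
    const optional<Tensor>& opt_weight_zero_points,
    Tensor& out);

// After: the alias is spelled out, so the parameter type follows
// exec_aten::optional (std::optional after the migration).
Tensor& example_op_out(
    const Tensor& weight,
    const Tensor& weight_scales,
    const exec_aten::optional<Tensor>& opt_weight_zero_points,
    Tensor& out);

Call sites should be unaffected, since only the spelling of the parameter type changes.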

8 files changed (+36, −36 lines)

kernels/quantized/cpu/embeddingxb.cpp

Lines changed: 6 additions & 6 deletions
@@ -65,7 +65,7 @@ static inline int32_t get_embedding_dim(
 void check_embedding_xbit_args(
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -170,7 +170,7 @@ template <typename CTYPE_PARAMS, typename CTYPE_OUT>
 void embedding_xbit_per_channel(
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const Tensor& indices,
     Tensor& out,
     int weight_nbit) {
@@ -260,7 +260,7 @@ Tensor& quantized_embedding_xbit_out(
     // non quant input and returns fp output
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -299,7 +299,7 @@ Tensor& quantized_embedding_xbit_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,
@@ -325,7 +325,7 @@ Tensor& quantized_embedding_xbit_dtype_out(
     // non quant input and returns fp output
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -368,7 +368,7 @@ Tensor& quantized_embedding_xbit_dtype_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,

kernels/quantized/cpu/embeddingxb.h

Lines changed: 4 additions & 4 deletions
@@ -24,7 +24,7 @@ Tensor& quantized_embedding_xbit_out(
     // non quant input and returns fp output
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -35,7 +35,7 @@ Tensor& quantized_embedding_xbit_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,
@@ -47,7 +47,7 @@ Tensor& quantized_embedding_xbit_dtype_out(
     // non quant input and returns fp output
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -59,7 +59,7 @@ Tensor& quantized_embedding_xbit_dtype_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,

kernels/quantized/cpu/op_dequantize.cpp

Lines changed: 3 additions & 3 deletions
@@ -186,7 +186,7 @@ float get_scale(const Tensor& scale, size_t channel_ix) {
 Tensor& dequantize_per_channel_out(
     const Tensor& input,
     const Tensor& scale,
-    const optional<Tensor>& opt_zero_points,
+    const exec_aten::optional<Tensor>& opt_zero_points,
     int64_t axis,
     int64_t quant_min,
     int64_t quant_max,
@@ -261,7 +261,7 @@ Tensor& dequantize_per_channel_out(
   const auto* input_data_ptr = input.const_data_ptr<CTYPE_IN>(); \
   ET_CHECK_MSG( \
       axis == 0, "Axis must be 0 for a single dimensional tensors"); \
-  const optional<int64_t> dim; \
+  const exec_aten::optional<int64_t> dim; \
   apply_over_dim( \
       [input_data_ptr, out_data_ptr, zero_point_data, &scale]( \
           size_t numel, size_t stride, size_t base_ix) { \
@@ -331,7 +331,7 @@ Tensor& dequantize_per_channel_out(
     KernelRuntimeContext& context,
     const Tensor& input,
     const Tensor& scale,
-    const optional<Tensor>& opt_zero_points,
+    const exec_aten::optional<Tensor>& opt_zero_points,
     int64_t axis,
     int64_t quant_min,
     int64_t quant_max,

kernels/quantized/cpu/op_embedding.cpp

Lines changed: 6 additions & 6 deletions
@@ -27,7 +27,7 @@ namespace {
 void check_embedding_byte_args(
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -129,7 +129,7 @@ template <typename CTYPE_WEIGHT, typename CTYPE_PARAMS, typename CTYPE_OUT>
 void embedding_byte_per_channel(
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const Tensor& indices,
     Tensor& out) {
   // An embedding layer nn.Embedding(num_embeddings, embedding_dim) has a
@@ -218,7 +218,7 @@ Tensor& quantized_embedding_byte_out(
     // non quant input and returns fp output
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -253,7 +253,7 @@ Tensor& quantized_embedding_byte_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,
@@ -277,7 +277,7 @@ Tensor& quantized_embedding_byte_dtype_out(
     // non quant input and returns fp output
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -316,7 +316,7 @@ Tensor& quantized_embedding_byte_dtype_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,

kernels/quantized/cpu/op_embedding2b.cpp

Lines changed: 4 additions & 4 deletions
@@ -37,7 +37,7 @@ Tensor& quantized_embedding_2bit_out(
     // non quant input and returns fp output
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -57,7 +57,7 @@ Tensor& quantized_embedding_2bit_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,
@@ -77,7 +77,7 @@ Tensor& quantized_embedding_2bit_out(
 Tensor& quantized_embedding_2bit_dtype_out(
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,
@@ -99,7 +99,7 @@ Tensor& quantized_embedding_2bit_dtype_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,

kernels/quantized/cpu/op_embedding4b.cpp

Lines changed: 4 additions & 4 deletions
@@ -37,7 +37,7 @@ Tensor& quantized_embedding_4bit_out(
     // non quant input and returns fp output
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -57,7 +57,7 @@ Tensor& quantized_embedding_4bit_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,
@@ -79,7 +79,7 @@ Tensor& quantized_embedding_4bit_dtype_out(
     // non quant input and returns fp output
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     const int64_t weight_quant_min,
     const int64_t weight_quant_max,
     const Tensor& indices,
@@ -101,7 +101,7 @@ Tensor& quantized_embedding_4bit_dtype_out(
     KernelRuntimeContext& context,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     int64_t weight_quant_min,
     int64_t weight_quant_max,
     const Tensor& indices,

kernels/quantized/cpu/op_mixed_linear.cpp

Lines changed: 6 additions & 6 deletions
@@ -19,8 +19,8 @@ bool check_quantized_mixed_linear_args(
     const Tensor& in,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
-    const optional<ScalarType> dtype,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<ScalarType> dtype,
     Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2));
   ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight, 2));
@@ -64,8 +64,8 @@ Tensor& quantized_mixed_linear_out(
     const Tensor& in,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
-    const optional<ScalarType> dtype,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<ScalarType> dtype,
     Tensor& out) {
   // TODO (gjcomer) Replace with ET_KERNEL_CHECK when context is available.
   ET_CHECK(check_quantized_mixed_linear_args(
@@ -117,8 +117,8 @@ Tensor& quantized_mixed_linear_out(
     const Tensor& in,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
-    const optional<ScalarType> dtype,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<ScalarType> dtype,
     Tensor& out) {
   // TODO(mcandales): Remove the need for this wrapper
   // TODO(mkg): add support for dtype

kernels/quantized/cpu/op_mixed_mm.cpp

Lines changed: 3 additions & 3 deletions
@@ -19,7 +19,7 @@ bool check_quantized_mixed_mm_args(
     const Tensor& in,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2));
   ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight, 2));
@@ -55,7 +55,7 @@ Tensor& quantized_mixed_mm_out(
     const Tensor& in,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     Tensor& out) {
   ET_CHECK(check_quantized_mixed_mm_args(
       in, weight, weight_scales, opt_weight_zero_points, out));
@@ -92,7 +92,7 @@ Tensor& quantized_mixed_mm_out(
     const Tensor& in,
     const Tensor& weight,
     const Tensor& weight_scales,
-    const optional<Tensor>& opt_weight_zero_points,
+    const exec_aten::optional<Tensor>& opt_weight_zero_points,
     Tensor& out) {
   // TODO(mcandales): Remove the need for this wrapper
   (void)ctx;
