@@ -25,6 +25,19 @@ using exec_aten::ScalarType;
 using exec_aten::Tensor;
 using torch::executor::testing::TensorFactory;
 
+namespace {
+void expect_tensor_close_with_increased_tol(
+    const Tensor& actual,
+    const Tensor& expected) {
+  if (actual.scalar_type() == ScalarType::BFloat16 ||
+      actual.scalar_type() == ScalarType::Half) {
+    EXPECT_TENSOR_CLOSE_WITH_TOL(expected, actual, 1e-2, 1e-2);
+  } else {
+    EXPECT_TENSOR_CLOSE(expected, actual);
+  }
+}
+} // namespace
+
 class OpVarOutTest : public OperatorTest {
  protected:
   Tensor& op_var_out(
@@ -142,7 +155,7 @@ class OpVarOutTest : public OperatorTest {
     op_var_out(
         self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out);
     // clang-format off
-    EXPECT_TENSOR_CLOSE(out, tf_out.make(
+    expect_tensor_close_with_increased_tol(out, tf_out.make(
       {2, 3, 1},
       {
        1.666667,
@@ -160,7 +173,7 @@ class OpVarOutTest : public OperatorTest {
     op_var_out(
         self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/false, out);
     // clang-format off
-    EXPECT_TENSOR_CLOSE(out, tf_out.make(
+    expect_tensor_close_with_increased_tol(out, tf_out.make(
       {2, 3},
       {
        1.666667, 1.666667, 1.666667,
@@ -174,12 +187,14 @@ class OpVarOutTest : public OperatorTest {
     optional_dim_list = ArrayRef<int64_t>{dims_2, 2};
     op_var_out(
         self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out);
-    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 4}, {56.0, 56.0, 56.0, 56.0}));
+    expect_tensor_close_with_increased_tol(
+        out, tf_out.make({1, 1, 4}, {56.0, 56.0, 56.0, 56.0}));
 
     out = tf_out.zeros({4});
     op_var_out(
         self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/false, out);
-    EXPECT_TENSOR_CLOSE(out, tf_out.make({4}, {56.0, 56.0, 56.0, 56.0}));
+    expect_tensor_close_with_increased_tol(
+        out, tf_out.make({4}, {56.0, 56.0, 56.0, 56.0}));
 
     // dim list with negative dimensions should work
     out = tf_out.zeros({2, 1, 4});
@@ -188,7 +203,7 @@ class OpVarOutTest : public OperatorTest {
     op_var_out(
         self, optional_dim_list, /*unbiased=*/false, /*keepdim=*/true, out);
     // clang-format off
-    EXPECT_TENSOR_CLOSE(out, tf_out.make(
+    expect_tensor_close_with_increased_tol(out, tf_out.make(
       {2, 1, 4},
       {
        10.666667, 10.666667, 10.666667, 10.666667,
@@ -201,18 +216,19 @@ class OpVarOutTest : public OperatorTest {
     out = tf_out.zeros({1, 1, 1});
     optional<ArrayRef<int64_t>> null_dim_list;
     op_var_out(self, null_dim_list, /*unbiased=*/true, /*keepdim=*/true, out);
-    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {50.0}));
+    expect_tensor_close_with_increased_tol(out, tf_out.make({1, 1, 1}, {50.0}));
 
     optional<ArrayRef<int64_t>> empty_dim_list{ArrayRef<int64_t>{}};
     op_var_out(self, empty_dim_list, /*unbiased=*/false, /*keepdim=*/true, out);
-    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {47.916668}));
+    expect_tensor_close_with_increased_tol(
+        out, tf_out.make({1, 1, 1}, {47.916668}));
 
     out = tf_out.zeros({});
     op_var_out(self, null_dim_list, /*unbiased=*/false, /*keepdim=*/false, out);
-    EXPECT_TENSOR_CLOSE(out, tf_out.make({}, {47.916668}));
+    expect_tensor_close_with_increased_tol(out, tf_out.make({}, {47.916668}));
 
     op_var_out(self, empty_dim_list, /*unbiased=*/true, /*keepdim=*/false, out);
-    EXPECT_TENSOR_CLOSE(out, tf_out.make({}, {50.0}));
+    expect_tensor_close_with_increased_tol(out, tf_out.make({}, {50.0}));
   }
 };
 
@@ -227,6 +243,20 @@ class OpVarCorrectionOutTest : public OperatorTest {
     return torch::executor::aten::var_outf(
         context_, self, dim, correction, keepdim, out);
   }
+
+  template <ScalarType DTYPE>
+  void test_dtype() {
+    TensorFactory<DTYPE> tf;
+
+    Tensor x = tf.make({2, 3}, {4.9, 4.0, 5.6, 3.8, 4.9, 5.6});
+    Tensor expected = tf.make({2}, {0.72693, 0.93032});
+    optional<Scalar> correction(1.23);
+    Tensor out = tf.zeros({2});
+
+    op_var_correction_out(
+        x, ArrayRef<int64_t>{1}, correction, /*keepdim=*/false, out);
+    expect_tensor_close_with_increased_tol(out, expected);
+  }
 };
 
 TEST_F(OpVarOutTest, InvalidDimensionListDies) {
@@ -303,9 +333,9 @@ TEST_F(OpVarOutTest, AllFloatInputFloatOutputPasses) {
   test_var_out_dtype<ScalarType::INPUT_DTYPE, ScalarType::OUTPUT_DTYPE>();
 
 #define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
-  ET_FORALL_FLOAT_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);
+  ET_FORALL_FLOATHBF16_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);
 
-  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
+  ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
 #undef TEST_ENTRY
 #undef TEST_KERNEL
 }
@@ -387,14 +417,7 @@ TEST_F(OpVarOutTest, DynamicShapeUnbound) {
 }
 
 TEST_F(OpVarCorrectionOutTest, SmokeTest) {
-  TensorFactory<ScalarType::Float> tf;
-
-  Tensor x = tf.make({2, 3}, {4.9, 4.0, 5.6, 3.8, 4.9, 5.6});
-  Tensor expected = tf.make({2}, {0.72693, 0.93032});
-  optional<Scalar> correction(1.23);
-  Tensor out = tf.zeros({2});
-
-  op_var_correction_out(
-      x, ArrayRef<int64_t>{1}, correction, /*keepdim=*/false, out);
-  EXPECT_TENSOR_CLOSE(out, expected);
+#define TEST_ENTRY(ctype, dtype) test_dtype<ScalarType::dtype>();
+  ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
+#undef TEST_ENTRY
 }
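
For reference, a rough sketch of what the reworked SmokeTest body expands to, assuming ET_FORALL_FLOATHBF16_TYPES invokes the given macro once per floating-point dtype including Half and BFloat16 (the exact dtype list comes from the macro's definition in ExecuTorch, so the lines below are illustrative rather than the literal preprocessor output):

  // Illustrative expansion only; the real (ctype, dtype) pairs are supplied
  // by the ET_FORALL_FLOATHBF16_TYPES macro.
  test_dtype<ScalarType::Float>();
  test_dtype<ScalarType::Double>();
  test_dtype<ScalarType::Half>();
  test_dtype<ScalarType::BFloat16>();

Each instantiation runs the same correction-based variance check for its dtype, and the comparison goes through expect_tensor_close_with_increased_tol, which loosens the tolerances to 1e-2 only when the output is Half or BFloat16.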