@@ -19,13 +19,20 @@ def forward(self, x):
             z = torch.abs(x)
             return z
 
-    def _test_abs(self, inputs):
-        (
+    def _test_abs(self, inputs, legacy_mode: bool = False):
+        tester = (
             Tester(self.Abs(), inputs)
             .export()
             .check_count({"torch.ops.aten.abs.default": 1})
-            .to_edge_transform_and_lower()
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+        )
+
+        if legacy_mode:
+            tester = tester.to_edge().partition()
+        else:
+            tester = tester.to_edge_transform_and_lower()
+
+        (
+            tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .check_not(["executorch_exir_dialects_edge__ops_aten_abs_default"])
             .to_executorch()
             .serialize()
@@ -41,7 +48,18 @@ def test_fp16_abs(self):
                 ],
             ).to(torch.float16),
         )
-        self._test_abs(inputs)
+        self._test_abs(inputs, legacy_mode=False)
+
+    def test_fp16_abs_legacy_mode(self):
+        inputs = (
+            torch.Tensor(
+                [
+                    [0.0, 0.1, 0.5, 0.499],
+                    [-0.6, -0.4, 100.1, -1000.1],
+                ],
+            ).to(torch.float16),
+        )
+        self._test_abs(inputs, legacy_mode=True)
 
     def test_fp32_abs(self):
         inputs = (
@@ -52,4 +70,15 @@ def test_fp32_abs(self):
                 ],
             ),
         )
-        self._test_abs(inputs)
+        self._test_abs(inputs, legacy_mode=False)
+
+    def test_fp32_abs_legacy_mode(self):
+        inputs = (
+            torch.Tensor(
+                [
+                    [0.0, 0.1, 0.5, 0.499],
+                    [-0.6, -0.4, 100.1, -1000.1],
+                ],
+            ),
+        )
+        self._test_abs(inputs, legacy_mode=True)
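
For readers skimming the hunks, here is the net shape of the refactored helper as one standalone sketch. The `Tester` import path is assumed from the usual XNNPACK test layout, `run_abs_test` is a hypothetical free-function stand-in for the `_test_abs` method, and pipeline steps after `.serialize()` are elided because the hunk cuts off there:

```python
import torch
from executorch.backends.xnnpack.test.tester import Tester  # assumed import path


class Abs(torch.nn.Module):
    def forward(self, x):
        return torch.abs(x)


# Hypothetical free-function version of _test_abs from the diff above.
def run_abs_test(inputs, legacy_mode: bool = False):
    # Export and confirm the aten abs op is present before lowering.
    tester = (
        Tester(Abs(), inputs)
        .export()
        .check_count({"torch.ops.aten.abs.default": 1})
    )

    if legacy_mode:
        # Legacy two-step flow: convert to edge dialect, then partition.
        tester = tester.to_edge().partition()
    else:
        # Current flow: transform and lower to the delegate in one pass.
        tester = tester.to_edge_transform_and_lower()

    # Either path should leave exactly one delegate call and no
    # edge-dialect abs op, then build and serialize the program.
    (
        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        .check_not(["executorch_exir_dialects_edge__ops_aten_abs_default"])
        .to_executorch()
        .serialize()
    )
```

Called as `run_abs_test((torch.randn(2, 4),), legacy_mode=True)` it exercises the legacy `.to_edge().partition()` path; with the default it takes the newer `.to_edge_transform_and_lower()` path, and both converge on the same delegate checks.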