Skip to content

Commit 81ab2cc

Browse files
xytintel authored and pytorchmergebot committed
Update torch-xpu-ops commit pin (pytorch#141201)
Update the torch-xpu-ops commit to [1e32bbc](intel/torch-xpu-ops@1e32bbc), includes: - Improve XPU aten operator coverage - Support basic `SparseXPU` operators Pull Request resolved: pytorch#141201 Approved by: https://github.com/EikanWang, https://github.com/jansel
1 parent 795f28a commit 81ab2cc

File tree

2 files changed

+3
-4
lines changed

2 files changed

+3
-4
lines changed

test/inductor/test_torchinductor_opinfo.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,7 @@ def format_op(op):
269269
"tan": {f16},
270270
"torch.ops.aten._flash_attention_forward": {f16},
271271
"torch.ops.aten._efficient_attention_forward": {f16, f32},
272-
"to_sparse": {f16, f32, f64, b8, i32, i64},
272+
"to_sparse": {f32, f64},
273273
"linalg.eig": {f32, f64},
274274
"linalg.eigvals": {f32, f64},
275275
# Double and complex datatype matmul is not supported in oneDNN
@@ -320,6 +320,7 @@ def format_op(op):
320320
"linalg.qr": {f64},
321321
"linalg.pinv": {f64},
322322
("linalg.pinv", "hermitian"): {f64},
323+
("linalg.pinv", "singular"): {f64},
323324
"linalg.norm": {f64},
324325
("linalg.norm", "subgradients_at_zero"): {f64},
325326
"linalg.matrix_rank": {f64},
@@ -349,8 +350,6 @@ def format_op(op):
349350
# a deconvolution forward propagation primitive
350351
"nn.functional.conv_transpose2d": {f32, f64},
351352
"nn.functional.conv_transpose3d": {f32, f64},
352-
# frexp not supported on XPU now
353-
"frexp": {f16, f32, f64},
354353
# not implemented for 'Half'
355354
"sort": {b8},
356355
"argsort": {b8},

third_party/xpu.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
bf4bab1ffffd84e5f747f65a17e08ee2fe633102
1+
1e32bbc3d9a68112299e02566cf4b174b89c24c9

0 commit comments

Comments (0)