Skip to content

fix Piper test failures #17135

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jun 12, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions stdlib/public/TensorFlow/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@ list(APPEND swift_stdlib_compile_flags "-Xllvm" "-sil-partial-specialization")
list(APPEND swift_stdlib_compile_flags "-swift-version" "4")
list(APPEND swift_stdlib_compile_flags "-force-single-frontend-invocation")
list(APPEND swift_stdlib_compile_flags "-Xcc" "-I${TF_INCLUDE_DIR}")
# FIXME(SR-7972): Some tests fail when TensorFlow is optimized.
list(APPEND swift_stdlib_compile_flags "-Onone")

set(SOURCES
CompilerRuntime.swift
Expand Down
2 changes: 1 addition & 1 deletion stdlib/public/TensorFlow/Ops.swift
Original file line number Diff line number Diff line change
Expand Up @@ -1166,7 +1166,7 @@ public extension Tensor {
let offset: Tensor<Int32> = Tensor<Int32>(
Raw.scatterNd(
indices: scatterIndices,
updates: boundSize,
updates: Tensor<Float>(boundSize),
shape: rankTensor.rankLifted()
)
)
Expand Down
4 changes: 4 additions & 0 deletions stdlib/public/core/Arrays.swift.gyb
Original file line number Diff line number Diff line change
Expand Up @@ -970,6 +970,10 @@ extension ${Self} : ExpressibleByArrayLiteral {
///
/// - Parameter elements: A variadic list of elements of the new array.
@_inlineable
// SWIFT_ENABLE_TENSORFLOW
// FIXME: We can probably remove @_transparent once constexpr folding is
// more fleshed out.
@_transparent
public init(arrayLiteral elements: Element...) {
self = elements
}
Expand Down
3 changes: 0 additions & 3 deletions test/TensorFlow/crashers.swift
Original file line number Diff line number Diff line change
@@ -1,8 +1,5 @@
// RUN: %target-swift-frontend -Xllvm -tf-dump-intermediates -O -emit-sil %s -verify

// FIXME(b/78371828): Should this test work with optimized_stdlib?
// UNSUPPORTED: optimized_stdlib
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Will removing this cause the test to fail on the OSS side?

Same for retain_release.swift below (I'm trying to re-enable that test in the other googleprivate branch of Marc's).

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The test used to fail on OSS because TensorFlow was optimized. Now that it's not optimized, this test passes on OSS. (Same for the other tests).


// This file contains various regression tests that crashed the compiler.

import TensorFlow
Expand Down
6 changes: 6 additions & 0 deletions test/TensorFlow/deabstraction_finished.swift
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@ func one() -> Int {
}

public func constexprCall(a: Tensor<Float>, idx: Tensor<Int32>) -> Tensor<Float> {
// FIXME: ConstExpr folding can't deal with the non-optimized initializer.
// expected-error @+2 {{attribute 'axis' requires a constant argument}}
// expected-note @+1 {{could not fold operation}}
return Tensor<Float>(oneHotAtIndices: idx.toDevice(), depth: 0, axis: one())
}

Expand All @@ -25,6 +28,9 @@ struct Wrapper {

public func f(a: Tensor<Float>, idx: Tensor<Int32>) -> Tensor<Float> {
let w = Wrapper(v: 1)
// FIXME: ConstExpr folding can't deal with the non-optimized initializer.
// expected-error @+2 {{attribute 'axis' requires a constant argument}}
// expected-note @+1 {{could not fold operation}}
return Tensor<Float>(oneHotAtIndices: idx.toDevice(), depth: 0, axis: w.v)
}

Expand Down
76 changes: 38 additions & 38 deletions test/TensorFlow/integration.swift
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@
// RUN: %target-swift-frontend -Xllvm -tf-dump-intermediates -O -emit-sil -verify %s
// RUN: %target-swift-frontend -Xllvm -tf-dump-intermediates -O -emit-sil -verify %s | %FileCheck %s

// FIXME(b/78371828): Should this test work with optimized_stdlib?
// UNSUPPORTED: optimized_stdlib

import TensorFlow

public func testTensor(a: Tensor<Float>, b: Tensor<Float>) {
Expand All @@ -22,14 +19,17 @@ public func testTensor(a: Tensor<Float>, b: Tensor<Float>) {
// CHECK-LABEL: --- TFPartition Accelerator Result: {{.*}}testTensor{{.*}}
// CHECK: sil private @{{.*}}testTensor{{.*}} : $@callee_owned (TensorHandle<Float>, TensorHandle<Float>) -> TensorHandle<Float> {
// CHECK: bb0(%0 : $TensorHandle<Float>, %1 : $TensorHandle<Float>):
// CHECK-NEXT: %2 = string_literal utf8 "/device:CPU:0"
// CHECK-NEXT: %3 = builtin "__tfop_Add,$in,$in,device"(%0 : $TensorHandle<Float>, %0 : $TensorHandle<Float>, %2 : $Builtin.RawPointer) : $TensorHandle<Float>
// CHECK-NEXT: %4 = string_literal utf8 "/device:CPU:0"
// CHECK-NEXT: %5 = builtin "__tfop_Sub,$in,$in,device"(%3 : $TensorHandle<Float>, %3 : $TensorHandle<Float>, %4 : $Builtin.RawPointer) : $TensorHandle<Float>
// CHECK-NEXT: %6 = builtin "tensorflowSend_0"<TensorHandle<Float>>(%5 : $TensorHandle<Float>) : $()
// CHECK-NEXT: %7 = string_literal utf8 "/device:CPU:0"
// CHECK-NEXT: %8 = builtin "__tfop_Add,$in,$in,device"(%1 : $TensorHandle<Float>, %1 : $TensorHandle<Float>, %7 : $Builtin.RawPointer) : $TensorHandle<Float>
// CHECK-NEXT: return %8 : $TensorHandle<Float>
// CHECK-NEXT: %2 = metatype $@thick Float.Type
// CHECK-NEXT: %3 = string_literal utf8 "/device:CPU:0"
// CHECK-NEXT: %4 = builtin "__tfop_Add,$in,$in,T,device"(%0 : $TensorHandle<Float>, %0 : $TensorHandle<Float>, %2 : $@thick Float.Type, %3 : $Builtin.RawPointer) : $TensorHandle<Float>
// CHECK-NEXT: %5 = metatype $@thick Float.Type
// CHECK-NEXT: %6 = string_literal utf8 "/device:CPU:0"
// CHECK-NEXT: %7 = builtin "__tfop_Sub,$in,$in,T,device"(%4 : $TensorHandle<Float>, %4 : $TensorHandle<Float>, %5 : $@thick Float.Type, %6 : $Builtin.RawPointer) : $TensorHandle<Float>
// CHECK-NEXT: %8 = builtin "tensorflowSend_0"<TensorHandle<Float>>(%7 : $TensorHandle<Float>) : $()
// CHECK-NEXT: %9 = metatype $@thick Float.Type
// CHECK-NEXT: %10 = string_literal utf8 "/device:CPU:0"
// CHECK-NEXT: %11 = builtin "__tfop_Add,$in,$in,T,device"(%1 : $TensorHandle<Float>, %1 : $TensorHandle<Float>, %9 : $@thick Float.Type, %10 : $Builtin.RawPointer) : $TensorHandle<Float>
// CHECK-NEXT: return %11 : $TensorHandle<Float>


// CHECK-LABEL: --- TFPartition Host Result: {{.*}}testTensor{{.*}}
Expand Down Expand Up @@ -65,8 +65,8 @@ public func testScalar(f: Float) { // expected-warning {{'f' implicitly copied t
// CHECK-NEXT: %3 = integer_literal $Builtin.Int32, 1
// CHECK: [[CONST:%.*]] = builtin "__tfop_Const,dtype$dtype,value$tensor,device"(%3 : $Builtin.Int32, %2 : $Builtin.FPIEEE32, {{.*}}) : $TensorHandle<Builtin.FPIEEE32>
// CHECK-NEXT: [[CAST:%.*]] = unchecked_ref_cast [[CONST]] : $TensorHandle<Builtin.FPIEEE32> to $TensorHandle<Float>
// CHECK: [[ADD1:%.*]] = builtin "__tfop_Add,$in,$in,device"(%1 : $TensorHandle<Float>, [[CAST]] : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK: [[ADD2:%.*]] = builtin "__tfop_Add,$in,$in,device"([[ADD1]] : $TensorHandle<Float>, [[ADD1:%.*]] : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK: [[ADD1:%.*]] = builtin "__tfop_Add,$in,$in,T,device"(%1 : $TensorHandle<Float>, [[CAST]] : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is the addition of the "T" attr part of the raw-ops work by @LaurentMazare? What is the value of having such an explicit attr (if there is none, it would be nice to keep the SIL as simple as possible)? From https://github.com/tensorflow/tensorflow/blob/5fa7b03a255d3c0d05aa48e7604a94185ef6b9e2/tensorflow/core/ops/math_ops.cc#L302, it seems T can be inferred from the operands x and y.

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since this test change is not introduced by this PR, pls feel free to not get blocked, but we'll want to follow up with Laurent on this.

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@mhong, that's a good point. In the generated wrappers, we always specify this "T" attr explicitly, as I don't think there is an easy way to tell from the op descriptions whether it could be inferred or not.
We could add some heuristics to remove all the "T" attributes, but I'm not sure how robust that would be.

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, Laurent, for the context. It seems the info in TF REGISTER_OP (cc files) and ops.pbtxt is not exactly the same, and the latter is a superset of the former? E.g., attr "T" for op "Add" is only present in ops.pbtxt.

Given your work is based on ops.pbtxt, I agree that respecting the info there (and not having additional info/heuristics to override/remove stuff) is probably the simplest and most robust for now.

// CHECK: [[ADD2:%.*]] = builtin "__tfop_Add,$in,$in,T,device"([[ADD1]] : $TensorHandle<Float>, [[ADD1:%.*]] : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK-NEXT: return [[ADD2]] : $TensorHandle<Float>
// CHECK-NEXT: }

Expand Down Expand Up @@ -108,8 +108,8 @@ public func testExitBranch1(i: Int) {
// CHECK-NEXT: %1 = integer_literal $Builtin.Int32, 1
// CHECK: %3 = builtin "__tfop_Const,dtype$dtype,value$tensor,device"(%1 : $Builtin.Int32, %0 : $Builtin.FPIEEE32, {{.*}}) : $TensorHandle<Builtin.FPIEEE32>
// CHECK-NEXT: %4 = unchecked_ref_cast %3 : $TensorHandle<Builtin.FPIEEE32> to $TensorHandle<Float>
// CHECK: %6 = builtin "__tfop_Add,$in,$in,device"(%4 : $TensorHandle<Float>, %4 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK-NEXT: return %6 : $TensorHandle<Float>
// CHECK: %7 = builtin "__tfop_Add,$in,$in,T,device"(%4 : $TensorHandle<Float>, %4 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK-NEXT: return %7 : $TensorHandle<Float>
// CHECK-NEXT: }


Expand Down Expand Up @@ -151,7 +151,7 @@ public func testExitBranch2(i: Int) {
// CHECK: cond_br {{.*}}, bb2, bb1

// CHECK: bb1:
// CHECK: builtin "__tfop_Add,$in,$in,device"(
// CHECK: builtin "__tfop_Add,$in,$in,T,device"(
// CHECK-NEXT: builtin "tensorflowSend_0"<TensorHandle<Float>>(
// CHECK-NEXT: br bb2

Expand Down Expand Up @@ -213,7 +213,7 @@ public func test_bool_param2(cond: Bool, // expected-warning {{'cond' implicitly
// CHECK-LABEL: --- TFPartition Accelerator Result: {{.*}}test_bool_param2{{.*}}
// CHECK: sil private @{{.*}}test_bool_param2{{.*}}
// CHECK: bb0(%0 : $TensorHandle<Float>, %1 : $TensorHandle<Float>, %2 : $TensorHandle<Builtin.Int1>):
// CHECK: builtin "__tfop_Add,$in,$in,device"(%0 : $TensorHandle<Float>, %1 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK: builtin "__tfop_Add,$in,$in,T,device"(%0 : $TensorHandle<Float>, %1 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK-NEXT: [[BOOL:%.*]] = builtin "tf_tensor_to_i1"(%2 : $TensorHandle<Builtin.Int1>) : $Builtin.Int1
// CHECK-NEXT: cond_br [[BOOL]]
// ...
Expand Down Expand Up @@ -253,12 +253,12 @@ public func test_while1(maxCount: Int, // expected-warning {{'maxCount' implici
// CHECK-NEXT: integer_literal $Builtin.Int64, 0
// CHECK-NEXT: integer_literal $Builtin.Int32, 9
// CHECK: builtin "__tfop_Const,dtype$dtype,value$tensor,device"(
// CHECK: builtin "__tfop_Add,$in,$in,device"(%0 : $TensorHandle<Float>, %1 : $TensorHandle<Float>
// CHECK: builtin "__tfop_Add,$in,$in,T,device"(%0 : $TensorHandle<Float>, %1 : $TensorHandle<Float>
// CHECK-NEXT: builtin "tf_tensor_to_i1"(
// CHECK-NEXT: cond_br {{.*}}, bb2, bb1

// CHECK: bb3([[A:%.*]] : $TensorHandle<Float>, [[COUNT:%.*]] : $TensorHandle<Builtin.Int64>):
// CHECK: [[NEXTA:%.*]] = builtin "__tfop_Sub,$in,$in,device"([[A]] : $TensorHandle<Float>, %1 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK: [[NEXTA:%.*]] = builtin "__tfop_Sub,$in,$in,T,device"([[A]] : $TensorHandle<Float>, %1 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK: [[NEXTCOUNT:%.*]] = builtin "__tfop_Add,$in,$in,device"([[COUNT]] : $TensorHandle<Builtin.Int64>,
// CHECK: [[CONDT:%.*]] = builtin "__tfop_Less,$in,$in,device"([[NEXTCOUNT]] : $TensorHandle<Builtin.Int64>,
// CHECK-NEXT: [[COND:%.*]] = builtin "tf_tensor_to_i1"([[CONDT]] : $TensorHandle<Builtin.Int1>) : $Builtin.Int1
Expand Down Expand Up @@ -305,16 +305,16 @@ public func scalar_manipulation(a : Float) -> Tensor<Float> {
// CHECK: %5 = builtin "__tfop_Const,dtype$dtype,value$tensor,device"(%3 : $Builtin.Int32, %2 : $Builtin.FPIEEE32, {{.*}}) : $TensorHandle<Builtin.FPIEEE32>
// CHECK-NEXT: %6 = unchecked_ref_cast %5 : $TensorHandle<Builtin.FPIEEE32> to $TensorHandle<Float>

// CHECK: %8 = builtin "__tfop_Add,$in,$in,device"(%1 : $TensorHandle<Float>, %6 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK-NEXT: %9 = builtin "tensorflowSend_1"<TensorHandle<Float>>(%8 : $TensorHandle<Float>) : $()
// CHECK-NEXT: %10 = float_literal $Builtin.FPIEEE32, 0x40000000
// CHECK-NEXT: %11 = integer_literal $Builtin.Int32, 1
// CHECK: %13 = builtin "__tfop_Const,dtype$dtype,value$tensor,device"(%11 : $Builtin.Int32, %10 : $Builtin.FPIEEE32, {{.*}}) : $TensorHandle<Builtin.FPIEEE32>
// CHECK-NEXT: %14 = builtin "tensorflowReceive_0"<TensorHandle<Builtin.FPIEEE32>>() : $TensorHandle<Builtin.FPIEEE32>
// CHECK: %16 = builtin "__tfop_Add,$in,$in,device"(%14 : $TensorHandle<Builtin.FPIEEE32>, %13 : $TensorHandle<Builtin.FPIEEE32>, {{.*}}) : $TensorHandle<Builtin.FPIEEE32>
// CHECK-NEXT: %17 = unchecked_ref_cast %16 : $TensorHandle<Builtin.FPIEEE32> to $TensorHandle<Float>
// CHECK: %19 = builtin "__tfop_Add,$in,$in,device"(%17 : $TensorHandle<Float>, %17 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK-NEXT: return %19 : $TensorHandle<Float>
// CHECK: %9 = builtin "__tfop_Add,$in,$in,T,device"(%1 : $TensorHandle<Float>, %6 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK-NEXT: %10 = builtin "tensorflowSend_1"<TensorHandle<Float>>(%9 : $TensorHandle<Float>) : $()
// CHECK-NEXT: %11 = float_literal $Builtin.FPIEEE32, 0x40000000
// CHECK-NEXT: %12 = integer_literal $Builtin.Int32, 1
// CHECK: %14 = builtin "__tfop_Const,dtype$dtype,value$tensor,device"(%12 : $Builtin.Int32, %11 : $Builtin.FPIEEE32, {{.*}}) : $TensorHandle<Builtin.FPIEEE32>
// CHECK-NEXT: %15 = builtin "tensorflowReceive_0"<TensorHandle<Builtin.FPIEEE32>>() : $TensorHandle<Builtin.FPIEEE32>
// CHECK: %17 = builtin "__tfop_Add,$in,$in,device"(%15 : $TensorHandle<Builtin.FPIEEE32>, %14 : $TensorHandle<Builtin.FPIEEE32>, {{.*}}) : $TensorHandle<Builtin.FPIEEE32>
// CHECK-NEXT: %18 = unchecked_ref_cast %17 : $TensorHandle<Builtin.FPIEEE32> to $TensorHandle<Float>
// CHECK: %21 = builtin "__tfop_Add,$in,$in,T,device"(%18 : $TensorHandle<Float>, %18 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK-NEXT: return %21 : $TensorHandle<Float>
// CHECK-NEXT:}


Expand All @@ -327,10 +327,10 @@ public func testCast(x: Tensor<Float>) -> Tensor<Int32> {
// CHECK-LABEL: --- TFPartition Accelerator Result: {{.*}}testCast
// CHECK: sil private @{{.*}}testCast{{.*}} : $@callee_owned (TensorHandle<Float>) -> TensorHandle<Int32> {
// CHECK: bb0(%0 : $TensorHandle<Float>):
// CHECK: %2 = builtin "__tfop_Add,$in,$in,device"(%0 : $TensorHandle<Float>, %0 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK: %3 = metatype $@thick Int32.Type
// CHECK: %5 = builtin "__tfop_Cast,$in,DstT,device"(%2 : $TensorHandle<Float>, %3 : $@thick Int32.Type, {{.*}}) : $TensorHandle<Int32>
// CHECK: return %5 : $TensorHandle<Int32>
// CHECK: %3 = builtin "__tfop_Add,$in,$in,T,device"(%0 : $TensorHandle<Float>, %0 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK: %5 = metatype $@thick Int32.Type
// CHECK: %7 = builtin "__tfop_Cast,$in,SrcT,DstT,device"(%3 : $TensorHandle<Float>, %4 : $@thick Float.Type, %5 : $@thick Int32.Type, {{.*}}) : $TensorHandle<Int32>
// CHECK: return %7 : $TensorHandle<Int32>



Expand All @@ -351,7 +351,7 @@ public func testInputListArguments(a: TensorHandle<Float>, b: Tensor<Float>) ->
CHECK: [[PACK1:%.*]] = builtin "__tfop_Pack,$in,$inelt,$inelt,$inelt,device"(%2 : $@thin TensorHandle<Float>.Type, %0 : $TensorHandle<Float>, %0 : $TensorHandle<Float>, %0 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
CHECK: [[TYPE:%.*]] = metatype $@thin Tensor<Float>.Type
CHECK: [[PACK2:%.*]] = builtin "__tfop_Pack,$in,$inelt,$inelt,$inelt,device"([[TYPE]] : $@thin Tensor<Float>.Type, %1 : $TensorHandle<Float>, %1 : $TensorHandle<Float>, %1 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
CHECK: [[RET:%.*]] = builtin "__tfop_Add,$in,$in,device"([[PACK1]] : $TensorHandle<Float>, [[PACK2]] : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
CHECK: [[RET:%.*]] = builtin "__tfop_Add,$in,$in,T,device"([[PACK1]] : $TensorHandle<Float>, [[PACK2]] : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
CHECK: return [[RET]] : $TensorHandle<Float>
CHECK: }
*/
Expand All @@ -378,7 +378,7 @@ public func liveOutTest(

/*
CHECK-LABEL: --- TFPartition Host Result: {{.*}}liveOutTest{{.*}}
CHECK: @{{.*}}liveOutTest{{.*}} : $@convention(thin) (@owned Tensor<Float>, @owned Tensor<Float>, @owned Tensor<Float>) -> @owned Tensor<Float> {
CHECK: @{{.*}}liveOutTest{{.*}} : $@convention(thin) (@guaranteed Tensor<Float>, @guaranteed Tensor<Float>, @guaranteed Tensor<Float>) -> @owned Tensor<Float> {

// [[RESULTBUF:%.*]] = alloc_stack $OpaquePointer
// [[RESULTACCESS:%.*]] = begin_access [modify] [static] [[RESULTBUF]] : $*OpaquePointer
Expand Down Expand Up @@ -441,8 +441,8 @@ func shouldntInline(_ a: Tensor<Float>) -> Tensor<Float> {

// CHECK-LABEL: --- TFPartition Accelerator Result: {{.*}}shouldntInline
// CHECK: bb0(%0 : $TensorHandle<Float>):
// CHECK: %2 = builtin "__tfop_Mul,$in,$in,device"(%0 : $TensorHandle<Float>, %0 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK: return %2 : $TensorHandle<Float>
// CHECK: %3 = builtin "__tfop_Mul,$in,$in,T,device"(%0 : $TensorHandle<Float>, %0 : $TensorHandle<Float>, {{.*}}) : $TensorHandle<Float>
// CHECK: return %3 : $TensorHandle<Float>
// CHECK-LABEL: ----

public func testNotInlined() {
Expand Down Expand Up @@ -514,5 +514,5 @@ public func test77437755(_ hiddenSize: Float) {
// CHECK-LABEL: ---- INPUT FUNCTION {{.*}}test77437755{{.*}} ----------
// CHECK: [[STDDEV:%.*]] = builtin "fdiv_FPIEEE32"
// CHECK: [[STDDEVT:%.*]] = builtin "__tfop_tfc.scalarToTensor,$in"([[STDDEV]] : $Builtin.FPIEEE32) : $TensorHandle<Float>
// CHECK: builtin "__tfop_Mul,$in,$in"({{.*}} : $TensorHandle<Float>, [[STDDEVT]] : $TensorHandle<Float>)
// CHECK: builtin "__tfop_Mul,$in,$in,T"({{.*}} : $TensorHandle<Float>, [[STDDEVT]] : $TensorHandle<Float>, {{.*}})
// CHECK-LABEL: ---- END OF INPUT FUNCTION ----------
3 changes: 0 additions & 3 deletions test/TensorFlow/retain_release.swift
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@
// RUN: %target-swift-frontend -Xllvm -tf-dump-intermediates -O -emit-sil %s -o -
// RUN: %target-swift-frontend -Xllvm -tf-dump-intermediates -O -emit-sil %s -verify | %FileCheck %s

// FIXME(b/78371828): Should this test work with optimized_stdlib?
// UNSUPPORTED: optimized_stdlib

import TensorFlow

// Unit tests on generating balanced retain/release SIL instructions.
Expand Down
3 changes: 3 additions & 0 deletions test/TensorFlowRuntime/shaped_array.swift
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@
import TensorFlow
import StdlibUnittest

// TODO(SR-7983): Investigate why this is necessary.
import SwiftOnoneSupport

var ShapedArrayTests = TestSuite("ShapedArrayTests")

// TODO: add full Collection scalar test suite.
Expand Down
3 changes: 3 additions & 0 deletions test/TensorFlowRuntime/sync_runtime.swift
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@ import StdlibUnittest
import CTensorFlow
import TensorFlow

// TODO(SR-7983): Investigate why this is necessary.
import SwiftOnoneSupport

var RuntimeTests = TestSuite("SyncRuntime")

_RuntimeConfig.usesSynchronousExecution = true
Expand Down