
Fix deprecation warnings for method-style differential operators. #577

Merged: 3 commits, Dec 7, 2019
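For context, the migration is mechanical: each deprecated method-style call such as 'x.gradient { ... }' becomes a call to the top-level operator named in the corresponding deprecation message. A minimal before/after sketch, assuming Swift for TensorFlow's top-level 'gradient(at:in:)' (the values are illustrative, not taken from the tests below):

    import TensorFlow

    let x = Tensor<Float>(ones: [3, 2])

    // Before: method style, now deprecated.
    // let grads = x.gradient { x -> Tensor<Float> in (x * x).sum() }

    // After: top-level operator, as the deprecation message recommends.
    let grads = gradient(at: x) { x -> Tensor<Float> in (x * x).sum() }
    print(grads)  // 2 * x, i.e. [[2.0, 2.0], [2.0, 2.0]]
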
8 changes: 4 additions & 4 deletions Sources/TensorFlow/Core/DifferentialOperators.swift
@@ -18,7 +18,7 @@

public extension Differentiable {
@available(*, deprecated, message: """
-Method-style differential operators are deprecated and will be removed; use top-level
+Method-style differential operators are deprecated and will be removed; use top-level \
function 'TensorFlow.gradient(at:in:)' instead
""")
@inlinable
@@ -29,7 +29,7 @@ public extension Differentiable {
}

@available(*, deprecated, message: """
-Method-style differential operators are deprecated and will be removed; use top-level
+Method-style differential operators are deprecated and will be removed; use top-level \
function 'TensorFlow.valueWithGradient(at:in:)' instead
""")
@inlinable
@@ -45,7 +45,7 @@ public extension Differentiable {
}

@available(*, deprecated, message: """
-Method-style differential operators are deprecated and will be removed; use top-level
+Method-style differential operators are deprecated and will be removed; use top-level \
function 'TensorFlow.gradient(at:_:in:)' instead
""")
@inlinable
@@ -57,7 +57,7 @@ public extension Differentiable {
}

@available(*, deprecated, message: """
-Method-style differential operators are deprecated and will be removed; use top-level
+Method-style differential operators are deprecated and will be removed; use top-level \
function 'TensorFlow.valueWithGradient(at:_:in:)' instead
""")
@inlinable
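The only substantive change in this file is the trailing backslash: in a Swift multi-line string literal, a '\' at the end of a line escapes the line break, so the two source lines produce a single-line warning message. A standalone sketch of that behavior, reusing the message text from the diff above:

    let message = """
        Method-style differential operators are deprecated and will be removed; use top-level \
        function 'TensorFlow.gradient(at:in:)' instead
        """
    // Without the backslash, the literal would contain an embedded newline and
    // the compiler would emit the deprecation warning split across two lines.
    print(message)
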
8 changes: 4 additions & 4 deletions Tests/TensorFlowTests/LayerTests.swift
@@ -48,7 +48,7 @@ final class LayerTests: XCTestCase {
expected: y)
withTensorLeakChecking {
for _ in 0..<10 {
-let 𝛁model = model.gradient { model -> Tensor<Float> in
+let 𝛁model = gradient(at: model) { model -> Tensor<Float> in
meanSquaredError(
predicted: model(x).squeezingShape(at: 1),
expected: y)
@@ -1142,7 +1142,7 @@ final class LayerTests: XCTestCase {
let rnn = RNN(SimpleRNNCell<Float>(inputSize: 4, hiddenSize: 4,
seed: (0xFeed, 0xBeef)))
withTensorLeakChecking {
-let (outputs, _) = rnn.valueWithPullback(at: inputs) { rnn, inputs in
+let (outputs, _) = valueWithPullback(at: rnn, inputs) { rnn, inputs in
return rnn(inputs)
}
assertEqual(
@@ -1173,7 +1173,7 @@ final class LayerTests: XCTestCase {
let inputs: [Tensor<Float>] = Array(repeating: x, count: 4)
let rnn = RNN(LSTMCell<Float>(inputSize: 4, hiddenSize: 4))
withTensorLeakChecking {
-let (outputs, _) = rnn.valueWithPullback(at: inputs) { rnn, inputs in
+let (outputs, _) = valueWithPullback(at: rnn, inputs) { rnn, inputs in
return rnn(inputs)
}
assertEqual(
@@ -1204,7 +1204,7 @@ final class LayerTests: XCTestCase {
biasInitializer: zeros())
)
withTensorLeakChecking {
-let (outputs, _) = rnn.valueWithPullback(at: inputs) { rnn, inputs in
+let (outputs, _) = valueWithPullback(at: rnn, inputs) { rnn, inputs in
return rnn(inputs)
}
XCTAssertEqual(outputs.map { $0.hidden },
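The RNN hunks above switch from 'rnn.valueWithPullback(at: inputs)' to the two-argument top-level 'valueWithPullback(at:_:in:)', which differentiates with respect to the model and the inputs together. A minimal sketch with plain tensors standing in for the RNN (hypothetical values, not from the tests):

    import TensorFlow

    let w = Tensor<Float>([2, 3])
    let x = Tensor<Float>([4, 5])
    // 'value' is the closure's result; 'pullback' maps a cotangent of the
    // output back to cotangents of both arguments.
    let (value, pullback) = valueWithPullback(at: w, x) { w, x in (w * x).sum() }
    let (dw, dx) = pullback(Tensor<Float>(1))
    print(value)   // 23.0
    print(dw, dx)  // [4.0, 5.0] [2.0, 3.0]
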
16 changes: 8 additions & 8 deletions Tests/TensorFlowTests/OperatorTests/BasicTests.swift
@@ -76,8 +76,8 @@ final class BasicOperatorTests: XCTestCase {
func testVJPPadded() {
let x = Tensor<Float>(ones: [3, 2])
let target = Tensor<Float>([[2, 2], [2, 2], [2, 2]])
-let grads = x.gradient { a -> Tensor<Float> in
-let paddedTensor = a.padded(forSizes: [(1, 0), (0, 1)], with: 3.0)
+let grads = gradient(at: x) { x -> Tensor<Float> in
+let paddedTensor = x.padded(forSizes: [(1, 0), (0, 1)], with: 3.0)
return (paddedTensor * paddedTensor).sum()
}
XCTAssertEqual(grads, target)
@@ -86,8 +86,8 @@ final class BasicOperatorTests: XCTestCase {
func testVJPPaddedConstant() {
let x = Tensor<Float>(ones: [3, 2])
let target = Tensor<Float>([[2, 2], [2, 2], [2, 2]])
-let grads = x.gradient { a -> Tensor<Float> in
-let paddedTensor = a.padded(forSizes: [(1, 0), (0, 1)], mode: .constant(3.0))
+let grads = gradient(at: x) { x -> Tensor<Float> in
+let paddedTensor = x.padded(forSizes: [(1, 0), (0, 1)], mode: .constant(3.0))
return (paddedTensor * paddedTensor).sum()
}
XCTAssertEqual(grads, target)
@@ -96,8 +96,8 @@ final class BasicOperatorTests: XCTestCase {
func testVJPPaddedReflect() {
let x = Tensor<Float>([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
let target = Tensor<Float>([[4, 8, 6], [32, 40, 24], [56, 64, 36]])
-let grads = x.gradient { a -> Tensor<Float> in
-let paddedTensor = a.padded(forSizes: [(2, 0), (0, 2)], mode: .reflect)
+let grads = gradient(at: x) { x -> Tensor<Float> in
+let paddedTensor = x.padded(forSizes: [(2, 0), (0, 2)], mode: .reflect)
return (paddedTensor * paddedTensor).sum()
}
XCTAssertEqual(grads, target)
@@ -106,8 +106,8 @@ final class BasicOperatorTests: XCTestCase {
func testVJPPaddedSymmetric() {
let x = Tensor<Float>([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
let target = Tensor<Float>([[4, 16, 24], [16, 40, 48], [14, 32, 36]])
-let grads = x.gradient { a -> Tensor<Float> in
-let paddedTensor = a.padded(forSizes: [(2, 0), (0, 2)], mode: .symmetric)
+let grads = gradient(at: x) { x -> Tensor<Float> in
+let paddedTensor = x.padded(forSizes: [(2, 0), (0, 2)], mode: .symmetric)
return (paddedTensor * paddedTensor).sum()
}
XCTAssertEqual(grads, target)
2 changes: 1 addition & 1 deletion Tests/TensorFlowTests/SequentialTests.swift
@@ -48,7 +48,7 @@ final class SequentialTests: XCTestCase {
Context.local.learningPhase = .training
withTensorLeakChecking {
for _ in 0..<1000 {
-let 𝛁model = model.gradient { model -> Tensor<Float> in
+let 𝛁model = gradient(at: model) { model -> Tensor<Float> in
let ŷ = model(x)
return meanSquaredError(predicted: ŷ, expected: y)
}
2 changes: 1 addition & 1 deletion Tests/TensorFlowTests/TrivialModelTests.swift
@@ -45,7 +45,7 @@ final class TrivialModelTests: XCTestCase {
Context.local.learningPhase = .training
withTensorLeakChecking {
for _ in 0..<3000 {
-let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in
+let 𝛁model = gradient(at: classifier) { classifier -> Tensor<Float> in
let ŷ = classifier(x)
return meanSquaredError(predicted: ŷ, expected: y)
}