This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit ded6f88

committed
Apply the final naming decision in [SE-0253](https://github.com/apple/swift-evolution/blob/master/proposals/0253-callable.md) to the TensorFlow library. This depends on swiftlang/swift#25235.
This change shall be merged in time for the Swift for TensorFlow 0.4 release.
1 parent 802af3e commit ded6f88

File tree

README.md
Sources/TensorFlow/Layer.swift
Sources/TensorFlow/Layers/Convolutional.swift
Sources/TensorFlow/Layers/Core.swift
Sources/TensorFlow/Layers/Upsampling.swift
Tests/TensorFlowTests/SequentialTests.swift
Tests/TensorFlowTests/TrivialModelTests.swift

7 files changed: +15 -15 lines changed
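For context, SE-0253 makes a value callable with function call syntax when its type declares a method named callAsFunction. The sketch below illustrates only that mechanism and is not code from this commit; the Polynomial type and its values are hypothetical.

// A minimal sketch of the SE-0253 mechanism (hypothetical type, not part of this commit).
struct Polynomial {
    var coefficients: [Double]

    // Declaring `callAsFunction` lets instances be applied with call syntax.
    func callAsFunction(_ x: Double) -> Double {
        // Evaluate the polynomial with Horner's method.
        var result = 0.0
        for coefficient in coefficients.reversed() {
            result = result * x + coefficient
        }
        return result
    }
}

let p = Polynomial(coefficients: [2.0, 3.0, 1.0])  // 2 + 3x + x^2
print(p(2.0))  // Sugar for p.callAsFunction(2.0); prints 12.0

The diff below renames the library's earlier `call` methods to that final `callAsFunction` spelling.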

README.md

Lines changed: 1 addition & 1 deletion

@@ -29,7 +29,7 @@ struct Model: Layer {
     var layer3 = Dense<Float>(inputSize: hiddenSize, outputSize: 3, activation: identity)

     @differentiable
-    func call(_ input: Tensor<Float>) -> Tensor<Float> {
+    func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
         return input.sequenced(through: layer1, layer2, layer3)
     }
 }

Sources/TensorFlow/Layer.swift

Lines changed: 1 addition & 1 deletion

@@ -31,7 +31,7 @@ public protocol Layer: Differentiable & KeyPathIterable
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    func call(_ input: Input) -> Output
+    func callAsFunction(_ input: Input) -> Output
 }

 public extension Layer {
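To show how the renamed requirement is satisfied, here is a hedged sketch of a conforming layer that mirrors the README and test models in this diff; the TwoLayerModel name and its layer sizes are made up for illustration.

import TensorFlow

// Hypothetical model conforming to the updated Layer protocol (sizes chosen arbitrarily).
struct TwoLayerModel: Layer {
    var dense1 = Dense<Float>(inputSize: 4, outputSize: 8, activation: relu)
    var dense2 = Dense<Float>(inputSize: 8, outputSize: 3, activation: identity)

    // The protocol requirement is now spelled `callAsFunction(_:)`.
    @differentiable
    func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
        return input.sequenced(through: dense1, dense2)
    }
}

// Call sites are unchanged: applying the model like a function
// resolves to callAsFunction(_:).
let model = TwoLayerModel()
let output = model(Tensor<Float>(zeros: [2, 4]))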

Sources/TensorFlow/Layers/Convolutional.swift

Lines changed: 4 additions & 4 deletions

@@ -59,7 +59,7 @@ public struct Conv1D<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer `[batchCount, width, inputChannels]`.
     /// - Returns: The output `[batchCount, newWidth, outputChannels]`.
     @differentiable
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         let conv2D = input.expandingShape(at: 1).convolved2D(
             withFilter: filter.expandingShape(at: 0), strides: (1, 1, stride, 1), padding: padding)
         return activation(conv2D.squeezingShape(at: 1) + bias)
@@ -177,7 +177,7 @@ public struct Conv2D<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         return activation(input.convolved2D(withFilter: filter,
                                             strides: (1, strides.0, strides.1, 1),
                                             padding: padding) + bias)
@@ -293,7 +293,7 @@ public struct Conv3D<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         return activation(input.convolved3D(withFilter: filter,
                                             strides: (1, strides.0, strides.1, strides.2, 1),
                                             padding: padding) + bias)
@@ -411,7 +411,7 @@ public struct TransposedConv2D: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    public func call(_ input: Tensor<Float>) -> Tensor<Float> {
+    public func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
         let batchSize = input.shape[0]
         let w = (input.shape[1] - (1 * paddingIndex)) *
             strides.0 + (filter.shape[0] * paddingIndex)

Sources/TensorFlow/Layers/Core.swift

Lines changed: 4 additions & 4 deletions

@@ -53,7 +53,7 @@ public struct Dropout<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable(vjp: _vjpApplied(to:))
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         switch Context.local.learningPhase {
         case .training:
             return applyingTraining(to: input)
@@ -92,7 +92,7 @@ public struct Flatten<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         let batchSize = input.shape[0]
         let remaining = input.shape[1..<input.rank].contiguousSize
         return input.reshaped(to: [batchSize, remaining])
@@ -128,7 +128,7 @@ public struct Reshape<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         return input.reshaped(toShape: shape)
     }
 }
@@ -163,7 +163,7 @@ public struct Dense<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         return activation(matmul(input, weight) + bias)
     }
 }

Sources/TensorFlow/Layers/Upsampling.swift

Lines changed: 3 additions & 3 deletions

@@ -29,7 +29,7 @@ public struct UpSampling1D<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         let shape = input.shape
         let (batchSize, timesteps, channels) = (shape[0], shape[1], shape[2])
         let scaleOnes = Tensor<Scalar>(ones: [1, 1, size, 1])
@@ -55,7 +55,7 @@ public struct UpSampling2D<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         let shape = input.shape
         let (batchSize, height, width, channels) = (shape[0], shape[1], shape[2], shape[3])
         let scaleOnes = Tensor<Scalar>(ones: [1, 1, size, 1, size, 1])
@@ -107,7 +107,7 @@ public struct UpSampling3D<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Parameter input: The input to the layer.
     /// - Returns: The output.
     @differentiable
-    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
+    public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
         var result = repeatingElements(input, alongAxis: 1, count: size)
         result = repeatingElements(result, alongAxis: 2, count: size)
         result = repeatingElements(result, alongAxis: 3, count: size)

Tests/TensorFlowTests/SequentialTests.swift

Lines changed: 1 addition & 1 deletion

@@ -24,7 +24,7 @@ final class SequentialTests: XCTestCase {
                                       seed: (0xeffeffe, 0xfffe))

             @differentiable
-            func call(_ input: Tensor<Float>) -> Tensor<Float> {
+            func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
                 return input.sequenced(through: dense1, dense2)
             }
         }

Tests/TensorFlowTests/TrivialModelTests.swift

Lines changed: 1 addition & 1 deletion

@@ -34,7 +34,7 @@ final class TrivialModelTests: XCTestCase {
                 )
             }
             @differentiable
-            func call(_ input: Tensor<Float>) -> Tensor<Float> {
+            func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
                 let h1 = l1(input)
                 return l2(h1)
             }
