Skip to content
This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit d2d2484

Browse files
committed
Apply the final naming decision in [SE-0253](https://github.com/apple/swift-evolution/blob/master/proposals/0253-callable.md) to the TensorFlow library. This depends on [swiftlang/swift#25235](https://github.com/swiftlang/swift/pull/25235).
Shall be merged in time for the Swift for TensorFlow 0.4 release.
1 parent 802af3e commit d2d2484

File tree

10 files changed

+34
-34
lines changed

10 files changed

+34
-34
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ struct Model: Layer {
2929
var layer3 = Dense<Float>(inputSize: hiddenSize, outputSize: 3, activation: identity)
3030

3131
@differentiable
32-
func call(_ input: Tensor<Float>) -> Tensor<Float> {
32+
func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
3333
return input.sequenced(through: layer1, layer2, layer3)
3434
}
3535
}

Sources/TensorFlow/Layer.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ public protocol Layer: Differentiable & KeyPathIterable
3131
/// - Parameter input: The input to the layer.
3232
/// - Returns: The output.
3333
@differentiable
34-
func call(_ input: Input) -> Output
34+
func callAsFunction(_ input: Input) -> Output
3535
}
3636

3737
public extension Layer {

Sources/TensorFlow/Layers/Convolutional.swift

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ public struct Conv1D<Scalar: TensorFlowFloatingPoint>: Layer {
5959
/// - Parameter input: The input to the layer `[batchCount, width, inputChannels]`.
6060
/// - Returns: The output `[batchCount, newWidth, outputChannels]`.
6161
@differentiable
62-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
62+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
6363
let conv2D = input.expandingShape(at: 1).convolved2D(
6464
withFilter: filter.expandingShape(at: 0), strides: (1, 1, stride, 1), padding: padding)
6565
return activation(conv2D.squeezingShape(at: 1) + bias)
@@ -177,7 +177,7 @@ public struct Conv2D<Scalar: TensorFlowFloatingPoint>: Layer {
177177
/// - Parameter input: The input to the layer.
178178
/// - Returns: The output.
179179
@differentiable
180-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
180+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
181181
return activation(input.convolved2D(withFilter: filter,
182182
strides: (1, strides.0, strides.1, 1),
183183
padding: padding) + bias)
@@ -293,7 +293,7 @@ public struct Conv3D<Scalar: TensorFlowFloatingPoint>: Layer {
293293
/// - Parameter input: The input to the layer.
294294
/// - Returns: The output.
295295
@differentiable
296-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
296+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
297297
return activation(input.convolved3D(withFilter: filter,
298298
strides: (1, strides.0, strides.1, strides.2, 1),
299299
padding: padding) + bias)
@@ -411,7 +411,7 @@ public struct TransposedConv2D: Layer {
411411
/// - Parameter input: The input to the layer.
412412
/// - Returns: The output.
413413
@differentiable
414-
public func call(_ input: Tensor<Float>) -> Tensor<Float> {
414+
public func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
415415
let batchSize = input.shape[0]
416416
let w = (input.shape[1] - (1 * paddingIndex)) *
417417
strides.0 + (filter.shape[0] * paddingIndex)

Sources/TensorFlow/Layers/Core.swift

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ public struct Dropout<Scalar: TensorFlowFloatingPoint>: Layer {
5353
/// - Parameter input: The input to the layer.
5454
/// - Returns: The output.
5555
@differentiable(vjp: _vjpApplied(to:))
56-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
56+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
5757
switch Context.local.learningPhase {
5858
case .training:
5959
return applyingTraining(to: input)
@@ -92,7 +92,7 @@ public struct Flatten<Scalar: TensorFlowFloatingPoint>: Layer {
9292
/// - Parameter input: The input to the layer.
9393
/// - Returns: The output.
9494
@differentiable
95-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
95+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
9696
let batchSize = input.shape[0]
9797
let remaining = input.shape[1..<input.rank].contiguousSize
9898
return input.reshaped(to: [batchSize, remaining])
@@ -128,7 +128,7 @@ public struct Reshape<Scalar: TensorFlowFloatingPoint>: Layer {
128128
/// - Parameter input: The input to the layer.
129129
/// - Returns: The output.
130130
@differentiable
131-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
131+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
132132
return input.reshaped(toShape: shape)
133133
}
134134
}
@@ -163,7 +163,7 @@ public struct Dense<Scalar: TensorFlowFloatingPoint>: Layer {
163163
/// - Parameter input: The input to the layer.
164164
/// - Returns: The output.
165165
@differentiable
166-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
166+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
167167
return activation(matmul(input, weight) + bias)
168168
}
169169
}

Sources/TensorFlow/Layers/Normalization.swift

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ public struct BatchNorm<Scalar: TensorFlowFloatingPoint>: Layer {
9090
/// - Parameter input: The input to the layer.
9191
/// - Returns: The output.
9292
@differentiable(vjp: _vjpApplied(to:))
93-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
93+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
9494
switch Context.local.learningPhase {
9595
case .training:
9696
return applyingTraining(to: input)
@@ -185,7 +185,7 @@ public struct LayerNorm<Scalar: TensorFlowFloatingPoint>: Layer {
185185
/// - Parameter input: The input to the layer.
186186
/// - Returns: The output.
187187
@differentiable
188-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
188+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
189189
let mean = input.mean(alongAxes: axis)
190190
let variance = input.variance(alongAxes: axis)
191191
let inv = rsqrt(variance + epsilon) * scale

Sources/TensorFlow/Layers/Pooling.swift

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ public struct MaxPool1D<Scalar: TensorFlowFloatingPoint>: Layer {
4343
/// - Parameter input: The input to the layer.
4444
/// - Returns: The output.
4545
@differentiable
46-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
46+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
4747
return input.expandingShape(at: 1).maxPooled2D(
4848
kernelSize: (1, 1, poolSize, 1), strides: (1, 1, stride, 1), padding: padding
4949
).squeezingShape(at: 1)
@@ -77,7 +77,7 @@ public struct MaxPool2D<Scalar: TensorFlowFloatingPoint>: Layer {
7777
/// - Parameter input: The input to the layer.
7878
/// - Returns: The output.
7979
@differentiable
80-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
80+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
8181
return input.maxPooled2D(
8282
kernelSize: poolSize, strides: strides, padding: padding)
8383
}
@@ -124,7 +124,7 @@ public struct MaxPool3D<Scalar: TensorFlowFloatingPoint>: Layer {
124124
/// - Parameter input: The input to the layer.
125125
/// - Returns: The output.
126126
@differentiable
127-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
127+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
128128
return input.maxPooled3D(kernelSize: poolSize, strides: strides, padding: padding)
129129
}
130130
}
@@ -184,7 +184,7 @@ public struct AvgPool1D<Scalar: TensorFlowFloatingPoint>: Layer {
184184
/// - Parameter input: The input to the layer.
185185
/// - Returns: The output.
186186
@differentiable
187-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
187+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
188188
return input.expandingShape(at: 1).averagePooled2D(
189189
kernelSize: (1, 1, poolSize, 1), strides: (1, 1, stride, 1), padding: padding
190190
).squeezingShape(at: 1)
@@ -218,7 +218,7 @@ public struct AvgPool2D<Scalar: TensorFlowFloatingPoint>: Layer {
218218
/// - Parameter input: The input to the layer.
219219
/// - Returns: The output.
220220
@differentiable
221-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
221+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
222222
return input.averagePooled2D(kernelSize: poolSize, strides: strides, padding: padding)
223223
}
224224
}
@@ -264,7 +264,7 @@ public struct AvgPool3D<Scalar: TensorFlowFloatingPoint>: Layer {
264264
/// - Parameter input: The input to the layer.
265265
/// - Returns: The output.
266266
@differentiable
267-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
267+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
268268
return input.averagePooled3D(kernelSize: poolSize, strides: strides, padding: padding)
269269
}
270270
}
@@ -304,7 +304,7 @@ public struct GlobalAvgPool1D<Scalar: TensorFlowFloatingPoint>: Layer {
304304
/// - Parameter input: The input to the layer.
305305
/// - Returns: The output.
306306
@differentiable
307-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
307+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
308308
return input.mean(squeezingAxes: 1)
309309
}
310310
}
@@ -320,7 +320,7 @@ public struct GlobalAvgPool2D<Scalar: TensorFlowFloatingPoint>: Layer {
320320
/// - Parameter input: The input to the layer.
321321
/// - Returns: The output.
322322
@differentiable
323-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
323+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
324324
return input.mean(squeezingAxes: [1, 2])
325325
}
326326
}
@@ -336,7 +336,7 @@ public struct GlobalAvgPool3D<Scalar: TensorFlowFloatingPoint>: Layer {
336336
/// - Parameter input: The input to the layer.
337337
/// - Returns: The output.
338338
@differentiable
339-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
339+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
340340
return input.mean(squeezingAxes: [1, 2, 3])
341341
}
342342
}
@@ -355,7 +355,7 @@ public struct GlobalMaxPool1D<Scalar: TensorFlowFloatingPoint>: Layer {
355355
/// phase.
356356
/// - Returns: The output.
357357
@differentiable
358-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
358+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
359359
return input.max(squeezingAxes: 1)
360360
}
361361
}
@@ -371,7 +371,7 @@ public struct GlobalMaxPool2D<Scalar: TensorFlowFloatingPoint>: Layer {
371371
/// - Parameter input: The input to the layer.
372372
/// - Returns: The output.
373373
@differentiable
374-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
374+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
375375
return input.max(squeezingAxes: [1, 2])
376376
}
377377
}
@@ -387,7 +387,7 @@ public struct GlobalMaxPool3D<Scalar: TensorFlowFloatingPoint>: Layer {
387387
/// - Parameter input: The input to the layer.
388388
/// - Returns: The output.
389389
@differentiable
390-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
390+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
391391
return input.max(squeezingAxes: [1, 2, 3])
392392
}
393393
}

Sources/TensorFlow/Layers/Recurrent.swift

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ public extension RNNCell {
6262
/// - previousState: The previous state of the RNN cell.
6363
/// - Returns: The output.
6464
@differentiable
65-
func call(input: TimeStepInput, state: State) -> RNNCellOutput<TimeStepOutput, State> {
65+
func callAsFunction(input: TimeStepInput, state: State) -> RNNCellOutput<TimeStepOutput, State> {
6666
return self(RNNCellInput(input: input, state: state))
6767
}
6868
}
@@ -113,7 +113,7 @@ public struct SimpleRNNCell<Scalar: TensorFlowFloatingPoint>: RNNCell, VectorNum
113113
/// - Parameter input: The input to the layer.
114114
/// - Returns: The hidden state.
115115
@differentiable
116-
public func call(_ input: Input) -> Output {
116+
public func callAsFunction(_ input: Input) -> Output {
117117
let concatenatedInput = input.input.concatenated(with: input.state.value, alongAxis: 1)
118118
let newState = State(tanh(matmul(concatenatedInput, weight) + bias))
119119
return Output(output: newState, state: newState)
@@ -175,7 +175,7 @@ public struct LSTMCell<Scalar: TensorFlowFloatingPoint>: RNNCell, VectorNumeric
175175
/// - Parameter input: The input to the layer.
176176
/// - Returns: The hidden state.
177177
@differentiable
178-
public func call(_ input: Input) -> Output {
178+
public func callAsFunction(_ input: Input) -> Output {
179179
let gateInput = input.input.concatenated(with: input.state.hidden, alongAxis: 1)
180180

181181
let inputGate = sigmoid(matmul(gateInput, inputWeight) + inputBias)
@@ -203,7 +203,7 @@ public struct RNN<Cell: RNNCell>: Layer {
203203
}
204204

205205
@differentiable(wrt: (self, input), vjp: _vjpCall(_:initialState:))
206-
public func call(_ input: [Cell.TimeStepInput],
206+
public func callAsFunction(_ input: [Cell.TimeStepInput],
207207
initialState: Cell.State) -> [Cell.TimeStepOutput] {
208208
var currentHiddenState = initialState
209209
var timeStepOutputs: [Cell.TimeStepOutput] = []
@@ -253,7 +253,7 @@ public struct RNN<Cell: RNNCell>: Layer {
253253
}
254254

255255
@differentiable(wrt: (self, inputs))
256-
public func call(_ inputs: [Cell.TimeStepInput]) -> [Cell.TimeStepOutput] {
256+
public func callAsFunction(_ inputs: [Cell.TimeStepInput]) -> [Cell.TimeStepOutput] {
257257
return self(inputs, initialState: cell.zeroState.withoutDerivative())
258258
}
259259

Sources/TensorFlow/Layers/Upsampling.swift

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ public struct UpSampling1D<Scalar: TensorFlowFloatingPoint>: Layer {
2929
/// - Parameter input: The input to the layer.
3030
/// - Returns: The output.
3131
@differentiable
32-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
32+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
3333
let shape = input.shape
3434
let (batchSize, timesteps, channels) = (shape[0], shape[1], shape[2])
3535
let scaleOnes = Tensor<Scalar>(ones: [1, 1, size, 1])
@@ -55,7 +55,7 @@ public struct UpSampling2D<Scalar: TensorFlowFloatingPoint>: Layer {
5555
/// - Parameter input: The input to the layer.
5656
/// - Returns: The output.
5757
@differentiable
58-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
58+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
5959
let shape = input.shape
6060
let (batchSize, height, width, channels) = (shape[0], shape[1], shape[2], shape[3])
6161
let scaleOnes = Tensor<Scalar>(ones: [1, 1, size, 1, size, 1])
@@ -107,7 +107,7 @@ public struct UpSampling3D<Scalar: TensorFlowFloatingPoint>: Layer {
107107
/// - Parameter input: The input to the layer.
108108
/// - Returns: The output.
109109
@differentiable
110-
public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
110+
public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
111111
var result = repeatingElements(input, alongAxis: 1, count: size)
112112
result = repeatingElements(result, alongAxis: 2, count: size)
113113
result = repeatingElements(result, alongAxis: 3, count: size)

Tests/TensorFlowTests/SequentialTests.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ final class SequentialTests: XCTestCase {
2424
seed: (0xeffeffe, 0xfffe))
2525

2626
@differentiable
27-
func call(_ input: Tensor<Float>) -> Tensor<Float> {
27+
func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
2828
return input.sequenced(through: dense1, dense2)
2929
}
3030
}

Tests/TensorFlowTests/TrivialModelTests.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ final class TrivialModelTests: XCTestCase {
3434
)
3535
}
3636
@differentiable
37-
func call(_ input: Tensor<Float>) -> Tensor<Float> {
37+
func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
3838
let h1 = l1(input)
3939
return l2(h1)
4040
}

0 commit comments

Comments
 (0)