Skip to content
This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit 03d8ff4

Browse files
authored
Test swift-models. (#399)
1 parent 5c8b8ee commit 03d8ff4

File tree

2 files changed

+40
-11
lines changed

2 files changed

+40
-11
lines changed

Dockerfile

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,5 +39,25 @@ RUN echo "/usr/local/cuda-10.0/targets/x86_64-linux/lib/stubs" > /etc/ld.so.conf
3939
# Print out swift version for better debugging for toolchain problems
4040
RUN /swift-tensorflow-toolchain/usr/bin/swift --version
4141

42+
# Clean out existing artifacts.
43+
# TODO: move into bash scripts...
44+
RUN rm /swift-tensorflow-toolchain/usr/lib/swift/linux/x86_64/TensorFlow.swiftinterface
45+
RUN rm /swift-tensorflow-toolchain/usr/lib/swift/linux/x86_64/TensorFlow.swiftdoc
46+
RUN rm /swift-tensorflow-toolchain/usr/lib/swift/linux/x86_64/TensorFlow.swiftmodule
47+
RUN rm /swift-tensorflow-toolchain/usr/lib/swift/linux/libswiftTensorFlow.so
48+
4249
# Run SwiftPM tests
4350
RUN /swift-tensorflow-toolchain/usr/bin/swift test
51+
52+
# Install into toolchain
53+
# TODO: Unify this with testing. (currently there is a demangling bug).
54+
RUN /swift-tensorflow-toolchain/usr/bin/swift build -Xswiftc -module-link-name -Xswiftc TensorFlow
55+
RUN cp /swift-apis/.build/debug/TensorFlow.swiftmodule /swift-tensorflow-toolchain/usr/lib/swift/linux/x86_64/
56+
RUN cp /swift-apis/.build/debug/libTensorFlow.so /swift-tensorflow-toolchain/usr/lib/swift/linux/
57+
58+
WORKDIR /
59+
RUN git clone https://github.com/tensorflow/swift-models.git
60+
61+
WORKDIR /swift-models
62+
63+
RUN /swift-tensorflow-toolchain/usr/bin/swift build

Sources/TensorFlow/Loss.swift

Lines changed: 20 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ public func meanAbsoluteError<Scalar: TensorFlowFloatingPoint>(
5252
predicted: Tensor<Scalar>,
5353
expected: Tensor<Scalar>
5454
) -> Tensor<Scalar> {
55-
l1Loss(predicted: predicted, expected: expected, reduction: { $0.mean() })
55+
l1Loss(predicted: predicted, expected: expected, reduction: _mean)
5656
}
5757

5858
/// Returns the mean squared error between predictions and expectations.
@@ -65,7 +65,7 @@ public func meanSquaredError<Scalar: TensorFlowFloatingPoint>(
6565
predicted: Tensor<Scalar>,
6666
expected: Tensor<Scalar>
6767
) -> Tensor<Scalar> {
68-
l2Loss(predicted: predicted, expected: expected, reduction: { $0.mean() })
68+
l2Loss(predicted: predicted, expected: expected, reduction: _mean)
6969
}
7070

7171
/// Returns the mean squared logarithmic error between predictions and expectations.
@@ -83,7 +83,7 @@ public func meanSquaredLogarithmicError<Scalar: TensorFlowFloatingPoint>(
8383
) -> Tensor<Scalar> {
8484
let logPredicted = log(max(predicted, Tensor(0)) + 1)
8585
let logExpected = log(max(expected, Tensor(0)) + 1)
86-
return l2Loss(predicted: logPredicted, expected: logExpected, reduction: { $0.mean() })
86+
return l2Loss(predicted: logPredicted, expected: logExpected, reduction: _mean)
8787
}
8888

8989
/// Returns the mean absolute percentage error between predictions and expectations.
@@ -109,7 +109,7 @@ public func meanAbsolutePercentageError<Scalar: TensorFlowFloatingPoint>(
109109
public func hingeLoss<Scalar: TensorFlowFloatingPoint>(
110110
predicted: Tensor<Scalar>,
111111
expected: Tensor<Scalar>,
112-
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.mean() }
112+
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
113113
) -> Tensor<Scalar> {
114114
reduction(max(Tensor(0), Tensor(1) - expected * predicted))
115115
}
@@ -124,7 +124,7 @@ public func hingeLoss<Scalar: TensorFlowFloatingPoint>(
124124
public func squaredHingeLoss<Scalar: TensorFlowFloatingPoint>(
125125
predicted: Tensor<Scalar>,
126126
expected: Tensor<Scalar>,
127-
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.mean() }
127+
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
128128
) -> Tensor<Scalar> {
129129
reduction(hingeLoss(predicted: predicted, expected: expected).squared())
130130
}
@@ -139,7 +139,7 @@ public func squaredHingeLoss<Scalar: TensorFlowFloatingPoint>(
139139
public func categoricalHingeLoss<Scalar: TensorFlowFloatingPoint>(
140140
predicted: Tensor<Scalar>,
141141
expected: Tensor<Scalar>,
142-
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.mean() }
142+
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
143143
) -> Tensor<Scalar> {
144144
let positive = (expected * predicted).sum(alongAxes: -1)
145145
let negative = ((Tensor(1) - expected) * predicted).max(alongAxes: -1)
@@ -157,7 +157,7 @@ public func categoricalHingeLoss<Scalar: TensorFlowFloatingPoint>(
157157
public func logCoshLoss<Scalar: TensorFlowFloatingPoint>(
158158
predicted: Tensor<Scalar>,
159159
expected: Tensor<Scalar>,
160-
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.mean() }
160+
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
161161
) -> Tensor<Scalar> {
162162
let x = predicted - expected
163163
return reduction(x + softplus(Tensor(-2) * x) - log(Tensor(2)))
@@ -173,7 +173,7 @@ public func logCoshLoss<Scalar: TensorFlowFloatingPoint>(
173173
public func poissonLoss<Scalar: TensorFlowFloatingPoint>(
174174
predicted: Tensor<Scalar>,
175175
expected: Tensor<Scalar>,
176-
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.mean() }
176+
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
177177
) -> Tensor<Scalar> {
178178
reduction(predicted - expected * log(predicted))
179179
}
@@ -194,6 +194,15 @@ public func kullbackLeiblerDivergence<Scalar: TensorFlowFloatingPoint>(
194194
reduction(expected * log(expected / predicted))
195195
}
196196

197+
/// Workaround for cross-module default parameter @differentiable functions.
198+
/// Tensor<Scalar>.mean() is the preferred way to do this.
199+
@differentiable
200+
public func _mean<Scalar: TensorFlowFloatingPoint>(
201+
_ value: Tensor<Scalar>
202+
) -> Tensor<Scalar> {
203+
return value.mean()
204+
}
205+
197206
/// Returns the softmax cross entropy (categorical cross entropy) between logits and labels.
198207
///
199208
/// - Parameters:
@@ -204,7 +213,7 @@ public func kullbackLeiblerDivergence<Scalar: TensorFlowFloatingPoint>(
204213
public func softmaxCrossEntropy<Scalar: TensorFlowFloatingPoint>(
205214
logits: Tensor<Scalar>,
206215
labels: Tensor<Int32>,
207-
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.mean() }
216+
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
208217
) -> Tensor<Scalar> {
209218
reduction(softmaxCrossEntropyHelper(logits: logits, labels: labels))
210219
}
@@ -238,7 +247,7 @@ func _vjpSoftmaxCrossEntropyHelper<Scalar: TensorFlowFloatingPoint>(
238247
public func softmaxCrossEntropy<Scalar: TensorFlowFloatingPoint>(
239248
logits: Tensor<Scalar>,
240249
probabilities: Tensor<Scalar>,
241-
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.mean() }
250+
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
242251
) -> Tensor<Scalar> {
243252
reduction(softmaxCrossEntropyHelper(logits: logits, probabilities: probabilities))
244253
}
@@ -274,7 +283,7 @@ func _vjpSoftmaxCrossEntropyHelper<Scalar: TensorFlowFloatingPoint>(
274283
public func sigmoidCrossEntropy<Scalar: TensorFlowFloatingPoint>(
275284
logits: Tensor<Scalar>,
276285
labels: Tensor<Scalar>,
277-
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.mean() }
286+
reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
278287
) -> Tensor<Scalar> {
279288
// This numerically stable implementation is based on the TensorFlow Python API.
280289
let maxLogitsWithZero = max(logits, Tensor(0))

0 commit comments

Comments
 (0)