This repository was archived by the owner on Jul 1, 2023. It is now read-only.

remove default argument closures #573

Merged: 2 commits, Dec 5, 2019
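For context, the change replaces closure-literal default arguments on `@differentiable` reduction parameters with named top-level functions (`_sum`, `_mean`), as the TF-1030 comments in the diff below explain. A minimal sketch of the two styles, using hypothetical function names rather than the library's actual signatures:

```swift
import TensorFlow

/// Named reduction in the style this PR introduces; the library's `_sum` is analogous.
@differentiable
func sumReduction<Scalar: TensorFlowFloatingPoint>(_ value: Tensor<Scalar>) -> Tensor<Scalar> {
    value.sum()
}

// Style being removed from Loss.swift: a closure literal as the default argument.
func lossWithClosureDefault<Scalar: TensorFlowFloatingPoint>(
    _ values: Tensor<Scalar>,
    reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.sum() }
) -> Tensor<Scalar> {
    reduction(values)
}

// Style being introduced: a named @differentiable function as the default argument.
func lossWithFunctionDefault<Scalar: TensorFlowFloatingPoint>(
    _ values: Tensor<Scalar>,
    reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = sumReduction
) -> Tensor<Scalar> {
    reduction(values)
}
```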
14 changes: 7 additions & 7 deletions Sources/TensorFlow/BackwardsCompatibility.swift
@@ -52,7 +52,7 @@ public func hingeLoss<Scalar: TensorFlowFloatingPoint>(
predicted: Tensor<Scalar>,
expected: Tensor<Scalar>
) -> Tensor<Scalar> {
- hingeLoss(predicted: predicted, expected: expected, reduction: _mean)
+ hingeLoss(predicted: predicted, expected: expected, reduction: { $0.mean() })
}

/// Returns the squared hinge loss between predictions and expectations.
@@ -65,7 +65,7 @@ public func squaredHingeLoss<Scalar: TensorFlowFloatingPoint>(
predicted: Tensor<Scalar>,
expected: Tensor<Scalar>
) -> Tensor<Scalar> {
- squaredHingeLoss(predicted: predicted, expected: expected, reduction: _mean)
+ squaredHingeLoss(predicted: predicted, expected: expected, reduction: { $0.mean() })
}

/// Returns the categorical hinge loss between predictions and expectations.
@@ -78,7 +78,7 @@ public func categoricalHingeLoss<Scalar: TensorFlowFloatingPoint>(
predicted: Tensor<Scalar>,
expected: Tensor<Scalar>
) -> Tensor<Scalar> {
- categoricalHingeLoss(predicted: predicted, expected: expected, reduction: _mean)
+ categoricalHingeLoss(predicted: predicted, expected: expected, reduction: { $0.mean() })
}

/// Returns the logarithm of the hyperbolic cosine of the error between predictions and
@@ -92,7 +92,7 @@ public func logCoshLoss<Scalar: TensorFlowFloatingPoint>(
predicted: Tensor<Scalar>,
expected: Tensor<Scalar>
) -> Tensor<Scalar> {
- logCoshLoss(predicted: predicted, expected: expected, reduction: _mean)
+ logCoshLoss(predicted: predicted, expected: expected, reduction: { $0.mean() })
}

/// Returns the Poisson loss between predictions and expectations.
@@ -105,7 +105,7 @@ public func poissonLoss<Scalar: TensorFlowFloatingPoint>(
predicted: Tensor<Scalar>,
expected: Tensor<Scalar>
) -> Tensor<Scalar> {
- poissonLoss(predicted: predicted, expected: expected, reduction: _mean)
+ poissonLoss(predicted: predicted, expected: expected, reduction: { $0.mean() })
}

/// Returns the Kullback-Leibler divergence (KL divergence) between between expectations and
@@ -132,7 +132,7 @@ public func softmaxCrossEntropy<Scalar: TensorFlowFloatingPoint>(
logits: Tensor<Scalar>,
probabilities: Tensor<Scalar>
) -> Tensor<Scalar> {
- softmaxCrossEntropy(logits: logits, probabilities: probabilities, reduction: _mean)
+ softmaxCrossEntropy(logits: logits, probabilities: probabilities, reduction: { $0.mean() })
}

/// Returns the sigmoid cross entropy (binary cross entropy) between logits and labels.
@@ -144,5 +144,5 @@ public func sigmoidCrossEntropy<Scalar: TensorFlowFloatingPoint>(
logits: Tensor<Scalar>,
labels: Tensor<Scalar>
) -> Tensor<Scalar> {
- sigmoidCrossEntropy(logits: logits, labels:labels, reduction: _mean)
+ sigmoidCrossEntropy(logits: logits, labels:labels, reduction: { $0.mean() })
}
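These overloads keep the old mean-reduced behavior by forwarding to the reduction-parameterized functions in Loss.swift. A short usage sketch, with made-up tensor values; closure literals are fine when written explicitly at a call site, the named-function workaround only matters for default arguments:

```swift
import TensorFlow

let logits = Tensor<Float>([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
let labels = Tensor<Float>([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])

// Mean-reduced convenience overload from BackwardsCompatibility.swift:
let meanLoss = softmaxCrossEntropy(logits: logits, probabilities: labels)

// Equivalent explicit call into the reduction-parameterized API in Loss.swift:
let sameLoss = softmaxCrossEntropy(
    logits: logits, probabilities: labels, reduction: { $0.mean() })
```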
37 changes: 24 additions & 13 deletions Sources/TensorFlow/Loss.swift
@@ -22,7 +22,7 @@
public func l1Loss<Scalar: TensorFlowFloatingPoint>(
predicted: Tensor<Scalar>,
expected: Tensor<Scalar>,
- reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.sum() }
+ reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _sum
) -> Tensor<Scalar> {
reduction(abs(expected - predicted))
}
@@ -37,7 +37,7 @@ public func l1Loss<Scalar: TensorFlowFloatingPoint>(
public func l2Loss<Scalar: TensorFlowFloatingPoint>(
predicted: Tensor<Scalar>,
expected: Tensor<Scalar>,
- reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.sum() }
+ reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _sum
) -> Tensor<Scalar> {
reduction((expected - predicted).squared())
}
@@ -189,20 +189,11 @@ public func poissonLoss<Scalar: TensorFlowFloatingPoint>(
public func kullbackLeiblerDivergence<Scalar: TensorFlowFloatingPoint>(
predicted: Tensor<Scalar>,
expected: Tensor<Scalar>,
- reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.sum() }
+ reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _sum
) -> Tensor<Scalar> {
reduction(expected * log(expected / predicted))
}
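The body above is the elementwise KL term `expected * log(expected / predicted)` followed by the reduction. A small worked example with made-up distributions:

```swift
import TensorFlow

let expected = Tensor<Float>([0.5, 0.5])    // "true" distribution
let predicted = Tensor<Float>([0.25, 0.75]) // model distribution
// Elementwise: [0.5 * ln(2.0), 0.5 * ln(2.0 / 3.0)] ≈ [0.3466, -0.2027]
// Sum-reduced divergence ≈ 0.1438
let kl = kullbackLeiblerDivergence(predicted: predicted, expected: expected)
```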

- /// Workaround for cross-module default parameter @differentiable functions.
- /// Tensor<Scalar>.mean() is the preferred way to do this.
- @differentiable
- public func _mean<Scalar: TensorFlowFloatingPoint>(
- _ value: Tensor<Scalar>
- ) -> Tensor<Scalar> {
- return value.mean()
- }

/// Returns the softmax cross entropy (categorical cross entropy) between logits and labels.
///
/// - Parameters:
@@ -310,11 +301,31 @@ public func huberLoss<Scalar: TensorFlowFloatingPoint>(
predicted: Tensor<Scalar>,
expected: Tensor<Scalar>,
delta: Scalar,
- reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.sum() }
+ reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _sum
) -> Tensor<Scalar> {
let error = expected - predicted
let absError = abs(error)
let quadratic = min(absError, delta)
let linear = absError - quadratic
return reduction((0.5 * quadratic * quadratic) + (delta * linear))
}
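To make the Huber formula above concrete, a worked example with made-up values and `delta = 1`; the reduction is passed explicitly here, matching the updated tests further down:

```swift
import TensorFlow

let predicted = Tensor<Float>([0.0, 3.0])
let expected = Tensor<Float>([1.0, 1.0])
// error = [1, -2]; absError = [1, 2]; quadratic = min(absError, 1) = [1, 1]
// linear = absError - quadratic = [0, 1]
// per-element loss = 0.5 * quadratic^2 + delta * linear = [0.5, 1.5]; sum = 2.0
let loss = huberLoss(
    predicted: predicted, expected: expected, delta: 1, reduction: { $0.sum() })
```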

+ /// Workaround for TF-1030 so that we can use sum as a default argument for reductions.
+ /// `Tensor<Scalar>.sum()` is the preferred way to do this.
+ // TODO(TF-1030): Remove this and replace with `{ $0.sum() }`.
+ @differentiable
+ public func _sum<Scalar: TensorFlowFloatingPoint>(
+ _ value: Tensor<Scalar>
+ ) -> Tensor<Scalar> {
+ return value.sum()
+ }

+ /// Workaround for TF-1030 so that we can use mean as a default argument for reductions.
+ /// `Tensor<Scalar>.mean()` is the preferred way to do this.
+ // TODO(TF-1030): Remove this and replace with `{ $0.mean() }`.
+ @differentiable
+ public func _mean<Scalar: TensorFlowFloatingPoint>(
+ _ value: Tensor<Scalar>
+ ) -> Tensor<Scalar> {
+ return value.mean()
+ }
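As the comments note, `_sum` and `_mean` exist only so reductions can be defaulted across module boundaries; `Tensor.sum()` and `Tensor.mean()` (or closure literals) remain the preferred spelling at call sites. A short sketch of both call styles, with made-up values:

```swift
import TensorFlow

let predicted = Tensor<Float>([0.5, 1.5])
let expected = Tensor<Float>([1.0, 1.0])

// Relies on the new `_sum` default: sum(|expected - predicted|) = 1.0
let summed = l1Loss(predicted: predicted, expected: expected)

// A closure literal is still fine when written explicitly at the call site: mean = 0.5
let averaged = l1Loss(predicted: predicted, expected: expected, reduction: { $0.mean() })
```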
8 changes: 6 additions & 2 deletions Tests/TensorFlowTests/LossTests.swift
@@ -223,7 +223,9 @@ final class LossTests: XCTestCase {
do {
// Test adapted from:
// https://github.com/tensorflow/tensorflow/blob/148f07323f97ef54998f28cd95c195064ce2c426/tensorflow/python/keras/losses_test.py#L1554
- let loss = huberLoss(predicted: predictions, expected: predictions, delta: 1)
+ // TODO(TF-1025): Remove `reduction`, use default instead.
+ let loss = huberLoss(
+ predicted: predictions, expected: predictions, delta: 1, reduction: { $0.sum() })
assertEqual(loss, Tensor(0), accuracy: 1e-6)
}

@@ -239,7 +241,9 @@ final class LossTests: XCTestCase {
// print(loss(labels, predictions))
// # tf.Tensor(0.62500006, shape=(), dtype=float32)
// ```
- let loss = huberLoss(predicted: predictions, expected: labels, delta: Float(1))
+ // TODO(TF-1025): Remove `reduction`, use default instead.
+ let loss = huberLoss(
+ predicted: predictions, expected: labels, delta: Float(1), reduction: { $0.sum() })
assertEqual(loss, Tensor(0.62500006), accuracy: 1e-6)
}
}