Skip to content
This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit 8f06bee

Browse files
jrabary and dan-zheng
authored and committed
Add huber loss support (#534)
1 parent c47db87 commit 8f06bee

File tree

2 files changed

+56
-0
lines changed

2 files changed

+56
-0
lines changed

Sources/TensorFlow/Loss.swift

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -290,3 +290,31 @@ public func sigmoidCrossEntropy<Scalar: TensorFlowFloatingPoint>(
290290
let negAbsLogits = max(logits, -logits) // Custom `abs` to compute gradients at `0`.
291291
return reduction(maxLogitsWithZero - logits * labels + log1p(exp(-negAbsLogits)))
292292
}
293+
294+
/// Returns the Huber loss between predictions and expectations.
///
/// For each value `x` in the difference `expected - predicted`, the loss is:
/// - `0.5 * x^2` if `abs(x) <= δ`.
/// - `0.5 * δ^2 + δ * (abs(x) - δ)` otherwise.
///
/// - Source: [Wikipedia article](https://en.wikipedia.org/wiki/Huber_loss).
///
/// - Parameters:
///   - predicted: Predicted outputs from a neural network.
///   - expected: Expected values, i.e. targets, that correspond to the correct output.
///   - delta: A floating point scalar representing the point where the Huber loss function changes
///     from quadratic to linear.
///   - reduction: Reduction to apply on the computed element-wise loss values.
@differentiable(wrt: predicted)
public func huberLoss<Scalar: TensorFlowFloatingPoint>(
    predicted: Tensor<Scalar>,
    expected: Tensor<Scalar>,
    delta: Scalar,
    reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.sum() }
) -> Tensor<Scalar> {
    let error = expected - predicted
    let absError = abs(error)
    // Split |x| into a quadratic part (clipped at δ) and a linear part (the excess over δ):
    // `0.5 * quadratic^2` covers the `abs(x) <= δ` regime and `δ * linear` adds the linear tail,
    // which together reproduce both branches of the piecewise definition above.
    let quadratic = min(absError, delta)
    let linear = absError - quadratic
    return reduction((0.5 * quadratic * quadratic) + (delta * linear))
}

Tests/TensorFlowTests/LossTests.swift

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,33 @@ final class LossTests: XCTestCase {
216216
[-0.0625, -0.01490036, 0.04759964, 0.0]])
217217
assertEqual(computedGradient, expectedGradient, accuracy: 1e-6)
218218
}
219+
func testHuberLoss() {
    let predictions = Tensor<Float>([[0.9, 0.2, 0.2], [0.8, 0.4, 0.6]])
    let labels = Tensor<Float>([[1, 0, 1], [1, 0, 0]])

    do {
        // When predictions equal the targets, the Huber loss must vanish.
        // Test adapted from:
        // https://github.com/tensorflow/tensorflow/blob/148f07323f97ef54998f28cd95c195064ce2c426/tensorflow/python/keras/losses_test.py#L1554
        assertEqual(
            huberLoss(predicted: predictions, expected: predictions, delta: 1),
            Tensor(0),
            accuracy: 1e-6)
    }

    do {
        // Test adapted from:
        // https://github.com/tensorflow/tensorflow/blob/148f07323f97ef54998f28cd95c195064ce2c426/tensorflow/python/keras/losses_test.py#L1560
        // The expected loss was computed using Python TensorFlow 2.0.0-beta1:
        // ```
        // import tensorflow as tf  # 2.0.0-beta1
        // predictions = tf.constant([[0.9, 0.2, 0.2], [0.8, 0.4, 0.6]])
        // labels = tf.constant([[1.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
        // loss = tf.losses.Huber(delta=1.0, reduction=tf.losses.Reduction.SUM)
        // print(loss(labels, predictions))
        // # tf.Tensor(0.62500006, shape=(), dtype=float32)
        // ```
        assertEqual(
            huberLoss(predicted: predictions, expected: labels, delta: Float(1)),
            Tensor(0.62500006),
            accuracy: 1e-6)
    }
}
219246

220247
static var allTests = [
221248
("testL1Loss", testL1Loss),
@@ -237,5 +264,6 @@ final class LossTests: XCTestCase {
237264
testSoftmaxCrossEntropyWithProbabilitiesGrad),
238265
("testSigmoidCrossEntropyLoss", testSigmoidCrossEntropyLoss),
239266
("testSigmoidCrossEntropyGradient", testSigmoidCrossEntropyGradient),
267+
("testHuberLoss", testHuberLoss)
240268
]
241269
}

0 commit comments

Comments
 (0)