This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Added support for 'Tensor.clipped(min:max:)' and its VJP. #361

Merged · 2 commits · Jul 14, 2019
10 changes: 0 additions & 10 deletions Sources/TensorFlow/Operators/Basic.swift
@@ -675,16 +675,6 @@ public extension Tensor {
    }
}

public extension Tensor where Scalar: Numeric {
    /// Returns a tensor by clipping scalars to a specified minimum and maximum.
    // FIXME: Define a derivative function.
    // @differentiable(wrt: self where Scalar: TensorFlowFloatingPoint)
    @inlinable
    func clipped(min: Tensor, max: Tensor) -> Tensor {
        Raw.clipByValue(t: self, clipValueMin: min, clipValueMax: max)
    }
}

//===------------------------------------------------------------------------------------------===//
// Broadcasting
//===------------------------------------------------------------------------------------------===//
54 changes: 53 additions & 1 deletion Sources/TensorFlow/Operators/Math.swift
@@ -36,7 +36,7 @@ public extension PointwiseMultiplicative {
}

//===------------------------------------------------------------------------------------------===//
// Generic elementary functions
// Generic Elementary Functions
//===------------------------------------------------------------------------------------------===//

extension Tensor: ElementaryFunctions where Scalar: TensorFlowFloatingPoint {
@@ -494,6 +494,58 @@ public extension Tensor where Scalar == Bool {
    }
}

public extension Tensor where Scalar: TensorFlowNumeric {
    /// Returns `max(min(self, max), min)`.
    @inlinable
    @differentiable(vjp: _vjpClipped where Scalar: TensorFlowFloatingPoint)
    func clipped(min: Tensor, max: Tensor) -> Tensor {
        Raw.clipByValue(t: self, clipValueMin: min, clipValueMax: max)
    }

    /// Returns `max(min(self, max), min)`.
    @inlinable
    @differentiable(wrt: (self, min) where Scalar: TensorFlowFloatingPoint)
    func clipped(min: Tensor, max: Scalar) -> Tensor {
        clipped(min: min, max: Tensor(max))
    }

    /// Returns `max(min(self, max), min)`.
    @inlinable
    @differentiable(wrt: (self, max) where Scalar: TensorFlowFloatingPoint)
    func clipped(min: Scalar, max: Tensor) -> Tensor {
        clipped(min: Tensor(min), max: max)
    }

    /// Returns `max(min(self, max), min)`.
    @inlinable
    @differentiable(wrt: self where Scalar: TensorFlowFloatingPoint)
    func clipped(min: Scalar, max: Scalar) -> Tensor {
        clipped(min: Tensor(min), max: Tensor(max))
    }
}
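
Note (not part of the diff): a minimal usage sketch of these overloads — the variable names and values below are illustrative assumptions, not from the PR. Tensor bounds broadcast element-wise, while the scalar and mixed overloads forward to the Tensor/Tensor one:

    let x = Tensor<Float>([-2.0, 0.5, 3.0])
    // Tensor bounds broadcast element-wise against `x`.
    let a = x.clipped(min: Tensor<Float>([0, 0, 0]), max: Tensor<Float>([1, 1, 1]))
    // Scalar bounds are wrapped in tensors and forwarded.
    let b = x.clipped(min: 0, max: 1)
    // a == b == [0.0, 0.5, 1.0]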

internal extension Tensor where Scalar: TensorFlowFloatingPoint {
    @inlinable
    func _vjpClipped(min: Tensor, max: Tensor) -> (Tensor, (Tensor) -> (Tensor, Tensor, Tensor)) {
        (clipped(min: min, max: max), { v in
            let selfShape = self.shapeTensor
            let minShape = min.shapeTensor
            let maxShape = max.shapeTensor
            let zeros = Tensor(zerosLike: v)
            // Masks marking where the input was clipped from below or above.
            let minMask = self .< min
            let maxMask = self .> max
            // `self` receives gradient only where it was not clipped; the
            // clipped portions flow to `min` and `max`, respectively.
            let selfGradient = v.replacing(with: zeros, where: minMask.elementsLogicalOr(maxMask))
            let minGradient = zeros.replacing(with: v, where: minMask)
            let maxGradient = zeros.replacing(with: v, where: maxMask)
            // Sum over broadcast axes so each gradient matches its argument's shape.
            let (selfAxes, minAxes) = Raw.broadcastGradientArgs(s0: selfShape, s1: minShape)
            let (_, maxAxes) = Raw.broadcastGradientArgs(s0: selfShape, s1: maxShape)
            return (selfGradient.sum(squeezingAxes: selfAxes).reshaped(toShape: selfShape),
                    minGradient.sum(squeezingAxes: minAxes).reshaped(toShape: minShape),
                    maxGradient.sum(squeezingAxes: maxAxes).reshaped(toShape: maxShape))
        })
    }
}
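
Note (not part of the diff): the pullback above partitions the incoming gradient `v` three ways — in-range elements flow to `self`, elements clipped from below flow to `min`, elements clipped from above flow to `max`. A minimal sketch of the effect, with assumed values:

    let x = Tensor<Float>([-1.0, 0.5, 2.0])
    // Differentiates through the scalar-bound overload, which is `wrt: self`.
    let dx = gradient(at: x) { x in x.clipped(min: 0, max: 1).sum() }
    // dx == [0.0, 1.0, 0.0]: clipped elements receive zero gradient.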

//===------------------------------------------------------------------------------------------===//
// Element-wise Unary Math Functions
//===------------------------------------------------------------------------------------------===//
17 changes: 17 additions & 0 deletions Tests/TensorFlowTests/OperatorTests/MathTests.swift
@@ -59,6 +59,23 @@ final class MathOperatorTests: XCTestCase {
            { x in root(x, 3) }, { x in Float.root(x, 3) })
    }

    func testClipping() {
        let x = Tensor<Float>([
            [0.45031791, 0.41123222, 0.53928467, 0.47167023, 0.15483777],
            [0.49975705, 0.71807549, 0.30396056, 0.26904690, 0.01404393],
            [0.16950939, 0.41085612, 0.79503016, 0.11977817, 0.99728241],
            [0.62510073, 0.17344792, 0.15406050, 0.40758517, 0.93683817],
            [0.15653343, 0.50502756, 0.99365925, 0.84617581, 0.17422509]])
        let clippedX = x.clipped(min: 0.2, max: 0.5)
        let expectedClippedX = Tensor<Float>([
            [0.45031791, 0.41123222, 0.50000000, 0.47167023, 0.20000000],
            [0.49975705, 0.50000000, 0.30396056, 0.26904690, 0.20000000],
            [0.20000000, 0.41085612, 0.50000000, 0.20000000, 0.50000000],
            [0.50000000, 0.20000000, 0.20000000, 0.40758517, 0.50000000],
            [0.20000000, 0.50000000, 0.50000000, 0.50000000, 0.20000000]])
        assertEqual(clippedX, expectedClippedX, accuracy: 0.0001)
    }
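
Note (not part of the diff): the test above only exercises the forward pass. A hypothetical companion check for the VJP could verify the mask behavior directly:

    func testClippingGradient() {
        let x = Tensor<Float>([-0.5, 0.3, 1.7])
        let dx = gradient(at: x) { x in x.clipped(min: 0, max: 1).sum() }
        // Gradient is 1 inside [min, max] and 0 where the input was clipped.
        XCTAssertEqual(dx, Tensor<Float>([0, 1, 0]))
    }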

    func testRsqrt() {
        let x = Tensor<Double>([1, 0.25, 1.0 / 9.0, 0.0625, 0.04])
        let target = Tensor<Double>([1, 2, 3, 4, 5]).sum()