This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Add Leaky relu activation function #260

Merged
merged 5 commits on Jun 19, 2019
24 changes: 23 additions & 1 deletion Sources/TensorFlow/Operators/Math.swift
@@ -997,7 +997,7 @@ func _vjpLogSoftmax<T: TensorFlowFloatingPoint>(
}

/// Returns a tensor by applying an exponential linear unit.
- /// Specifically, computes `exp(features) - 1` if < 0, `features` otherwise.
+ /// Specifically, computes `exp(x) - 1` if < 0, `x` otherwise.
/// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
/// ](http://arxiv.org/abs/1511.07289)
@inlinable
@@ -1014,6 +1014,28 @@ func _vjpElu<T: TensorFlowFloatingPoint>(
return (y, { v in Raw.eluGrad(gradients: v, outputs: y) })
}
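For reference, the documented ELU behaviour can be sanity-checked with a small snippet like the one below; the input values are illustrative and not part of this diff:

```swift
// Illustrative only: elu applies exp(x) - 1 to negative entries and
// leaves non-negative entries unchanged.
import TensorFlow

let x = Tensor<Float>([-1.0, 0.0, 2.0])
let y = elu(x)
// Expected roughly [exp(-1) - 1, 0, 2] ≈ [-0.6321, 0.0, 2.0]
print(y)
```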

/// Returns a tensor by applying the leaky ReLU activation function
/// to the specified tensor element-wise.
/// Specifically, computes `max(x, x * alpha)`.
@inlinable
@differentiable(wrt: x, vjp: _vjpLeakyRelu)
public func leakyRelu<T: TensorFlowFloatingPoint>(
    _ x: Tensor<T>,
    alpha: Double = 0.2
) -> Tensor<T> {
    Raw.leakyRelu(features: x, alpha: alpha)
}
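As a quick illustration of the documented `max(x, x * alpha)` behaviour, here is a usage sketch; the values are chosen for illustration and are not taken from the PR:

```swift
// Illustrative only: positive entries pass through unchanged, negative
// entries are scaled by alpha.
import TensorFlow

let x = Tensor<Float>([-2.0, -0.5, 0.0, 3.0])
let y = leakyRelu(x, alpha: 0.2)
// Expected: [-0.4, -0.1, 0.0, 3.0]
print(y)
```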

@inlinable
func _vjpLeakyRelu<T: TensorFlowFloatingPoint>(
    _ x: Tensor<T>,
    alpha: Double
) -> (Tensor<T>, (Tensor<T>) -> Tensor<T>) {
    return (leakyRelu(x, alpha: alpha), { v in
        Raw.leakyReluGrad(gradients: v, features: x, alpha: alpha)
    })
}
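Since the pullback simply forwards to `Raw.leakyReluGrad`, it can be probed directly with `valueWithPullback`; the snippet below is an illustrative sketch and not part of this diff:

```swift
// Illustrative only: the pullback scales the incoming cotangent by 1
// where x > 0 and by alpha where x <= 0.
import TensorFlow

let x = Tensor<Float>([-1.0, 2.0])
let (value, pullback) = valueWithPullback(at: x) { leakyRelu($0, alpha: 0.2) }
print(value)                               // [-0.2, 2.0]
print(pullback(Tensor<Float>(ones: [2])))  // [ 0.2, 1.0]
```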

/// Computes `relu` of the specified tensor element-wise.
/// Specifically, computes `max(0, x)`.
@inlinable
8 changes: 8 additions & 0 deletions Tests/TensorFlowTests/OperatorTests/MathTests.swift
@@ -243,6 +243,13 @@ final class MathOperatorTests: XCTestCase {
XCTAssertEqual(y, expected)
}

func testLeakyRelu() {
    let x = Tensor<Float>([[-1.0, 2.0, 3.0]])
    let y = leakyRelu(x, alpha: 0.4)
    let expected = Tensor<Float>([-0.4, 2, 3])
    XCTAssertEqual(y, expected)
}
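A companion gradient check could exercise `_vjpLeakyRelu` as well; the test below is a hypothetical addition (not part of this PR) and assumes the standard `gradient(at:in:)` API:

```swift
// Hypothetical test, not in this PR: the leaky ReLU derivative is 1 for
// positive inputs and alpha for negative inputs.
func testLeakyReluGradient() {
    let x = Tensor<Float>([[-1.0, 2.0, 3.0]])
    let grad = gradient(at: x) { leakyRelu($0, alpha: 0.4).sum() }
    XCTAssertEqual(grad, Tensor<Float>([[0.4, 1.0, 1.0]]))
}
```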

func testXORInference() {
func xor(_ x: Float, _ y: Float) -> Float {
let x = Tensor<Float>([x, y]).reshaped(to: [1, 2])
@@ -314,6 +321,7 @@ final class MathOperatorTests: XCTestCase {
("testArgmax", testArgmax),
("testSoftplus", testSoftplus),
("testSoftsign", testSoftsign),
("testLeakyRelu", testLeakyRelu),
("testCeilAndFloor", testCeilAndFloor),
("testSimpleMath", testSimpleMath),
("testStandardDeviation", testStandardDeviation),