This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit 7330b8d
[Linear Algebra] Add logdet operation (#592)
Authored by param087, committed by dan-zheng
1 parent: 8258e53

File tree: 2 files changed, +39 −0 lines

Sources/TensorFlow/Operators/LinearAlgebra.swift

Lines changed: 10 additions & 0 deletions
@@ -143,6 +143,16 @@ public func trace<T: TensorFlowNumeric>(_ matrix: Tensor<T>) -> Tensor<T> {
     return matrix.diagonalPart().sum(squeezingAxes: -1)
 }
 
+/// Computes the natural logarithm of the determinant of a Hermitian positive definite matrix.
+///
+/// - Parameter matrix: A tensor of shape `[..., M, M]`.
+/// - Returns: The natural logarithm of the determinant of `matrix`.
+@inlinable
+@differentiable(wrt: matrix where T: TensorFlowFloatingPoint)
+func logdet<T: TensorFlowFloatingPoint>(_ matrix: Tensor<T>) -> Tensor<T> {
+    return 2.0 * log(cholesky(matrix).diagonalPart()).sum(squeezingAxes: -1)
+}
+
 // MARK: - Decompositions
 
 /// Returns the Cholesky decomposition of one or more square matrices.
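The Cholesky route is simple and numerically stable: for a Hermitian positive definite matrix A with factorization A = LLᵀ, det(A) = (∏ᵢ Lᵢᵢ)², so log det(A) = 2 Σᵢ log Lᵢᵢ, and summing logarithms of the diagonal avoids the overflow and underflow that forming det(A) directly can hit. Below is a minimal sketch checking this identity on the first test matrix; it assumes a Swift for TensorFlow toolchain where `Tensor`, `cholesky`, `log`, and `diagonalPart` are available, as in this repository:

```swift
import TensorFlow

// det([[6, 4], [4, 6]]) = 6 * 6 - 4 * 4 = 20, so the result should be log(20) ≈ 2.9957323.
let a = Tensor<Float>([[6.0, 4.0], [4.0, 6.0]])

// The same computation the new operator performs: 2 * Σ log(diag(L)), where A = L Lᵀ.
let viaCholesky = 2.0 * log(cholesky(a).diagonalPart()).sum(squeezingAxes: -1)
print(viaCholesky)  // ≈ 2.9957323, matching `expected` in testLogdet below
```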

Tests/TensorFlowTests/OperatorTests/LinearAlgebraTests.swift

Lines changed: 29 additions & 0 deletions
@@ -84,11 +84,40 @@ final class LinearAlgebraTests: XCTestCase {
         let expectedGradient = Tensor([a, b])
         assertEqual(computedGradient, expectedGradient, accuracy: 1e-16)
     }
+
+    func testLogdet() {
+        let input = Tensor<Float>([[[6.0, 4.0], [4.0, 6.0]], [[2.0, 6.0], [6.0, 20.0]]])
+        let expected = Tensor<Float>([2.9957323, 1.3862934])
+        let computed = logdet(input)
+        assertEqual(computed, expected, accuracy: 1e-5)
+    }
 
+    // The expected value of the gradient was computed using the following Python code:
+    // ```
+    // import tensorflow as tf
+    // x = tf.constant([[[6., 4.], [4., 6.]], [[2., 6.], [6., 20.]]])
+    // with tf.GradientTape() as tape:
+    //     tape.watch(x)
+    //     y = tf.reduce_sum(tf.linalg.logdet(x))
+    // print(tape.gradient(y, x))
+    // ```
+    func testLogdetGradient() {
+        let input = Tensor<Float>([[[6.0, 4.0], [4.0, 6.0]], [[2.0, 6.0], [6.0, 20.0]]])
+        let expectedGradient = Tensor<Float>([
+            [[ 0.29999998, -0.2       ],
+             [-0.2       ,  0.3       ]],
+            [[ 5.0000043 , -1.5000012 ],
+             [-1.5000012 ,  0.50000036]]])
+        let computedGradient = gradient(at: input) { logdet($0).sum() }
+        assertEqual(computedGradient, expectedGradient, accuracy: 1e-5)
+    }
+
     static var allTests = [
         ("testCholesky", testCholesky),
         ("testQRDecompositionApproximation", testQRDecompositionApproximation),
         ("testTrace", testTrace),
         ("testTraceGradient", testTraceGradient),
+        ("testLogdet", testLogdet),
+        ("testLogdetGradient", testLogdetGradient)
     ]
 }
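The expected gradients in `testLogdetGradient` can also be verified by hand: for a symmetric positive definite matrix A, the gradient of log det A with respect to A is A⁻¹ (a standard matrix-calculus identity, not part of the commit). Worked out for the two test matrices:

```latex
\frac{\partial \log\det A}{\partial A} = A^{-1}
\qquad \text{for symmetric positive definite } A.
% First test matrix: det = 36 - 16 = 20.
\begin{pmatrix} 6 & 4 \\ 4 & 6 \end{pmatrix}^{-1}
  = \tfrac{1}{20}\begin{pmatrix} 6 & -4 \\ -4 & 6 \end{pmatrix}
  = \begin{pmatrix} 0.3 & -0.2 \\ -0.2 & 0.3 \end{pmatrix}
% Second test matrix: det = 40 - 36 = 4.
\begin{pmatrix} 2 & 6 \\ 6 & 20 \end{pmatrix}^{-1}
  = \tfrac{1}{4}\begin{pmatrix} 20 & -6 \\ -6 & 2 \end{pmatrix}
  = \begin{pmatrix} 5 & -1.5 \\ -1.5 & 0.5 \end{pmatrix}
```

Both match the corresponding blocks of `expectedGradient` up to single-precision rounding.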
