This repository was archived by the owner on Jul 1, 2023. It is now read-only.

[Linear Algebra] Add det and slogdet #604

Merged · 14 commits · Jan 14, 2020
25 changes: 25 additions & 0 deletions Sources/TensorFlow/Operators/LinearAlgebra.swift
@@ -177,6 +177,31 @@ public func trace<T: TensorFlowNumeric>(_ matrix: Tensor<T>) -> Tensor<T> {
return matrix.diagonalPart().sum(squeezingAxes: -1)
}

/// Computes the determinant of an optionally batched square matrix.
///
/// - Parameter matrix: A tensor of shape `[..., M, M]`.
/// - Returns: A tensor containing the determinants of all input submatrices.
@inlinable
func det<T: TensorFlowFloatingPoint>(_ matrix: Tensor<T>) -> Tensor<T> {
_Raw.matrixDeterminant(matrix)
}
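
// Usage sketch (illustrative only, not part of this diff; assumes the internal
// `det` is reachable, e.g. via `@testable import TensorFlow` as in the tests below):
//
//     let a = Tensor<Float>([[[1, 2], [3, 4]]])  // shape [1, 2, 2]
//     let d = det(a)                             // ≈ [-2.0], since 1 * 4 - 2 * 3 = -2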

/// Computes the sign and the natural logarithm of the absolute value of the determinant of an
/// optionally batched square matrix.
///
/// - Parameter matrix: A tensor of shape `[N, M, M]`.
/// - Returns:
///   - sign: A tensor with shape `[N]`, representing the signs of the determinants of the input
///     submatrices.
///   - logAbsDeterminant: A tensor with shape `[N]`, representing the natural logarithms of the
///     absolute values of the determinants of the input submatrices.
@inlinable
func slogdet<T: TensorFlowFloatingPoint>(_ matrix: Tensor<T>) -> (
sign: Tensor<T>, logAbsDeterminant: Tensor<T>
) {
_Raw.logMatrixDeterminant(matrix)
}
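
// Usage sketch (illustrative only, not part of this diff): the determinant can
// be recovered from the two outputs as `sign * exp(logAbsDeterminant)`.
//
//     let a = Tensor<Float>([[[1, 2], [3, 4]]])  // det(a) = -2
//     let (sign, logAbs) = slogdet(a)            // sign ≈ [-1.0], logAbs ≈ [0.6931472]
//     let d = sign * exp(logAbs)                 // ≈ [-2.0]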

/// Computes the natural logarithm of the determinant of a Hermitian positive definite matrix.
///
/// - Parameter matrix: A tensor of shape `[..., M, M]`.
30 changes: 30 additions & 0 deletions Tests/TensorFlowTests/OperatorTests/LinearAlgebraTests.swift
@@ -115,6 +115,34 @@ final class LinearAlgebraTests: XCTestCase {
assertEqual(computedGradient, expectedGradient, accuracy: 1e-16)
}

func testDet() {
var matrix = Tensor<Float>(shape: [1, 4, 4], scalars: (0..<16).map(Float.init))
var computedDet = det(matrix)
var expectedDet = Tensor<Float>([0])
XCTAssertEqual(computedDet, expectedDet)

matrix = Tensor<Float>(shape: [2, 2, 2, 2], scalars: (0..<16).map(Float.init))
computedDet = det(matrix)
expectedDet = Tensor<Float>([[-2.0, -2.0], [-2.0, -2.0]])
assertEqual(computedDet, expectedDet, accuracy: 1e-5)
}

func testSlogdet() {
var input = Tensor<Float>(shape: [1, 2, 2], scalars: (0..<4).map(Float.init))
var expectedSigns = Tensor<Float>([-1])
var expectedLogs = Tensor<Float>([0.6931472])
var (computedSigns, computedLogs) = slogdet(input)
XCTAssertEqual(computedSigns, expectedSigns)
XCTAssertEqual(computedLogs, expectedLogs)

input = Tensor<Float>(shape: [2, 2, 2, 2], scalars: (0..<16).map(Float.init))
expectedSigns = Tensor<Float>([[-1.0, -1.0], [-1.0, -1.0]])
expectedLogs = Tensor<Float>([[0.6931472, 0.6931462], [0.6931462, 0.6931435]])
(computedSigns, computedLogs) = slogdet(input)
XCTAssertEqual(computedSigns, expectedSigns)
XCTAssertEqual(computedLogs, expectedLogs)
}
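
// A possible additional consistency check (sketch only, not part of this PR):
// the determinant recovered from `slogdet` should agree with `det` up to
// floating-point error.
//
//   func testSlogdetConsistentWithDet() {
//     let input = Tensor<Float>([[[6.0, 4.0], [4.0, 6.0]], [[2.0, 6.0], [6.0, 20.0]]])
//     let (signs, logs) = slogdet(input)
//     assertEqual(signs * exp(logs), det(input), accuracy: 1e-4)
//   }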

func testLogdet() {
let input = Tensor<Float>([[[6.0, 4.0], [4.0, 6.0]], [[2.0, 6.0], [6.0, 20.0]]])
let expected = Tensor<Float>([2.9957323, 1.3862934])
@@ -148,6 +176,8 @@ final class LinearAlgebraTests: XCTestCase {
("testSVD", testSVD),
("testTrace", testTrace),
("testTraceGradient", testTraceGradient),
("testDet", testDet),
("testSlogdet", testSlogdet),
("testLogdet", testLogdet),
("testLogdetGradient", testLogdetGradient)
]
1 change: 0 additions & 1 deletion Tests/TensorFlowTests/OperatorTests/MatrixTests.swift
@@ -15,7 +15,6 @@
import XCTest
@testable import TensorFlow


final class MatrixTests: XCTestCase {
func testDiagonalPart() {
// Test on a matrix.