[TF] TF-496: Conditionally conform SIMD types to Differentiable #24786

Merged
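
This PR conditionally conforms the SIMD types to Differentiable and registers VJPs for their arithmetic operators, init(repeating:), and sum(), so SIMD values can be used directly in differentiable code. A minimal sketch of the intended use, assuming a Swift for TensorFlow toolchain where gradient(at:in:) is available; the PR's own tests may differ:

// With this PR, SIMD4<Float> is Differentiable with TangentVector == Self.
let x = SIMD4<Float>(1, 2, 3, 4)
let g = gradient(at: x) { x in (x * x + x).sum() }
// d/dx of (x * x + x).sum() is 2 * x + 1 in each lane:
// g == SIMD4<Float>(3, 5, 7, 9)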
Commits (33)
7fb96b1
Initial broken version.
bartchr808 May 13, 2019
d3182a3
Add VectorView.
bartchr808 May 13, 2019
aa23187
WIP while waiting for swift forums answer.
bartchr808 May 14, 2019
fe38362
WIP: removed from AdditiveArithmetic, kept in SIMD.
bartchr808 May 14, 2019
5f109a0
Remove += from SIMD protocol, move it to Struct SIMDn.
bartchr808 May 14, 2019
304026b
Have SIMDn conform to Differentiable.
bartchr808 May 14, 2019
b75db94
Forgot to remove zero, already implemented in SIMDVector.swift
bartchr808 May 14, 2019
d67acef
PR fix: spacing.
bartchr808 May 14, 2019
17f52bf
Start making + differentiable
bartchr808 May 15, 2019
031f384
Get + and - differentiable.
bartchr808 May 15, 2019
83e7f75
WIP
bartchr808 May 15, 2019
0268fc6
Make all necessary operators differentiable.
bartchr808 May 16, 2019
72d13e5
Address 1st batch of PR comments.
bartchr808 May 16, 2019
91fd99b
Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
bartchr808 May 16, 2019
e057686
Fix due to changes in PR #24825.
bartchr808 May 16, 2019
260f2fb
Address 2nd batch of PR comments.
bartchr808 May 16, 2019
c4de3c9
Make _vjpSubscript internal and inline.
bartchr808 May 16, 2019
e4e96dd
WIP: test for + doesn't seem to work, filed JIRA bug.
bartchr808 May 16, 2019
5491b5b
Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
bartchr808 May 17, 2019
ec43e8d
Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
bartchr808 May 28, 2019
9cf828e
WIP: add back functions
bartchr808 May 29, 2019
12f674e
Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
bartchr808 May 29, 2019
0107e98
Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
bartchr808 May 30, 2019
85473be
Make negation diff & start fixing tests.
bartchr808 May 30, 2019
36f687e
Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
bartchr808 Jun 2, 2019
99b8408
Fix bugs in code and add tests
bartchr808 Jun 2, 2019
ffdb4c4
Get sum() differentiable.
bartchr808 Jun 3, 2019
c8df93f
Cleanup and additional init(repeating:) test.
bartchr808 Jun 3, 2019
2821b8f
Fix up AdditiveArithmetic conformance.
bartchr808 Jun 3, 2019
bc32cf9
Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
bartchr808 Jun 13, 2019
7abb1c3
Code cleanup and additional tests.
bartchr808 Jun 13, 2019
0adb6eb
White space.
bartchr808 Jun 13, 2019
d73f3da
Add generic tests.
bartchr808 Jun 13, 2019
274 changes: 261 additions & 13 deletions stdlib/public/core/SIMDVector.swift
@@ -90,6 +90,13 @@ extension SIMD {

/// A vector with the specified value in all lanes.
@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpInit(repeating:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.TangentVector == Self,
Scalar.TangentVector == Scalar)
public init(repeating value: Scalar) {
self.init()
for i in indices { self[i] = value }
@@ -779,29 +786,53 @@ extension SIMD where Scalar: FixedWidthInteger {

// Implementations of floating-point operations. These should eventually all
// be replaced with @_semantics to lower directly to vector IR nodes.
extension SIMD where Scalar : FloatingPoint {
@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpAdd(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint,
Self.TangentVector.Scalar : BinaryFloatingPoint)
public static func +(lhs: Self, rhs: Self) -> Self {
var result = Self()
for i in result.indices { result[i] = lhs[i] + rhs[i] }
return result
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpSubtract(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint,
Self.TangentVector.Scalar : BinaryFloatingPoint)
public static func -(lhs: Self, rhs: Self) -> Self {
var result = Self()
for i in result.indices { result[i] = lhs[i] - rhs[i] }
return result
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpMultiply(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint,
Self.TangentVector == Self)
public static func *(lhs: Self, rhs: Self) -> Self {
var result = Self()
for i in result.indices { result[i] = lhs[i] * rhs[i] }
return result
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpDivide(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint,
Self.TangentVector == Self)
public static func /(lhs: Self, rhs: Self) -> Self {
var result = Self()
for i in result.indices { result[i] = lhs[i] / rhs[i] }
@@ -842,7 +873,16 @@ extension SIMD where Scalar: FloatingPoint {
}

/// Returns the sum of the scalars in the vector.
// SWIFT_ENABLE_TENSORFLOW
// FIXME: TF-545: we want the sum() func to be marked as
// `@_alwaysEmitIntoClient`, as before, when we define the VJP.
@inlinable
@differentiable(vjp: _vjpSum
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Scalar.TangentVector : BinaryFloatingPoint,
Self.TangentVector == Self)
public func sum() -> Scalar {
// Implementation note: this should eventually be defined to lower to either
// llvm.experimental.vector.reduce.fadd or an explicit tree-sum. Open-
@@ -1157,60 +1197,112 @@ extension SIMD where Scalar: FixedWidthInteger {
extension SIMD where Scalar: FloatingPoint {

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpNegate(rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint,
Self.TangentVector.Scalar : BinaryFloatingPoint)
public static prefix func -(rhs: Self) -> Self {
return 0 - rhs
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpAdd(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : Differentiable & BinaryFloatingPoint,
Scalar.TangentVector : BinaryFloatingPoint,
Self.TangentVector.Scalar == Scalar.TangentVector)
public static func +(lhs: Scalar, rhs: Self) -> Self {
return Self(repeating: lhs) + rhs
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpSubtract(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : Differentiable & BinaryFloatingPoint,
Scalar.TangentVector : BinaryFloatingPoint,
Self.TangentVector.Scalar == Scalar.TangentVector)
public static func -(lhs: Scalar, rhs: Self) -> Self {
return Self(repeating: lhs) - rhs
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpMultiply(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.TangentVector == Self,
Scalar.TangentVector == Scalar)
public static func *(lhs: Scalar, rhs: Self) -> Self {
return Self(repeating: lhs) * rhs
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpDivide(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.TangentVector == Self,
Scalar.TangentVector == Scalar)
public static func /(lhs: Scalar, rhs: Self) -> Self {
return Self(repeating: lhs) / rhs
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpAdd(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : Differentiable & BinaryFloatingPoint,
Scalar.TangentVector : BinaryFloatingPoint,
Self.TangentVector.Scalar == Scalar.TangentVector)
public static func +(lhs: Self, rhs: Scalar) -> Self {
return lhs + Self(repeating: rhs)
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpSubtract(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : Differentiable & BinaryFloatingPoint,
Scalar.TangentVector : BinaryFloatingPoint,
Self.TangentVector.Scalar == Scalar.TangentVector)
public static func -(lhs: Self, rhs: Scalar) -> Self {
return lhs - Self(repeating: rhs)
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpMultiply(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.TangentVector == Self,
Scalar.TangentVector == Scalar)
public static func *(lhs: Self, rhs: Scalar) -> Self {
return lhs * Self(repeating: rhs)
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpDivide(lhs:rhs:)
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.TangentVector == Self,
Scalar.TangentVector == Scalar)
public static func /(lhs: Self, rhs: Scalar) -> Self {
return lhs / Self(repeating: rhs)
}

[Review comment by bartchr808 (Contributor Author): In order to make SIMD conform to AdditiveArithmetic, I had to remove both of these default implementations, since two protocols cannot both provide default implementations. Let me know if this is okay, or if there is something else we should do. A sketch of the conflict follows this operator group.]

@_transparent
public static func +=(lhs: inout Self, rhs: Self) {
lhs = lhs + rhs
}

@_transparent
public static func -=(lhs: inout Self, rhs: Self) {
lhs = lhs - rhs
}
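
// Illustrative sketch (not part of the original diff), with hypothetical names: the
// conflict described in the review comment above. If two protocols both declare `+=`
// and both supply a default implementation, a type conforming to both runs into
// ambiguity, so one set of defaults has to be dropped.
protocol VectorArithmetic {
  static func += (lhs: inout Self, rhs: Self)
}
extension VectorArithmetic {
  static func += (lhs: inout Self, rhs: Self) {}  // default A
}
protocol Accumulating {
  static func += (lhs: inout Self, rhs: Self)
}
extension Accumulating {
  static func += (lhs: inout Self, rhs: Self) {}  // default B
}
// struct V: VectorArithmetic, Accumulating {}
// var v = V(); v += V()   // expected: ambiguous use of operator '+='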

@_transparent
public static func *=(lhs: inout Self, rhs: Self) {
lhs = lhs * rhs
@@ -1407,3 +1499,159 @@ where T: SIMD, T.Scalar: FloatingPoint {
}
return result
}

// SWIFT_ENABLE_TENSORFLOW
extension SIMD
where Self : Differentiable,
TangentVector : SIMD,
Scalar : BinaryFloatingPoint,
TangentVector.Scalar : BinaryFloatingPoint {
@inlinable
static func _vjpAdd(lhs: Self, rhs: Self)
-> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
return (lhs + rhs, { v in
return (v, v)
})
}

@inlinable
static func _vjpSubtract(lhs: Self, rhs: Self)
-> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
return (lhs - rhs, { v in
return (v, -v)
})
}

@inlinable
static func _vjpNegate(rhs: Self)
-> (Self, (TangentVector) -> (TangentVector)) {
return (-rhs, { v in
return -v
})
}
}

extension SIMD
where Self : Differentiable,
TangentVector : SIMD,
Scalar : BinaryFloatingPoint,
Self.TangentVector == Self {
@inlinable
static func _vjpMultiply(lhs: Self, rhs: Self)
-> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
return (lhs * rhs, { v in
return (v * rhs, v * lhs)
})
}

@inlinable
static func _vjpDivide(lhs: Self, rhs: Self)
-> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
return (lhs / rhs, { v in
(v / rhs, -lhs / (rhs * rhs) * v)
})
}
}
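
// Illustrative sketch (not part of the original diff): the closures above encode the
// product and quotient rules elementwise. Assuming the toolchain's two-argument
// pullback(at:_:in:) helper, one would expect:
//   let pb = pullback(at: SIMD2<Float>(2, 3), SIMD2<Float>(4, 5)) { $0 * $1 }
//   pb(SIMD2<Float>(1, 1))   // (SIMD2<Float>(4, 5), SIMD2<Float>(2, 3)), i.e. (rhs, lhs)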

extension SIMD
where Self : Differentiable,
TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Scalar.TangentVector : BinaryFloatingPoint,
TangentVector.Scalar == Scalar.TangentVector {
@inlinable
static func _vjpAdd(lhs: Scalar, rhs: Self)
-> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
return (lhs + rhs, { v in
return (v.sum(), v)
})
}

@inlinable
static func _vjpSubtract(lhs: Scalar, rhs: Self)
-> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
return (lhs - rhs, { v in
return (v.sum(), -v)
})
}

@inlinable
static func _vjpAdd(lhs: Self, rhs: Scalar)
-> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
return (lhs + rhs, { v in
return (v, v.sum())
})
}

@inlinable
static func _vjpSubtract(lhs: Self, rhs: Scalar)
-> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
return (lhs - rhs, { v in
return (v, -v.sum())
})
}
}
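
// Illustrative sketch (not part of the original diff): a scalar operand is broadcast
// across every lane, so its cotangent is the lane-sum of the incoming vector
// cotangent, which is why the closures above call v.sum(). Assuming pullback(at:_:in:):
//   let pb = pullback(at: Float(2), SIMD4<Float>(1, 2, 3, 4)) { $0 + $1 }
//   pb(SIMD4<Float>(repeating: 1))   // (4.0, SIMD4<Float>(1, 1, 1, 1))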

extension SIMD
where Self : Differentiable,
TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.TangentVector == Self,
Scalar.TangentVector == Scalar {
@inlinable
static func _vjpMultiply(lhs: Self, rhs: Scalar)
-> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
return (lhs * rhs, { v in
return (v * rhs, (v * lhs).sum())
})
}

@inlinable
static func _vjpDivide(lhs: Self, rhs: Scalar)
-> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
return (lhs / rhs, { v in
(v / rhs, (-lhs / (rhs * rhs) * v).sum())
})
}

@inlinable
static func _vjpMultiply(lhs: Scalar, rhs: Self)
-> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
return (lhs * rhs, { v in
return ((v * rhs).sum(), v * lhs)
})
}

@inlinable
static func _vjpDivide(lhs: Scalar, rhs: Self)
-> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
return (lhs / rhs, { v in
((v / rhs).sum(), -lhs / (rhs * rhs) * v)
})
}
}
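
// Illustrative sketch (not part of the original diff): for scalar-vector * and /, the
// scalar's cotangent combines the product/quotient rule with a lane-sum reduction.
// Assuming pullback(at:_:in:):
//   let pb = pullback(at: Float(2), SIMD2<Float>(3, 4)) { $0 * $1 }
//   pb(SIMD2<Float>(1, 1))   // (7.0, SIMD2<Float>(2, 2)), since 1*3 + 1*4 == 7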

extension SIMD
where Self : Differentiable,
TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Scalar.TangentVector : BinaryFloatingPoint,
TangentVector == Self {
@inlinable
func _vjpSum() -> (Scalar, (Scalar.TangentVector) -> TangentVector) {
return (sum(), { v in Self(repeating: Scalar(v)) })
}
}
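
// Illustrative sketch (not part of the original diff): the pullback of sum() broadcasts
// the incoming scalar cotangent back across all lanes via init(repeating:), so the
// gradient of a plain lane-sum is the all-ones vector. Assuming gradient(at:in:):
//   gradient(at: SIMD4<Float>(1, 2, 3, 4)) { $0.sum() }   // SIMD4<Float>(1, 1, 1, 1)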

extension SIMD
where Self : Differentiable,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.TangentVector == Self,
Scalar.TangentVector == Scalar {
@usableFromInline
static func _vjpInit(repeating value: Scalar)
-> (Self, (TangentVector) -> Scalar.TangentVector) {
return (Self(repeating: value), { v in v.sum() })
}
}
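
Taken together, the attributes and VJPs above let ordinary SIMD arithmetic participate in reverse-mode differentiation end to end. A short sketch, assuming the toolchain's valueWithGradient(at:in:) helper and SIMD4<Float>; the PR's own tests may differ:

let w = SIMD4<Float>(1, 2, 3, 4)
let (value, grad) = valueWithGradient(at: w) { w -> Float in
  let scaled = 0.5 * (w + 1)      // scalar * vector and vector + scalar, with the VJPs above
  return (scaled * scaled).sum()  // elementwise * and sum(), with the VJPs above
}
// value == 0.25 * ((w + 1) * (w + 1)).sum() == 13.5
// grad  == 0.5 * (w + 1) == SIMD4<Float>(1.0, 1.5, 2.0, 2.5)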