[TF] TF-496: Conditionally conform SIMD types to Differentiable #24786
Merged: bartchr808 merged 33 commits into swiftlang:tensorflow from bartchr808:TF-496-conform-SIMD-types-differentiable on Jun 14, 2019.
Changes from all commits (33 commits, all by bartchr808):
7fb96b1  Initial broken version.
d3182a3  Add VectorView.
aa23187  WIP while waiting for swift forums answer.
fe38362  WIP: removed from AdditiveArithmetic, kept in CIMD.
5f109a0  Remove += from SIMD protocol, move it to Struct SIMDn.
304026b  Have SIMDn conform to Differentiable.
b75db94  Forgot to remove zero, already implemented in SIMDVector.swift
d67acef  PR fix: spacing.
17f52bf  Start making + differentiable
031f384  Get + and - differentiable.
83e7f75  WIP
0268fc6  Make all necessary operators differentiable.
72d13e5  Address 1st batch of PR comments.
91fd99b  Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
e057686  Fix due to changes in PR #24825.
260f2fb  Address 2nd batch of PR comments.
c4de3c9  Make _vjpSubscript internal and inline.
e4e96dd  WIP: test for + doesn't seem to work, filed JIRA bug.
5491b5b  Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
ec43e8d  Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
9cf828e  WIP: add back functions
12f674e  Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
0107e98  Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
85473be  Make negation diff & start fixing tests.
36f687e  Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
99b8408  Fix bugs in code and add tests
ffdb4c4  Get sum() differentiable.
c8df93f  Cleanup and additional init(repeating:) test.
2821b8f  Fix up AdditiveArithmetic conformance.
bc32cf9  Merge branch 'tensorflow' into TF-496-conform-SIMD-types-differentiable
7abb1c3  Code cleanup and additional tests.
0adb6eb  White space.
d73f3da  Add generic tests.
@@ -90,6 +90,13 @@ extension SIMD {

  /// A vector with the specified value in all lanes.
  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpInit(repeating:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint & Differentiable,
                        Self.TangentVector == Self,
                        Scalar.TangentVector == Scalar)
  public init(repeating value: Scalar) {
    self.init()
    for i in indices { self[i] = value }
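For illustration only, here is a minimal usage sketch of the derivative this hunk registers, assuming a Swift for TensorFlow toolchain where gradient(at:in:) is available in the standard library. The pullback _vjpInit(repeating:) (defined near the end of this diff) sums the lane cotangents back into the scalar.

// Hypothetical sketch, not part of this diff.
// f(s) = (SIMD4(repeating: s) .* (1, 2, 3, 4)).sum(), so df/ds = 1 + 2 + 3 + 4 = 10.
let ds = gradient(at: Float(3)) { s in
  (SIMD4<Float>(repeating: s) * SIMD4<Float>(1, 2, 3, 4)).sum()
}
// ds == 10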
@@ -779,29 +786,53 @@ extension SIMD where Scalar: FixedWidthInteger {

// Implementations of floating-point operations. These should eventually all
// be replaced with @_semantics to lower directly to vector IR nodes.
extension SIMD where Scalar : FloatingPoint {
  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpAdd(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint,
                        Self.TangentVector.Scalar : BinaryFloatingPoint)
  public static func +(lhs: Self, rhs: Self) -> Self {
    var result = Self()
    for i in result.indices { result[i] = lhs[i] + rhs[i] }
    return result
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpSubtract(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint,
                        Self.TangentVector.Scalar : BinaryFloatingPoint)
  public static func -(lhs: Self, rhs: Self) -> Self {
    var result = Self()
    for i in result.indices { result[i] = lhs[i] - rhs[i] }
    return result
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpMultiply(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint,
                        Self.TangentVector == Self)
  public static func *(lhs: Self, rhs: Self) -> Self {
    var result = Self()
    for i in result.indices { result[i] = lhs[i] * rhs[i] }
    return result
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpDivide(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint,
                        Self.TangentVector == Self)
  public static func /(lhs: Self, rhs: Self) -> Self {
    var result = Self()
    for i in result.indices { result[i] = lhs[i] / rhs[i] }
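A minimal sketch (not part of the diff) of what these attributes enable, again assuming the Swift for TensorFlow toolchain's gradient(at:in:): elementwise SIMD arithmetic can now sit on a differentiable path.

// Hypothetical sketch: d/dx (x .* x).sum() = 2x per lane.
let x = SIMD4<Float>(1, 2, 3, 4)
let dx = gradient(at: x) { x in (x * x).sum() }
// dx == SIMD4<Float>(2, 4, 6, 8)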
@@ -842,7 +873,16 @@ extension SIMD where Scalar: FloatingPoint {
  }

  /// Returns the sum of the scalars in the vector.
  // SWIFT_ENABLE_TENSORFLOW
  // FIXME: TF-545 we want the sum() func to be marked as
  // `@_alwaysEmitIntoClient` like before when we define the VJP
  @inlinable
  @differentiable(vjp: _vjpSum
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint & Differentiable,
                        Scalar.TangentVector : BinaryFloatingPoint,
                        Self.TangentVector == Self)
  public func sum() -> Scalar {
    // Implementation note: this eventually be defined to lower to either
    // llvm.experimental.vector.reduce.fadd or an explicit tree-sum. Open-
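A hedged sketch (Swift for TensorFlow toolchain assumed): the gradient of sum() with respect to the vector is all ones, because _vjpSum broadcasts the incoming scalar cotangent into every lane.

// Hypothetical sketch, not part of this diff.
let x = SIMD4<Float>(1, 2, 3, 4)
let dx = gradient(at: x) { $0.sum() }
// dx == SIMD4<Float>(repeating: 1)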
@@ -1157,60 +1197,112 @@ extension SIMD where Scalar: FixedWidthInteger {
extension SIMD where Scalar: FloatingPoint {

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpNegate(rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint,
                        Self.TangentVector.Scalar : BinaryFloatingPoint)
  public static prefix func -(rhs: Self) -> Self {
    return 0 - rhs
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpAdd(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : Differentiable & BinaryFloatingPoint,
                        Scalar.TangentVector : BinaryFloatingPoint,
                        Self.TangentVector.Scalar == Scalar.TangentVector)
  public static func +(lhs: Scalar, rhs: Self) -> Self {
    return Self(repeating: lhs) + rhs
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpSubtract(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : Differentiable & BinaryFloatingPoint,
                        Scalar.TangentVector : BinaryFloatingPoint,
                        Self.TangentVector.Scalar == Scalar.TangentVector)
  public static func -(lhs: Scalar, rhs: Self) -> Self {
    return Self(repeating: lhs) - rhs
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpMultiply(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint & Differentiable,
                        Self.TangentVector == Self,
                        Scalar.TangentVector == Scalar)
  public static func *(lhs: Scalar, rhs: Self) -> Self {
    return Self(repeating: lhs) * rhs
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpDivide(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint & Differentiable,
                        Self.TangentVector == Self,
                        Scalar.TangentVector == Scalar)
  public static func /(lhs: Scalar, rhs: Self) -> Self {
    return Self(repeating: lhs) / rhs
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpAdd(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : Differentiable & BinaryFloatingPoint,
                        Scalar.TangentVector : BinaryFloatingPoint,
                        Self.TangentVector.Scalar == Scalar.TangentVector)
  public static func +(lhs: Self, rhs: Scalar) -> Self {
    return lhs + Self(repeating: rhs)
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpSubtract(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : Differentiable & BinaryFloatingPoint,
                        Scalar.TangentVector : BinaryFloatingPoint,
                        Self.TangentVector.Scalar == Scalar.TangentVector)
  public static func -(lhs: Self, rhs: Scalar) -> Self {
    return lhs - Self(repeating: rhs)
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpMultiply(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint & Differentiable,
                        Self.TangentVector == Self,
                        Scalar.TangentVector == Scalar)
  public static func *(lhs: Self, rhs: Scalar) -> Self {
    return lhs * Self(repeating: rhs)
  }

  @_transparent
  // SWIFT_ENABLE_TENSORFLOW
  @differentiable(vjp: _vjpDivide(lhs:rhs:)
                  where Self : Differentiable,
                        Self.TangentVector : SIMD,
                        Scalar : BinaryFloatingPoint & Differentiable,
                        Self.TangentVector == Self,
                        Scalar.TangentVector == Scalar)
  public static func /(lhs: Self, rhs: Scalar) -> Self {
    return lhs / Self(repeating: rhs)
  }
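A hedged sketch of the mixed scalar-vector case, assuming the toolchain's two-argument gradient(at:_:in:) is available: the scalar side of the pullback sums over lanes, which is why the VJPs later in this diff call v.sum().

// Hypothetical sketch: f(s, v) = (s + v).sum(), so df/ds = lane count and df/dv = ones.
let v = SIMD4<Float>(1, 2, 3, 4)
let (ds, dv) = gradient(at: Float(2), v) { s, v in (s + v).sum() }
// ds == 4, dv == SIMD4<Float>(repeating: 1)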
  Review comment on the `+=` implementation: "In order to make SIMD conform to AdditiveArithmetic, I had to remove both of these implementations, since two protocols cannot both provide default implementations. Let me know if this is okay, or if there is something else we should do."

  @_transparent
  public static func +=(lhs: inout Self, rhs: Self) {
    lhs = lhs + rhs
  }

  @_transparent
  public static func -=(lhs: inout Self, rhs: Self) {
    lhs = lhs - rhs
  }

  @_transparent
  public static func *=(lhs: inout Self, rhs: Self) {
    lhs = lhs * rhs
@@ -1407,3 +1499,159 @@ where T: SIMD, T.Scalar: FloatingPoint {
  }
  return result
}
// SWIFT_ENABLE_TENSORFLOW
extension SIMD
  where Self : Differentiable,
        TangentVector : SIMD,
        Scalar : BinaryFloatingPoint,
        TangentVector.Scalar : BinaryFloatingPoint {
  @inlinable
  static func _vjpAdd(lhs: Self, rhs: Self)
    -> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
    return (lhs + rhs, { v in
      return (v, v)
    })
  }

  @inlinable
  static func _vjpSubtract(lhs: Self, rhs: Self)
    -> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
    return (lhs - rhs, { v in
      return (v, -v)
    })
  }

  @inlinable
  static func _vjpNegate(rhs: Self)
    -> (Self, (TangentVector) -> (TangentVector)) {
    return (-rhs, { v in
      return -v
    })
  }
}
extension SIMD
  where Self : Differentiable,
        TangentVector : SIMD,
        Scalar : BinaryFloatingPoint,
        Self.TangentVector == Self {
  @inlinable
  static func _vjpMultiply(lhs: Self, rhs: Self)
    -> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
    return (lhs * rhs, { v in
      return (v * rhs, v * lhs)
    })
  }

  @inlinable
  static func _vjpDivide(lhs: Self, rhs: Self)
    -> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
    return (lhs / rhs, { v in
      (v / rhs, -lhs / (rhs * rhs) * v)
    })
  }
}
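These pullbacks are the elementwise product and quotient rules: for lhs * rhs the cotangent v maps to (v * rhs, v * lhs), and for lhs / rhs to (v / rhs, -lhs / (rhs * rhs) * v). A hedged numeric check, assuming the toolchain's two-argument gradient(at:_:in:):

// Hypothetical sketch verifying the product rule through these VJPs.
let a = SIMD2<Float>(2, 3)
let b = SIMD2<Float>(5, 7)
let (da, db) = gradient(at: a, b) { a, b in (a * b).sum() }
// da == b, i.e. SIMD2<Float>(5, 7); db == a, i.e. SIMD2<Float>(2, 3)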
extension SIMD
  where Self : Differentiable,
        TangentVector : SIMD,
        Scalar : BinaryFloatingPoint & Differentiable,
        Scalar.TangentVector : BinaryFloatingPoint,
        TangentVector.Scalar == Scalar.TangentVector {
  @inlinable
  static func _vjpAdd(lhs: Scalar, rhs: Self)
    -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
    return (lhs + rhs, { v in
      return (v.sum(), v)
    })
  }

  @inlinable
  static func _vjpSubtract(lhs: Scalar, rhs: Self)
    -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
    return (lhs - rhs, { v in
      return (v.sum(), -v)
    })
  }

  @inlinable
  static func _vjpAdd(lhs: Self, rhs: Scalar)
    -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
    return (lhs + rhs, { v in
      return (v, v.sum())
    })
  }

  @inlinable
  static func _vjpSubtract(lhs: Self, rhs: Scalar)
    -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
    return (lhs - rhs, { v in
      return (v, -v.sum())
    })
  }
}
extension SIMD
  where Self : Differentiable,
        TangentVector : SIMD,
        Scalar : BinaryFloatingPoint & Differentiable,
        Self.TangentVector == Self,
        Scalar.TangentVector == Scalar {
  @inlinable
  static func _vjpMultiply(lhs: Self, rhs: Scalar)
    -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
    return (lhs * rhs, { v in
      return (v * rhs, (v * lhs).sum())
    })
  }

  @inlinable
  static func _vjpDivide(lhs: Self, rhs: Scalar)
    -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
    return (lhs / rhs, { v in
      (v / rhs, (-lhs / (rhs * rhs) * v).sum())
    })
  }

  @inlinable
  static func _vjpMultiply(lhs: Scalar, rhs: Self)
    -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
    return (lhs * rhs, { v in
      return ((v * rhs).sum(), v * lhs)
    })
  }

  @inlinable
  static func _vjpDivide(lhs: Scalar, rhs: Self)
    -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
    return (lhs / rhs, { v in
      ((v / rhs).sum(), -lhs / (rhs * rhs) * v)
    })
  }
}
extension SIMD
  where Self : Differentiable,
        TangentVector : SIMD,
        Scalar : BinaryFloatingPoint & Differentiable,
        Scalar.TangentVector : BinaryFloatingPoint,
        TangentVector == Self {
  @inlinable
  func _vjpSum() -> (Scalar, (Scalar.TangentVector) -> TangentVector) {
    return (sum(), { v in Self(repeating: Scalar(v)) })
  }
}
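Note the symmetry with _vjpInit(repeating:) below: summation's pullback broadcasts a scalar into every lane, and broadcasting's pullback sums the lanes. A hedged sketch using the toolchain's valueWithPullback(at:in:), assumed available:

// Hypothetical sketch, not part of this diff.
let x = SIMD4<Float>(1, 2, 3, 4)
let (_, pullbackOfSum) = valueWithPullback(at: x) { $0.sum() }
// pullbackOfSum(1) == SIMD4<Float>(repeating: 1)

let (_, pullbackOfInit) = valueWithPullback(at: Float(2)) { s in SIMD4<Float>(repeating: s) }
// pullbackOfInit(SIMD4<Float>(1, 2, 3, 4)) == 10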
extension SIMD
  where Self : Differentiable,
        Self.TangentVector : SIMD,
        Scalar : BinaryFloatingPoint & Differentiable,
        Self.TangentVector == Self,
        Scalar.TangentVector == Scalar {
  @usableFromInline
  static func _vjpInit(repeating value: Scalar)
    -> (Self, (TangentVector) -> Scalar.TangentVector) {
    return (Self(repeating: value), { v in v.sum() })
  }
}