Skip to content
This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit d2c78f4

Browse files
eaplatanios authored and rxwei committed
Restructured the operators source files and added support for multiple new operators. (#70)
* Re-organized the operators source files. * Added support for 'stacked', 'concatenated', 'gathered', 'batchGathered', and 'masked'. * Reverted back to 4-space tabs. * Made some other minor changes. * Added support or 'selecting'. * Added support for 'nonZeroIndices'. * Minor edits. * Addressed Richard's feedback. * Addressed Richard's comments. * Addressed Richard's comments. * Updated the convolution ops to support explicit paddings. * Small edits. * Updated the convolution ops to support explicit paddings. * Small fix. * Small fix. * Added a new tensor initializer from ranges of tensors. * Added documentation string for the "explicit" padding scheme. * More fixes. * Added 'zerosLike' and 'onesLike' tensor initializers. * Added a new 'stacking' tensor initializer and made some compatibility fixes. * Added a new 'tiling' tensor initializer. * Minor edit. * Made some refactoring. * Bug fix. * Added support for the split op and its VJP. * Added VJPs for stacking and tiling. * Added VJP for concatenating. * Added the gathering VJP. * Bug fixes. * Added an 'Optimizable' protocol. * Moved some more activation functions from the stdlib. * Added log-softmax VJP. * Minor bug fix. * Brought some initializers from stdlib. * Brought some more stuff from the stdlib. * Minor edit. * Moved some more stuff to swift-apis. * Removed all the newly-added ops. * Moved some more stuff to swift-apis. * Moved some more stuff to swift-apis. * Added a README file to the 'Operators' source directory. * Brought the gradient helper functions from the stdlib. * Bug fixes. * Brought the tensor tests from the stdlib. * Minor bug fix. * Addressed Richard's comments. * Minor edit. * Reverted the change in the existing optimizer implementations. * Added VJPs for some operations. * Incorporated fix from stdlib. * Addressed Richard's feedback. * Changed the indentation in the 'PythonConversion.swift' file. * Changed the indentation in the 'Random.swift' file. * Minor edit. * Tabs to spaces.
1 parent 3f71684 commit d2c78f4

17 files changed

+4222
-104
lines changed
Lines changed: 178 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,178 @@
// Copyright 2018 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
15+
#if !COMPILING_TENSORFLOW_MODULE
16+
import TensorFlow
17+
#endif
18+
19+
//===------------------------------------------------------------------------------------------===//
20+
// Method-style Differential Operators
21+
//===------------------------------------------------------------------------------------------===//
22+
23+
public extension Differentiable {
    /// Returns the gradient of `f` at `self`, computed by pulling back a unit seed tensor.
    ///
    /// - Parameter f: A differentiable function of `self` producing a scalar-like tensor.
    /// - Returns: The cotangent vector of `self` with respect to `f`.
    @inlinable
    func gradient<R: TensorFlowFloatingPoint>(
        in f: @differentiable (Self) -> Tensor<R>
    ) -> CotangentVector {
        let pullback = self.pullback(in: f)
        return pullback(Tensor<R>(1))
    }

    /// Returns both the value of `f` at `self` and the gradient of `f` at `self`.
    ///
    /// - Parameter f: A differentiable function of `self` producing a scalar-like tensor.
    /// - Returns: A pair of the function value and the cotangent vector of `self`.
    @inlinable
    func valueWithGradient<R: TensorFlowFloatingPoint>(
        in f: @differentiable (Self) -> Tensor<R>
    ) -> (value: Tensor<R>, gradient: CotangentVector) {
        let (value, pullback) = self.valueWithPullback(in: f)
        return (value, pullback(Tensor<R>(1)))
    }

    /// Returns the gradient of `f` at `(self, x)` with respect to both arguments.
    ///
    /// - Parameters:
    ///   - x: The second differentiation argument.
    ///   - f: A differentiable function of `self` and `x` producing a scalar-like tensor.
    /// - Returns: The cotangent vectors of `self` and `x`.
    @inlinable
    func gradient<T: Differentiable, R: TensorFlowFloatingPoint>(
        at x: T,
        in f: @differentiable (Self, T) -> Tensor<R>
    ) -> (CotangentVector, T.CotangentVector) {
        let pullback = self.pullback(at: x, in: f)
        return pullback(Tensor<R>(1))
    }

    /// Returns both the value of `f` at `(self, x)` and the gradient with respect to both
    /// arguments.
    ///
    /// - Parameters:
    ///   - x: The second differentiation argument.
    ///   - f: A differentiable function of `self` and `x` producing a scalar-like tensor.
    /// - Returns: A pair of the function value and the pair of cotangent vectors.
    @inlinable
    func valueWithGradient<T: Differentiable, R: TensorFlowFloatingPoint>(
        at x: T,
        in f: @differentiable (Self, T) -> Tensor<R>
    ) -> (value: Tensor<R>, gradient: (CotangentVector, T.CotangentVector)) {
        let (value, pullback) = self.valueWithPullback(at: x, in: f)
        return (value, pullback(Tensor<R>(1)))
    }
}
56+
57+
//===------------------------------------------------------------------------------------------===//
58+
// Free-Function-Style Differential Operators
59+
//===------------------------------------------------------------------------------------------===//
60+
61+
// Value with gradient
62+
63+
/// Returns the value of `f(x)` together with the gradient of `f` at `x`.
///
/// - Parameters:
///   - x: The point at which to differentiate.
///   - f: A differentiable function producing a scalar-like tensor.
/// - Returns: A pair of the function value and the cotangent vector of `x`.
@inlinable
public func valueWithGradient<T, R>(
    at x: T,
    in f: @differentiable (T) -> Tensor<R>
) -> (value: Tensor<R>, gradient: T.CotangentVector)
    where T: Differentiable, R: TensorFlowFloatingPoint {
    // Seed the pullback with a unit tensor to obtain the gradient.
    let (result, pb) = valueWithPullback(at: x, in: f)
    return (result, pb(Tensor<R>(1)))
}
/// Returns the value of `f(x, y)` together with the gradient of `f` at `(x, y)`.
///
/// - Parameters:
///   - x: The first differentiation argument.
///   - y: The second differentiation argument.
///   - f: A differentiable function producing a scalar-like tensor.
/// - Returns: A pair of the function value and the pair of cotangent vectors.
@inlinable
public func valueWithGradient<T, U, R>(
    at x: T,
    _ y: U,
    in f: @differentiable (T, U) -> Tensor<R>
) -> (value: Tensor<R>, gradient: (T.CotangentVector, U.CotangentVector))
    where T: Differentiable, U: Differentiable, R: TensorFlowFloatingPoint {
    // Bind the result under a name distinct from parameter `y` to avoid shadowing.
    let (result, pb) = valueWithPullback(at: x, y, in: f)
    return (result, pb(Tensor<R>(1)))
}
/// Returns the value of `f(x, y, z)` together with the gradient of `f` at `(x, y, z)`.
///
/// - Parameters:
///   - x: The first differentiation argument.
///   - y: The second differentiation argument.
///   - z: The third differentiation argument.
///   - f: A differentiable function producing a scalar-like tensor.
/// - Returns: A pair of the function value and the triple of cotangent vectors.
@inlinable
public func valueWithGradient<T, U, V, R>(
    at x: T,
    _ y: U,
    _ z: V,
    in f: @differentiable (T, U, V) -> Tensor<R>
) -> (value: Tensor<R>, gradient: (T.CotangentVector, U.CotangentVector, V.CotangentVector))
    where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloatingPoint {
    // Bind the result under a name distinct from parameter `y` to avoid shadowing.
    let (result, pb) = valueWithPullback(at: x, y, z, in: f)
    return (result, pb(Tensor<R>(1)))
}
// Value with gradient (curried)
97+
98+
/// Returns a function that computes both the value and the gradient of `f`.
///
/// - Parameter f: A differentiable function producing a scalar-like tensor.
/// - Returns: A closure mapping an input to the pair `(value, gradient)`.
@inlinable
public func valueWithGradient<T, R>(
    of f: @escaping @differentiable (T) -> Tensor<R>
) -> (T) -> (value: Tensor<R>, gradient: T.CotangentVector)
    where T: Differentiable, R: TensorFlowFloatingPoint {
    return { input in valueWithGradient(at: input, in: f) }
}
/// Returns a function that computes both the value and the gradient of the binary function `f`.
///
/// - Parameter f: A differentiable function producing a scalar-like tensor.
/// - Returns: A closure mapping two inputs to the pair `(value, gradient)`.
@inlinable
public func valueWithGradient<T, U, R>(
    of f: @escaping @differentiable (T, U) -> Tensor<R>
) -> (T, U) -> (value: Tensor<R>, gradient: (T.CotangentVector, U.CotangentVector))
    where T: Differentiable, U: Differentiable, R: TensorFlowFloatingPoint {
    return { first, second in valueWithGradient(at: first, second, in: f) }
}
/// Returns a function that computes both the value and the gradient of the ternary function `f`.
///
/// - Parameter f: A differentiable function producing a scalar-like tensor.
/// - Returns: A closure mapping three inputs to the pair `(value, gradient)`.
@inlinable
public func valueWithGradient<T, U, V, R>(
    of f: @escaping @differentiable (T, U, V) -> Tensor<R>
) -> (T, U, V) -> (
    value: Tensor<R>,
    gradient: (T.CotangentVector, U.CotangentVector, V.CotangentVector))
    where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloatingPoint {
    return { first, second, third in valueWithGradient(at: first, second, third, in: f) }
}
// Gradient
125+
126+
/// Returns the gradient of `f` at `x`, computed by pulling back a unit seed tensor.
///
/// - Parameters:
///   - x: The point at which to differentiate.
///   - f: A differentiable function producing a scalar-like tensor.
/// - Returns: The cotangent vector of `x`.
@inlinable
public func gradient<T, R>(
    at x: T,
    in f: @differentiable (T) -> Tensor<R>
) -> T.CotangentVector where T: Differentiable, R: TensorFlowFloatingPoint {
    let pb = pullback(at: x, in: f)
    return pb(Tensor<R>(1))
}
/// Returns the gradient of `f` at `(x, y)` with respect to both arguments.
///
/// - Parameters:
///   - x: The first differentiation argument.
///   - y: The second differentiation argument.
///   - f: A differentiable function producing a scalar-like tensor.
/// - Returns: The pair of cotangent vectors of `x` and `y`.
@inlinable
public func gradient<T, U, R>(
    at x: T,
    _ y: U,
    in f: @differentiable (T, U) -> Tensor<R>
) -> (T.CotangentVector, U.CotangentVector)
    where T: Differentiable, U: Differentiable, R: TensorFlowFloatingPoint {
    let pb = pullback(at: x, y, in: f)
    return pb(Tensor<R>(1))
}
/// Returns the gradient of `f` at `(x, y, z)` with respect to all three arguments.
///
/// - Parameters:
///   - x: The first differentiation argument.
///   - y: The second differentiation argument.
///   - z: The third differentiation argument.
///   - f: A differentiable function producing a scalar-like tensor.
/// - Returns: The triple of cotangent vectors of `x`, `y`, and `z`.
@inlinable
public func gradient<T, U, V, R>(
    at x: T,
    _ y: U,
    _ z: V,
    in f: @differentiable (T, U, V) -> Tensor<R>
) -> (T.CotangentVector, U.CotangentVector, V.CotangentVector)
    where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloatingPoint {
    let pb = pullback(at: x, y, z, in: f)
    return pb(Tensor<R>(1))
}
// Gradient (curried)
156+
157+
/// Returns a function that computes the gradient of `f`.
///
/// - Parameter f: A differentiable function producing a scalar-like tensor.
/// - Returns: A closure mapping an input to its cotangent vector.
@inlinable
public func gradient<T, R>(
    of f: @escaping @differentiable (T) -> Tensor<R>
) -> (T) -> T.CotangentVector where T: Differentiable, R: TensorFlowFloatingPoint {
    return { input in gradient(at: input, in: f) }
}
/// Returns a function that computes the gradient of the binary function `f`.
///
/// - Parameter f: A differentiable function producing a scalar-like tensor.
/// - Returns: A closure mapping two inputs to their pair of cotangent vectors.
@inlinable
public func gradient<T, U, R>(
    of f: @escaping @differentiable (T, U) -> Tensor<R>
) -> (T, U) -> (T.CotangentVector, U.CotangentVector)
    where T: Differentiable, U: Differentiable, R: TensorFlowFloatingPoint {
    return { first, second in gradient(at: first, second, in: f) }
}
/// Returns a function that computes the gradient of the ternary function `f`.
///
/// - Parameter f: A differentiable function producing a scalar-like tensor.
/// - Returns: A closure mapping three inputs to their triple of cotangent vectors.
@inlinable
public func gradient<T, U, V, R>(
    of f: @escaping @differentiable (T, U, V) -> Tensor<R>
) -> (T, U, V) -> (T.CotangentVector, U.CotangentVector, V.CotangentVector)
    where T: Differentiable, U: Differentiable, V: Differentiable, R: TensorFlowFloatingPoint {
    return { first, second, third in gradient(at: first, second, third, in: f) }
}

Sources/DeepLearning/Helpers.swift

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,20 @@
1313
// limitations under the License.
1414

1515
#if !COMPILING_TENSORFLOW_MODULE
16-
import TensorFlow
16+
@_exported import TensorFlow
1717
#endif
1818

19+
/// Returns its argument unchanged: a tensor with the same shape and scalars as the input.
///
/// - Parameter tensor: The tensor to pass through.
/// - Returns: The input tensor, unmodified.
@inlinable
@differentiable
public func identity<Scalar>(_ tensor: Tensor<Scalar>) -> Tensor<Scalar> {
    return tensor
}
1926
// `pow` is defined in Darwin/Glibc on `Float` and `Double`, but there doesn't exist a generic
// version for `FloatingPoint`.
// This is a manual definition.
/// Returns `base` raised to `exponent`, computed through `Double` and converted back to `T`.
@inlinable
func pow<T: BinaryFloatingPoint>(_ base: T, _ exponent: T) -> T {
    // Route through the concrete `Double` overload provided by Darwin/Glibc.
    let result = pow(Double(base), Double(exponent))
    return T(result)
}

0 commit comments

Comments
 (0)