Skip to content
This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit 9e62906

Browse files
committed
Merge branch 'master' into complex-numbers2
2 parents e58f400 + e5a288b commit 9e62906

File tree

8 files changed

+733
-21
lines changed

8 files changed

+733
-21
lines changed
Lines changed: 220 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,220 @@
1+
#if !COMPILING_TENSORFLOW_MODULE
2+
import TensorFlow
3+
#endif
4+
5+
//===-- Dataset.swift -----------------------------------------*- swift -*-===//
6+
//
7+
// This source file is part of the Swift.org open source project
8+
//
9+
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
10+
// Licensed under Apache License v2.0 with Runtime Library Exception
11+
//
12+
// See https://swift.org/LICENSE.txt for license information
13+
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
14+
//
15+
//===----------------------------------------------------------------------===//
16+
//
17+
// The dataset API.
18+
//
19+
//===----------------------------------------------------------------------===//
20+
21+
/// The default graph seed.
///
/// - Note: See TensorFlow's `python.framework.random_seed.DEFAULT_GRAPH_SEED`.
@usableFromInline let _defaultGraphSeed: Int64 = 87654321

/// Returns the local seeds an operation should use given an op-specific seed.
///
/// Given an operation-specific seed, `seed`, this helper returns two seeds
/// derived from the graph-level and op-level seeds. Many random operations
/// internally use the two seeds to allow users to change the seed globally for
/// a graph, or for only specific operations.
///
/// - Note: See TensorFlow's `python.framework.random_seed.get_seed`.
///
// TODO: There's no support for TF's "global seed" yet, so we always use the
// default graph seed as the first seed. Need to investigate the best way to
// model TF's "global seed".
@usableFromInline @inline(__always)
func _tensorSeeds(_ seed: Tensor<Int64>) -> (Tensor<Int64>, Tensor<Int64>) {
    // The graph-level seed is currently fixed; only the op-level seed varies.
    let graphSeed = Tensor(_defaultGraphSeed)
    return (graphSeed, seed)
}
42+
43+
//===----------------------------------------------------------------------===//
44+
// Single value dataset
45+
//===----------------------------------------------------------------------===//
46+
47+
/// Represents a potentially large set of elements.
///
/// A `Dataset` can be used to represent an input pipeline as a collection of
/// element tensors.
@_fixed_layout
public struct Dataset<Element : TensorGroup> {
    // The variant-typed handle to the underlying TensorFlow dataset resource.
    // Exposed (underscored) so sibling APIs in this file can build new
    // datasets from existing handles (see `zip`, `map`, `filter`, etc.).
    public let _handle: VariantHandle

    /// Creates a dataset that wraps the given TensorFlow dataset handle.
    ///
    /// - Parameter _handle: A variant-typed handle to an existing dataset resource.
    @inlinable
    public init(_handle: VariantHandle) {
        self._handle = _handle
    }
}
60+
61+
public extension Dataset {
    /// Creates a dataset backed by the `experimentalRandomDataset` op, seeded
    /// with the given value.
    ///
    /// - Parameter randomSeed: The op-level seed; the graph-level seed is
    ///   derived via `_tensorSeeds`.
    @inlinable
    init(randomSeed: Int64) {
        let seeds = _tensorSeeds(Tensor(randomSeed))
        self.init(_handle: Raw.experimentalRandomDataset(
            seed: seeds.0,
            seed2: seeds.1,
            outputTypes: Element._typeList,
            outputShapes: Element._unknownShapeList))
    }
}
72+
73+
public extension Dataset {
    /// Creates a dataset from a batch of elements as a tensor.
    ///
    /// - Parameter elements: The batch to slice into individual elements
    ///   (via the `tensorSliceDataset` op).
    @inlinable
    init(elements: Element) {
        let slicedHandle = Raw.tensorSliceDataset(
            components: [elements],
            outputShapes: Element._unknownShapeList)
        self.init(_handle: slicedHandle)
    }
}
82+
83+
extension Dataset : Sequence {
    public typealias Iterator = DatasetIterator<Element>

    /// Returns an iterator over the elements of this dataset.
    ///
    /// A fresh anonymous iterator resource is created and bound to this
    /// dataset's handle on every call, so each returned iterator starts from
    /// the beginning.
    @inlinable
    public func makeIterator() -> DatasetIterator<Element> {
        let iteratorResource = Raw.anonymousIterator(
            outputTypes: Element._typeList,
            outputShapes: Element._unknownShapeList)
        Raw.makeIterator(dataset: _handle, iterator: iteratorResource)
        return DatasetIterator(_handle: iteratorResource)
    }
}
96+
97+
public extension Dataset {
    // Note that this Dataset API implementation uses an experimental tracing
    // feature, which is not robust and does not have great diagnostics yet.

    /// Returns a dataset produced by applying `transform` to each element.
    @inlinable
    func map<ResultElement : TensorGroup>(
        _ transform: (Element) -> ResultElement
    ) -> Dataset<ResultElement> {
        let mappedHandle = Raw.mapDataset(
            inputDataset: _handle,
            otherArguments: Tensor<Int32>(0),
            f: transform,
            outputTypes: ResultElement._typeList,
            outputShapes: ResultElement._unknownShapeList,
            useInterOpParallelism: true,
            preserveCardinality: false)
        return Dataset<ResultElement>(_handle: mappedHandle)
    }

    /// Returns a dataset produced by applying `transform` to each element,
    /// using up to `parallelCallCount` parallel invocations.
    @inlinable
    func map<ResultElement : TensorGroup>(
        parallelCallCount: Int,
        _ transform: (Element) -> ResultElement
    ) -> Dataset<ResultElement> {
        let mappedHandle = Raw.parallelMapDataset(
            inputDataset: _handle,
            otherArguments: Tensor<Int32>(0),
            numParallelCalls: Tensor<Int32>(Int32(parallelCallCount)),
            f: transform,
            outputTypes: ResultElement._typeList,
            outputShapes: ResultElement._unknownShapeList,
            useInterOpParallelism: true,
            // Deterministic output order (`sloppy: false`), cardinality not preserved,
            // matching the non-parallel overload above.
            sloppy: false,
            preserveCardinality: false)
        return Dataset<ResultElement>(_handle: mappedHandle)
    }

    /// Returns a dataset containing only the elements for which `isIncluded`
    /// yields `true`.
    @inlinable
    func filter(
        _ isIncluded: (Element) -> Tensor<Bool>
    ) -> Dataset {
        let filteredHandle = Raw.filterDataset(
            inputDataset: _handle,
            otherArguments: Tensor<Int32>(0),
            predicate: isIncluded,
            outputTypes: Element._typeList,
            outputShapes: Element._unknownShapeList)
        return Dataset(_handle: filteredHandle)
    }
}
143+
144+
public extension Dataset {
    /// Returns a shuffled dataset, using `sampleCount` as the shuffle buffer
    /// size and seeds derived from `randomSeed` via `_tensorSeeds`.
    @inlinable
    func shuffled(
        sampleCount: Int, randomSeed: Int64
    ) -> Dataset {
        let seeds = _tensorSeeds(Tensor(randomSeed))
        let shuffledHandle = Raw.shuffleDataset(
            inputDataset: _handle,
            bufferSize: Tensor(Int64(sampleCount)),
            seed: seeds.0,
            seed2: seeds.1,
            outputTypes: Element._typeList,
            outputShapes: Element._unknownShapeList)
        return Dataset(_handle: shuffledHandle)
    }

    /// Returns a dataset whose elements are batches of `batchSize` consecutive
    /// elements of this dataset.
    @inlinable
    func batched(_ batchSize: Int) -> Dataset {
        let batchedHandle = Raw.batchDataset(
            inputDataset: _handle,
            batchSize: Tensor(Int64(batchSize)),
            outputTypes: Element._typeList,
            outputShapes: Element._unknownShapeList)
        return Dataset(_handle: batchedHandle)
    }
}
168+
169+
/// The type that allows iteration over a dataset's elements.
@_fixed_layout
public struct DatasetIterator<Element : TensorGroup> {
    // The handle to the underlying TensorFlow iterator resource; read by
    // `next()` in the IteratorProtocol conformance.
    @usableFromInline let _handle: ResourceHandle

    /// Creates an iterator from the given TensorFlow iterator resource handle.
    ///
    /// - Parameter _handle: A resource handle for an iterator already bound to
    ///   a dataset (see `Dataset.makeIterator()`).
    @usableFromInline
    internal init(_handle: ResourceHandle) {
        self._handle = _handle
    }
}
179+
180+
extension DatasetIterator : IteratorProtocol {
    /// Advances to the next element and returns it, or `nil` if no next element
    /// exists.
    @inlinable
    public mutating func next() -> Element? {
        // Fetch the next element as a TF "optional" variant, then inspect it.
        let nextOptional = Raw.iteratorGetNextAsOptional(
            iterator: _handle,
            outputTypes: Element._typeList,
            outputShapes: Element._unknownShapeList)
        let hasValue = Raw.optionalHasValue(optional: nextOptional).scalarized()
        if !hasValue {
            return nil
        }
        return Raw.optionalGetValue(
            optional: nextOptional,
            outputShapes: Element._unknownShapeList)
    }
}
197+
198+
/// A 2-tuple-like struct that conforms to TensorGroup that represents a tuple
/// of 2 types conforming to TensorGroup.
@_fixed_layout
public struct Zip2TensorGroup<T : TensorGroup, U : TensorGroup> : TensorGroup {
    // NOTE(review): no TensorGroup requirements are implemented here, so the
    // conformance is presumably synthesized from the stored properties —
    // confirm against the TensorGroup protocol's derivation rules.
    /// The first component of the pair.
    public var first: T
    /// The second component of the pair.
    public var second: U

    /// Creates a pair from the given components.
    public init(_ first: T, _ second: U) {
        self.first = first
        self.second = second
    }
}
210+
211+
/// Returns a dataset of `Zip2TensorGroup` pairs formed by zipping `dataset1`
/// with `dataset2` (via the `zipDataset` op).
@inlinable
public func zip<T : TensorGroup, U : TensorGroup>(
    _ dataset1: Dataset<T>, _ dataset2: Dataset<U>
) -> Dataset<Zip2TensorGroup<T, U>> {
    return Dataset(_handle: Raw.zipDataset(
        inputDatasets: [dataset1._handle, dataset2._handle],
        outputTypes: Zip2TensorGroup<T, U>._typeList,
        outputShapes: Zip2TensorGroup<T, U>._unknownShapeList))
}

Sources/DeepLearning/Layer.swift

Lines changed: 119 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -513,6 +513,122 @@ public extension Conv2D {
513513
}
514514
}
515515

516+
/// A 3-D convolution layer for spatial/spatio-temporal convolution over images.
///
/// This layer creates a convolution filter that is convolved with the layer input to produce a
/// tensor of outputs.
@_fixed_layout
public struct Conv3D<Scalar: TensorFlowFloatingPoint>: Layer {
    /// The 5-D convolution kernel.
    public var filter: Tensor<Scalar>
    /// The bias vector.
    public var bias: Tensor<Scalar>
    /// An activation function.
    public typealias Activation = @differentiable (Tensor<Scalar>) -> Tensor<Scalar>
    /// The element-wise activation function.
    @noDerivative public let activation: Activation
    /// The strides of the sliding window for spatial dimensions.
    @noDerivative public let strides: (Int, Int, Int)
    /// The padding algorithm for convolution.
    @noDerivative public let padding: Padding

    /// Creates a `Conv3D` layer with the specified filter, bias, activation function, strides,
    /// and padding.
    ///
    /// - Parameters:
    ///   - filter: The 5-D convolution kernel.
    ///   - bias: The bias vector.
    ///   - activation: The element-wise activation function.
    ///   - strides: The strides of the sliding window for spatial dimensions.
    ///   - padding: The padding algorithm for convolution.
    public init(
        filter: Tensor<Scalar>,
        bias: Tensor<Scalar>,
        activation: @escaping Activation,
        strides: (Int, Int, Int),
        padding: Padding
    ) {
        self.filter = filter
        self.bias = bias
        self.activation = activation
        self.strides = strides
        self.padding = padding
    }

    /// Returns the output obtained from applying the layer to the given input.
    ///
    /// - Parameter input: The input to the layer.
    /// - Returns: The output.
    @differentiable
    public func call(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        // The stored strides cover the three spatial dimensions only; a unit
        // stride is supplied for the outer (first/last) dimensions.
        let convolved = input.convolved3D(
            withFilter: filter,
            strides: (1, strides.0, strides.1, strides.2, 1),
            padding: padding)
        return activation(convolved + bias)
    }
}
569+
570+
public extension Conv3D {
    /// Creates a `Conv3D` layer with the specified filter shape, strides, padding, and
    /// element-wise activation function. The filter tensor is initialized using Glorot uniform
    /// initialization with the specified generator. The bias vector is initialized with zeros.
    ///
    /// - Parameters:
    ///   - filterShape: The shape of the 5-D convolution kernel.
    ///   - strides: The strides of the sliding window for spatial/spatio-temporal dimensions.
    ///   - padding: The padding algorithm for convolution.
    ///   - activation: The element-wise activation function.
    ///   - generator: The random number generator for initialization.
    ///
    /// - Note: Use `init(filterShape:strides:padding:activation:seed:)` for faster random
    ///   initialization.
    init<G: RandomNumberGenerator>(
        filterShape: (Int, Int, Int, Int, Int),
        strides: (Int, Int, Int) = (1, 1, 1),
        padding: Padding = .valid,
        activation: @escaping Activation = identity,
        generator: inout G
    ) {
        let kernelShape = TensorShape([
            filterShape.0, filterShape.1, filterShape.2, filterShape.3, filterShape.4])
        // One bias value per output channel (the last filter-shape component).
        let zeroBias = Tensor<Scalar>(zeros: TensorShape([filterShape.4]))
        self.init(
            filter: Tensor(glorotUniform: kernelShape, generator: &generator),
            bias: zeroBias,
            activation: activation,
            strides: strides,
            padding: padding)
    }
}
601+
602+
public extension Conv3D {
    /// Creates a `Conv3D` layer with the specified filter shape, strides, padding, and
    /// element-wise activation function. The filter tensor is initialized using Glorot uniform
    /// initialization with the specified seed. The bias vector is initialized with zeros.
    ///
    /// - Parameters:
    ///   - filterShape: The shape of the 5-D convolution kernel.
    ///   - strides: The strides of the sliding window for spatial/spatio-temporal dimensions.
    ///   - padding: The padding algorithm for convolution.
    ///   - activation: The element-wise activation function.
    ///   - seed: The random seed for initialization. The default value is random.
    init(
        filterShape: (Int, Int, Int, Int, Int),
        strides: (Int, Int, Int) = (1, 1, 1),
        padding: Padding = .valid,
        activation: @escaping Activation = identity,
        seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
                                Int64.random(in: Int64.min..<Int64.max))
    ) {
        let kernelShape = TensorShape([
            filterShape.0, filterShape.1, filterShape.2, filterShape.3, filterShape.4])
        // One bias value per output channel (the last filter-shape component).
        let zeroBias = Tensor<Scalar>(zeros: TensorShape([filterShape.4]))
        self.init(
            filter: Tensor(glorotUniform: kernelShape, seed: seed),
            bias: zeroBias,
            activation: activation,
            strides: strides,
            padding: padding)
    }
}
631+
516632
/// A 2-D transposed convolution layer (e.g. spatial transposed convolution over images).
517633
///
518634
/// This layer creates a convolution filter that is transpose-convolved with the layer input
@@ -1224,7 +1340,7 @@ public struct UpSampling1D<Scalar: TensorFlowFloatingPoint>: Layer {
12241340
///
12251341
/// - Parameter size: The upsampling factor for timesteps.
12261342
public init(size: Int) {
1227-
self.size = size
1343+
self.size = size
12281344
}
12291345

12301346
/// Returns the output obtained from applying the layer to the given input.
@@ -1250,7 +1366,7 @@ public struct UpSampling2D<Scalar: TensorFlowFloatingPoint>: Layer {
12501366
///
12511367
/// - Parameter size: The upsampling factor for rows and columns.
12521368
public init(size: Int) {
1253-
self.size = size
1369+
self.size = size
12541370
}
12551371

12561372
/// Returns the output obtained from applying the layer to the given input.
@@ -1276,7 +1392,7 @@ public struct UpSampling3D<Scalar: TensorFlowFloatingPoint>: Layer {
12761392
///
12771393
/// - Parameter size: The upsampling factor for rows, columns, and depth.
12781394
public init(size: Int) {
1279-
self.size = size
1395+
self.size = size
12801396
}
12811397

12821398
/// Returns the output obtained from applying the layer to the given input.

0 commit comments

Comments
 (0)