@@ -30,7 +30,9 @@ public struct Conv1D<Scalar: TensorFlowFloatingPoint>: Layer {
30
30
@noDerivative public let stride : Int
31
31
/// The padding algorithm for convolution.
32
32
@noDerivative public let padding : Padding
33
-
33
+ /// The dilation factor for the temporal dimension.
34
+ @noDerivative public let dilation : Int
35
+
34
36
/// Creates a `Conv1D` layer with the specified filter, bias, activation function, stride, and
35
37
/// padding.
36
38
///
@@ -40,18 +42,21 @@ public struct Conv1D<Scalar: TensorFlowFloatingPoint>: Layer {
40
42
/// - activation: The element-wise activation function.
41
43
/// - stride: The stride of the sliding window for temporal dimension.
42
44
/// - padding: The padding algorithm for convolution.
45
+ /// - dilation: The dilation factor for the temporal dimension.
43
46
public init (
44
47
filter: Tensor < Scalar > ,
45
48
bias: Tensor < Scalar > ,
46
49
activation: @escaping Activation ,
47
50
stride: Int ,
48
- padding: Padding
51
+ padding: Padding ,
52
+ dilation: Int
49
53
) {
50
54
self . filter = filter
51
55
self . bias = bias
52
56
self . activation = activation
53
57
self . stride = stride
54
58
self . padding = padding
59
+ self . dilation = dilation
55
60
}
56
61
57
62
/// Returns the output obtained from applying the layer to the given input.
@@ -60,8 +65,12 @@ public struct Conv1D<Scalar: TensorFlowFloatingPoint>: Layer {
60
65
/// - Returns: The output `[batchCount, newWidth, outputChannels]`.
61
66
@differentiable
62
67
public func callAsFunction( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
63
- let conv = conv2D ( input. expandingShape ( at: 1 ) , filter: filter. expandingShape ( at: 0 ) ,
64
- strides: ( 1 , 1 , stride, 1 ) , padding: padding)
68
+ let conv = conv2D (
69
+ input. expandingShape ( at: 1 ) ,
70
+ filter: filter. expandingShape ( at: 0 ) ,
71
+ strides: ( 1 , 1 , stride, 1 ) ,
72
+ padding: padding,
73
+ dilations: ( 1 , 1 , dilation, 1 ) )
65
74
return activation ( conv. squeezingShape ( at: 1 ) + bias)
66
75
}
67
76
}
@@ -76,15 +85,17 @@ public extension Conv1D where Scalar.RawSignificand: FixedWidthInteger {
76
85
/// `[width, inputChannels, outputChannels]`.
77
86
/// - stride: The stride of the sliding window for temporal dimension.
78
87
/// - padding: The padding algorithm for convolution.
88
+ /// - dilation: The dilation factor for temporal dimension.
79
89
/// - activation: The element-wise activation function.
80
90
/// - generator: The random number generator for initialization.
81
91
///
82
- /// - Note: Use `init(filterShape:stride:padding:activation:seed:)` for faster random
92
+ /// - Note: Use `init(filterShape:stride:padding:dilation:activation:seed:)` for faster random
83
93
/// initialization.
84
94
init < G: RandomNumberGenerator > (
85
95
filterShape: ( Int , Int , Int ) ,
86
96
stride: Int = 1 ,
87
97
padding: Padding = . valid,
98
+ dilation: Int = 1 ,
88
99
activation: @escaping Activation = identity,
89
100
generator: inout G
90
101
) {
@@ -95,7 +106,8 @@ public extension Conv1D where Scalar.RawSignificand: FixedWidthInteger {
95
106
bias: Tensor ( zeros: [ filterShape. 2 ] ) ,
96
107
activation: activation,
97
108
stride: stride,
98
- padding: padding)
109
+ padding: padding,
110
+ dilation: dilation)
99
111
}
100
112
}
101
113
@@ -109,12 +121,14 @@ public extension Conv1D {
109
121
/// `[width, inputChannels, outputChannels]`.
110
122
/// - stride: The stride of the sliding window for temporal dimension.
111
123
/// - padding: The padding algorithm for convolution.
124
+ /// - dilation: The dilation factor for the temporal dimension.
112
125
/// - activation: The element-wise activation function.
113
126
/// - seed: The random seed for initialization. The default value is random.
114
127
init (
115
128
filterShape: ( Int , Int , Int ) ,
116
129
stride: Int = 1 ,
117
130
padding: Padding = . valid,
131
+ dilation: Int = 1 ,
118
132
activation: @escaping Activation = identity,
119
133
seed: ( Int32 , Int32 ) = ( Int32 . random ( in: Int32 . min..< Int32 . max) ,
120
134
Int32 . random ( in: Int32 . min..< Int32 . max) )
@@ -126,7 +140,8 @@ public extension Conv1D {
126
140
bias: Tensor ( zeros: [ filterShape. 2 ] ) ,
127
141
activation: activation,
128
142
stride: stride,
129
- padding: padding)
143
+ padding: padding,
144
+ dilation: dilation)
130
145
}
131
146
}
132
147
@@ -148,7 +163,9 @@ public struct Conv2D<Scalar: TensorFlowFloatingPoint>: Layer {
148
163
@noDerivative public let strides : ( Int , Int )
149
164
/// The padding algorithm for convolution.
150
165
@noDerivative public let padding : Padding
151
-
166
+ /// The dilation factor for spatial dimensions.
167
+ @noDerivative public let dilations : ( Int , Int )
168
+
152
169
/// Creates a `Conv2D` layer with the specified filter, bias, activation function, strides, and
153
170
/// padding.
154
171
///
@@ -158,18 +175,21 @@ public struct Conv2D<Scalar: TensorFlowFloatingPoint>: Layer {
158
175
/// - activation: The element-wise activation function.
159
176
/// - strides: The strides of the sliding window for spatial dimensions.
160
177
/// - padding: The padding algorithm for convolution.
178
+ /// - dilations: The dilation factor for spatial dimensions.
161
179
public init (
162
180
filter: Tensor < Scalar > ,
163
181
bias: Tensor < Scalar > ,
164
182
activation: @escaping Activation ,
165
183
strides: ( Int , Int ) ,
166
- padding: Padding
184
+ padding: Padding ,
185
+ dilations: ( Int , Int )
167
186
) {
168
187
self . filter = filter
169
188
self . bias = bias
170
189
self . activation = activation
171
190
self . strides = strides
172
191
self . padding = padding
192
+ self . dilations = dilations
173
193
}
174
194
175
195
/// Returns the output obtained from applying the layer to the given input.
@@ -178,8 +198,12 @@ public struct Conv2D<Scalar: TensorFlowFloatingPoint>: Layer {
178
198
/// - Returns: The output.
179
199
@differentiable
180
200
public func callAsFunction( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
181
- return activation ( conv2D ( input, filter: filter, strides: ( 1 , strides. 0 , strides. 1 , 1 ) ,
182
- padding: padding) + bias)
201
+ return activation ( conv2D (
202
+ input,
203
+ filter: filter,
204
+ strides: ( 1 , strides. 0 , strides. 1 , 1 ) ,
205
+ padding: padding,
206
+ dilations: ( 1 , dilations. 0 , dilations. 1 , 1 ) ) + bias)
183
207
}
184
208
}
185
209
@@ -192,6 +216,7 @@ public extension Conv2D {
192
216
/// - filterShape: The shape of the 4-D convolution kernel.
193
217
/// - strides: The strides of the sliding window for spatial dimensions.
194
218
/// - padding: The padding algorithm for convolution.
219
+ /// - dilations: The dilation factor for spatial dimensions.
195
220
/// - activation: The element-wise activation function.
196
221
/// - generator: The random number generator for initialization.
197
222
///
@@ -201,6 +226,7 @@ public extension Conv2D {
201
226
filterShape: ( Int , Int , Int , Int ) ,
202
227
strides: ( Int , Int ) = ( 1 , 1 ) ,
203
228
padding: Padding = . valid,
229
+ dilations: ( Int , Int ) = ( 1 , 1 ) ,
204
230
activation: @escaping Activation = identity,
205
231
generator: inout G
206
232
) {
@@ -211,7 +237,8 @@ public extension Conv2D {
211
237
bias: Tensor ( zeros: [ filterShape. 3 ] ) ,
212
238
activation: activation,
213
239
strides: strides,
214
- padding: padding)
240
+ padding: padding,
241
+ dilations: dilations)
215
242
}
216
243
}
217
244
@@ -224,12 +251,14 @@ public extension Conv2D {
224
251
/// - filterShape: The shape of the 4-D convolution kernel.
225
252
/// - strides: The strides of the sliding window for spatial dimensions.
226
253
/// - padding: The padding algorithm for convolution.
254
+ /// - dilations: The dilation factor for spatial dimensions.
227
255
/// - activation: The element-wise activation function.
228
256
/// - seed: The random seed for initialization. The default value is random.
229
257
init (
230
258
filterShape: ( Int , Int , Int , Int ) ,
231
259
strides: ( Int , Int ) = ( 1 , 1 ) ,
232
260
padding: Padding = . valid,
261
+ dilations: ( Int , Int ) = ( 1 , 1 ) ,
233
262
activation: @escaping Activation = identity,
234
263
seed: ( Int32 , Int32 ) = ( Int32 . random ( in: Int32 . min..< Int32 . max) ,
235
264
Int32 . random ( in: Int32 . min..< Int32 . max) )
@@ -241,7 +270,8 @@ public extension Conv2D {
241
270
bias: Tensor ( zeros: [ filterShape. 3 ] ) ,
242
271
activation: activation,
243
272
strides: strides,
244
- padding: padding)
273
+ padding: padding,
274
+ dilations: dilations)
245
275
}
246
276
}
247
277
0 commit comments