This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit a265f8f

Added default values to some of the convolution layer initializers. (#277)
1 parent f0d3a16 commit a265f8f

File tree

1 file changed: +40 −33 lines changed


Sources/TensorFlow/Layers/Convolutional.swift

Lines changed: 40 additions & 33 deletions
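
With these defaults in place, a convolution layer can be constructed from just a filter and a bias; the activation, stride(s), padding, and dilation(s) fall back to identity, 1, .valid, and 1 respectively. A minimal before/after sketch for Conv1D (the tensor shapes and zero-filled values below are illustrative placeholders, not part of this commit):

// Before this change, every initializer argument had to be spelled out.
let conv = Conv1D<Float>(
    filter: Tensor(zeros: [3, 16, 32]),  // placeholder [width, inputChannels, outputChannels]
    bias: Tensor(zeros: [32]),           // placeholder [outputChannels]
    activation: identity,
    stride: 1,
    padding: .valid,
    dilation: 1)

// After this change, the same layer can be written with only the required arguments.
let convWithDefaults = Conv1D<Float>(
    filter: Tensor(zeros: [3, 16, 32]),
    bias: Tensor(zeros: [32]))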
@@ -26,11 +26,11 @@ public struct Conv1D<Scalar: TensorFlowFloatingPoint>: Layer {
     public typealias Activation = @differentiable (Tensor<Scalar>) -> Tensor<Scalar>
     /// The element-wise activation function.
     @noDerivative public let activation: Activation
-    /// The stride of the sliding window for temporal dimension.
+    /// The stride of the sliding window for the temporal dimension.
     @noDerivative public let stride: Int
     /// The padding algorithm for convolution.
     @noDerivative public let padding: Padding
-    /// The dilation factor for temporal dimension.
+    /// The dilation factor for the temporal dimension.
     @noDerivative public let dilation: Int

     /// Creates a `Conv1D` layer with the specified filter, bias, activation function, stride, and
@@ -40,16 +40,16 @@ public struct Conv1D<Scalar: TensorFlowFloatingPoint>: Layer {
     ///   - filter: The 3-D convolution kernel `[width, inputChannels, outputChannels]`.
     ///   - bias: The bias vector `[outputChannels]`.
     ///   - activation: The element-wise activation function.
-    ///   - stride: The stride of the sliding window for temporal dimension.
+    ///   - stride: The stride of the sliding window for the temporal dimension.
     ///   - padding: The padding algorithm for convolution.
-    ///   - dilation: The dilation factor for temporal dimension.
+    ///   - dilation: The dilation factor for the temporal dimension.
     public init(
         filter: Tensor<Scalar>,
         bias: Tensor<Scalar>,
-        activation: @escaping Activation,
-        stride: Int,
-        padding: Padding,
-        dilation: Int
+        activation: @escaping Activation = identity,
+        stride: Int = 1,
+        padding: Padding = .valid,
+        dilation: Int = 1
     ) {
         self.filter = filter
         self.bias = bias
@@ -83,9 +83,9 @@ public extension Conv1D where Scalar.RawSignificand: FixedWidthInteger {
     /// - Parameters:
     ///   - filterShape: The 3-D shape of the filter, representing
     ///     `[width, inputChannels, outputChannels]`.
-    ///   - stride: The stride of the sliding window for temporal dimension.
+    ///   - stride: The stride of the sliding window for the temporal dimension.
     ///   - padding: The padding algorithm for convolution.
-    ///   - dilation: The dilation factor for temporal dimension.
+    ///   - dilation: The dilation factor for the temporal dimension.
     ///   - activation: The element-wise activation function.
     ///   - generator: The random number generator for initialization.
     ///
@@ -119,7 +119,7 @@ public extension Conv1D {
     /// - Parameters:
     ///   - filterShape: The 3-D shape of the filter, representing
     ///     `[width, inputChannels, outputChannels]`.
-    ///   - stride: The stride of the sliding window for temporal dimension.
+    ///   - stride: The stride of the sliding window for the temporal dimension.
     ///   - padding: The padding algorithm for convolution.
     ///   - dilation: The dilation factor for the temporal dimension.
     ///   - activation: The element-wise activation function.
@@ -179,10 +179,10 @@ public struct Conv2D<Scalar: TensorFlowFloatingPoint>: Layer {
     public init(
         filter: Tensor<Scalar>,
         bias: Tensor<Scalar>,
-        activation: @escaping Activation,
-        strides: (Int, Int),
-        padding: Padding,
-        dilations: (Int, Int)
+        activation: @escaping Activation = identity,
+        strides: (Int, Int) = (1, 1),
+        padding: Padding = .valid,
+        dilations: (Int, Int) = (1, 1)
     ) {
         self.filter = filter
         self.bias = bias
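
Conv2D gets the same treatment, with tuple defaults for the strides and dilations. Because the parameters keep their labels, any one default can still be overridden on its own; a hedged sketch (placeholder tensors, not from this commit) that changes only the padding:

// activation, strides, and dilations use the new defaults
// (identity, (1, 1), (1, 1)); only padding is specified.
let conv2d = Conv2D<Float>(
    filter: Tensor(zeros: [3, 3, 16, 32]),  // placeholder 4-D kernel
    bias: Tensor(zeros: [32]),
    padding: .same)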
@@ -306,9 +306,9 @@ public struct Conv3D<Scalar: TensorFlowFloatingPoint>: Layer {
     public init(
         filter: Tensor<Scalar>,
         bias: Tensor<Scalar>,
-        activation: @escaping Activation,
-        strides: (Int, Int, Int),
-        padding: Padding
+        activation: @escaping Activation = identity,
+        strides: (Int, Int, Int) = (1, 1, 1),
+        padding: Padding = .valid
     ) {
         self.filter = filter
         self.bias = bias
@@ -323,9 +323,11 @@ public struct Conv3D<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Returns: The output.
     @differentiable
     public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
-        return activation(conv3D(input, filter: filter,
-                                 strides: (1, strides.0, strides.1, strides.2, 1),
-                                 padding: padding) + bias)
+        return activation(conv3D(
+            input,
+            filter: filter,
+            strides: (1, strides.0, strides.1, strides.2, 1),
+            padding: padding) + bias)
     }
 }
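
The reformatted call also makes the stride layout easier to read: the layer's three-element strides tuple covers only the spatial dimensions, while the batch and channel strides handed to conv3D stay fixed at 1. A small illustration (the names here are hypothetical):

// User-facing strides cover the three spatial dimensions only.
let spatialStrides = (2, 2, 2)
// What callAsFunction passes to conv3D: batch and channel strides remain 1.
let opStrides = (1, spatialStrides.0, spatialStrides.1, spatialStrides.2, 1)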

@@ -423,9 +425,9 @@ public struct TransposedConv2D: Layer {
     public init(
         filter: Tensor<Float>,
         bias: Tensor<Float>,
-        activation: @escaping Activation,
-        strides: (Int, Int),
-        padding: Padding
+        activation: @escaping Activation = identity,
+        strides: (Int, Int) = (1, 1),
+        padding: Padding = .valid
     ) {
         self.filter = filter
         self.bias = bias
@@ -448,9 +450,12 @@
             strides.1 + (filter.shape[1] * paddingIndex)
         let c = filter.shape[2]
         let newShape = Tensor<Int32>([Int32(batchSize), Int32(w), Int32(h), Int32(c)])
-        return activation(conv2DBackpropInput(input, shape: newShape, filter: filter,
-                                              strides: (1, strides.0, strides.1, 1),
-                                              padding: padding) + bias)
+        return activation(conv2DBackpropInput(
+            input,
+            shape: newShape,
+            filter: filter,
+            strides: (1, strides.0, strides.1, 1),
+            padding: padding) + bias)
     }
 }

@@ -547,9 +552,9 @@ public struct DepthwiseConv2D<Scalar: TensorFlowFloatingPoint>: Layer {
     public init(
         filter: Tensor<Scalar>,
         bias: Tensor<Scalar>,
-        activation: @escaping Activation,
-        strides: (Int, Int),
-        padding: Padding
+        activation: @escaping Activation = identity,
+        strides: (Int, Int) = (1, 1),
+        padding: Padding = .valid
     ) {
         self.filter = filter
         self.bias = bias
@@ -564,9 +569,11 @@ public struct DepthwiseConv2D<Scalar: TensorFlowFloatingPoint>: Layer {
     /// - Returns: The output.
     @differentiable
     public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
-        return activation(depthwiseConv2D(input, filter: filter,
-                                          strides: (1, strides.0, strides.1, 1),
-                                          padding: padding) + bias)
+        return activation(depthwiseConv2D(
+            input,
+            filter: filter,
+            strides: (1, strides.0, strides.1, 1),
+            padding: padding) + bias)
     }
 }
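
Taken together, an end-to-end sketch of the simplified usage, assuming placeholder shapes and zero-filled tensors (none of this appears in the commit):

// A depthwise layer built with only the required arguments; the defaults
// give identity activation, (1, 1) strides, and .valid padding.
let depthwise = DepthwiseConv2D<Float>(
    filter: Tensor(zeros: [3, 3, 16, 1]),  // placeholder depthwise kernel
    bias: Tensor(zeros: [16]))
let input = Tensor<Float>(zeros: [1, 28, 28, 16])  // placeholder batched input
let output = depthwise(input)  // applies the callAsFunction shown above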

0 commit comments

Comments
 (0)