@@ -31,8 +31,7 @@ public protocol Layer: Differentiable & KeyPathIterable
31
31
32
32
/// Returns the output obtained from applying the layer to the given input.
33
33
///
34
- /// - Parameters:
35
- /// - input: The input to the layer.
34
+ /// - Parameter input: The input to the layer.
36
35
/// - Returns: The output.
37
36
@differentiable
38
37
func call( _ input: Input ) -> Output
@@ -216,8 +215,7 @@ public struct Dense<Scalar: TensorFlowFloatingPoint>: Layer {
216
215
217
216
/// Returns the output obtained from applying the layer to the given input.
218
217
///
219
- /// - Parameters:
220
- /// - input: The input to the layer.
218
+ /// - Parameter input: The input to the layer.
221
219
/// - Returns: The output.
222
220
@differentiable
223
221
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -325,8 +323,7 @@ public struct Conv1D<Scalar: TensorFlowFloatingPoint>: Layer {
325
323
326
324
/// Returns the output obtained from applying the layer to the given input.
327
325
///
328
- /// - Parameters:
329
- /// - input: The input to the layer `[batchCount, width, inputChannels]`.
326
+ /// - Parameter input: The input to the layer `[batchCount, width, inputChannels]`.
330
327
/// - Returns: The output `[batchCount, newWidth, outputChannels]`.
331
328
@differentiable
332
329
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -444,8 +441,7 @@ public struct Conv2D<Scalar: TensorFlowFloatingPoint>: Layer {
444
441
445
442
/// Returns the output obtained from applying the layer to the given input.
446
443
///
447
- /// - Parameters:
448
- /// - input: The input to the layer.
444
+ /// - Parameter input: The input to the layer.
449
445
/// - Returns: The output.
450
446
@differentiable
451
447
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -563,8 +559,7 @@ public struct TransposedConv2D: Layer {
563
559
564
560
/// Returns the output obtained from applying the layer to the given input.
565
561
///
566
- /// - Parameters:
567
- /// - input: The input to the layer.
562
+ /// - Parameter input: The input to the layer.
568
563
/// - Returns: The output.
569
564
@differentiable
570
565
public func call( _ input: Tensor < Float > ) -> Tensor < Float > {
@@ -718,8 +713,7 @@ public struct BatchNorm<Scalar: TensorFlowFloatingPoint>: Layer {
718
713
719
714
/// Returns the output obtained from applying the layer to the given input.
720
715
///
721
- /// - Parameters:
722
- /// - input: The input to the layer.
716
+ /// - Parameter input: The input to the layer.
723
717
/// - Returns: The output.
724
718
@differentiable ( vjp: _vjpApplied ( to: ) )
725
719
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -796,8 +790,7 @@ public struct MaxPool1D<Scalar: TensorFlowFloatingPoint>: Layer {
796
790
797
791
/// Returns the output obtained from applying the layer to the given input.
798
792
///
799
- /// - Parameters:
800
- /// - input: The input to the layer.
793
+ /// - Parameter input: The input to the layer.
801
794
/// - Returns: The output.
802
795
@differentiable
803
796
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -843,8 +836,7 @@ public struct MaxPool2D<Scalar: TensorFlowFloatingPoint>: Layer {
843
836
844
837
/// Returns the output obtained from applying the layer to the given input.
845
838
///
846
- /// - Parameters:
847
- /// - input: The input to the layer.
839
+ /// - Parameter input: The input to the layer.
848
840
/// - Returns: The output.
849
841
@differentiable
850
842
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -881,8 +873,7 @@ public struct AvgPool1D<Scalar: TensorFlowFloatingPoint>: Layer {
881
873
882
874
/// Returns the output obtained from applying the layer to the given input.
883
875
///
884
- /// - Parameters:
885
- /// - input: The input to the layer.
876
+ /// - Parameter input: The input to the layer.
886
877
/// - Returns: The output.
887
878
@differentiable
888
879
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -928,8 +919,7 @@ public struct AvgPool2D<Scalar: TensorFlowFloatingPoint>: Layer {
928
919
929
920
/// Returns the output obtained from applying the layer to the given input.
930
921
///
931
- /// - Parameters:
932
- /// - input: The input to the layer.
922
+ /// - Parameter input: The input to the layer.
933
923
/// - Returns: The output.
934
924
@differentiable
935
925
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -946,8 +936,7 @@ public struct GlobalAvgPool1D<Scalar: TensorFlowFloatingPoint>: Layer {
946
936
947
937
/// Returns the output obtained from applying the layer to the given input.
948
938
///
949
- /// - Parameters:
950
- /// - input: The input to the layer.
939
+ /// - Parameter input: The input to the layer.
951
940
/// - Returns: The output.
952
941
@differentiable
953
942
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -963,8 +952,7 @@ public struct GlobalAvgPool2D<Scalar: TensorFlowFloatingPoint>: Layer {
963
952
964
953
/// Returns the output obtained from applying the layer to the given input.
965
954
///
966
- /// - Parameters:
967
- /// - input: The input to the layer.
955
+ /// - Parameter input: The input to the layer.
968
956
/// - Returns: The output.
969
957
@differentiable
970
958
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -980,8 +968,7 @@ public struct GlobalAvgPool3D<Scalar: TensorFlowFloatingPoint>: Layer {
980
968
981
969
/// Returns the output obtained from applying the layer to the given input.
982
970
///
983
- /// - Parameters:
984
- /// - input: The input to the layer.
971
+ /// - Parameter input: The input to the layer.
985
972
/// - Returns: The output.
986
973
@differentiable
987
974
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -1035,8 +1022,7 @@ public struct LayerNorm<Scalar: TensorFlowFloatingPoint>: Layer {
1035
1022
1036
1023
/// Returns the output obtained from applying the layer to the given input.
1037
1024
///
1038
- /// - Parameters:
1039
- /// - input: The input to the layer.
1025
+ /// - Parameter input: The input to the layer.
1040
1026
/// - Returns: The output.
1041
1027
@differentiable
1042
1028
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -1085,8 +1071,7 @@ public struct Dropout<Scalar: TensorFlowFloatingPoint>: Layer {
1085
1071
1086
1072
/// Returns the output obtained from applying the layer to the given input.
1087
1073
///
1088
- /// - Parameters:
1089
- /// - input: The input to the layer.
1074
+ /// - Parameter input: The input to the layer.
1090
1075
/// - Returns: The output.
1091
1076
@differentiable ( vjp: _vjpApplied ( to: ) )
1092
1077
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -1129,8 +1114,7 @@ public struct UpSampling1D<Scalar: TensorFlowFloatingPoint>: Layer {
1129
1114
1130
1115
/// Returns the output obtained from applying the layer to the given input.
1131
1116
///
1132
- /// - Parameters:
1133
- /// - input: The input to the layer.
1117
+ /// - Parameter input: The input to the layer.
1134
1118
/// - Returns: The output.
1135
1119
@differentiable
1136
1120
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -1156,8 +1140,7 @@ public struct UpSampling2D<Scalar: TensorFlowFloatingPoint>: Layer {
1156
1140
1157
1141
/// Returns the output obtained from applying the layer to the given input.
1158
1142
///
1159
- /// - Parameters:
1160
- /// - input: The input to the layer.
1143
+ /// - Parameter input: The input to the layer.
1161
1144
/// - Returns: The output.
1162
1145
@differentiable
1163
1146
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -1179,8 +1162,7 @@ public struct Flatten<Scalar: TensorFlowFloatingPoint>: Layer {
1179
1162
1180
1163
/// Returns the output obtained from applying the layer to the given input.
1181
1164
///
1182
- /// - Parameters:
1183
- /// - input: The input to the layer.
1165
+ /// - Parameter input: The input to the layer.
1184
1166
/// - Returns: The output.
1185
1167
@differentiable
1186
1168
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -1216,8 +1198,7 @@ public struct Reshape<Scalar: TensorFlowFloatingPoint>: Layer {
1216
1198
1217
1199
/// Returns the output obtained from applying the layer to the given input.
1218
1200
///
1219
- /// - Parameters:
1220
- /// - input: The input to the layer.
1201
+ /// - Parameter input: The input to the layer.
1221
1202
/// - Returns: The output.
1222
1203
@differentiable
1223
1204
public func call( _ input: Tensor < Scalar > ) -> Tensor < Scalar > {
@@ -1280,7 +1261,7 @@ public extension RNNCell {
1280
1261
}
1281
1262
}
1282
1263
1283
- /// A Simple RNN Cell .
1264
+ /// A simple RNN cell.
1284
1265
public struct SimpleRNNCell < Scalar: TensorFlowFloatingPoint > : RNNCell , VectorNumeric {
1285
1266
public var weight : Tensor < Scalar >
1286
1267
public var bias : Tensor < Scalar >
@@ -1316,10 +1297,7 @@ public struct SimpleRNNCell<Scalar: TensorFlowFloatingPoint>: RNNCell, VectorNum
1316
1297
1317
1298
/// Returns the output obtained from applying the layer to the given input.
1318
1299
///
1319
- /// - Parameters:
1320
- /// - input: The input to the layer.
1321
- /// - context: The contextual information for the layer application, e.g. the current learning
1322
- /// phase.
1300
+ /// - Parameter input: The input to the layer.
1323
1301
/// - Returns: The hidden state.
1324
1302
@differentiable
1325
1303
public func call( _ input: Input ) -> Output {
@@ -1329,7 +1307,7 @@ public struct SimpleRNNCell<Scalar: TensorFlowFloatingPoint>: RNNCell, VectorNum
1329
1307
}
1330
1308
}
1331
1309
1332
- /// An LSTM Cell .
1310
+ /// An LSTM cell.
1333
1311
public struct LSTMCell < Scalar: TensorFlowFloatingPoint > : RNNCell , VectorNumeric {
1334
1312
public var inputWeight , updateWeight , forgetWeight , outputWeight : Tensor < Scalar >
1335
1313
public var inputBias , updateBias , forgetBias , outputBias : Tensor < Scalar >
@@ -1381,10 +1359,7 @@ public struct LSTMCell<Scalar: TensorFlowFloatingPoint>: RNNCell, VectorNumeric
1381
1359
1382
1360
/// Returns the output obtained from applying the layer to the given input.
1383
1361
///
1384
- /// - Parameters:
1385
- /// - input: The input to the layer.
1386
- /// - context: The contextual information for the layer application, e.g. the current learning
1387
- /// phase.
1362
+ /// - Parameter input: The input to the layer.
1388
1363
/// - Returns: The hidden state.
1389
1364
@differentiable
1390
1365
public func call( _ input: Input ) -> Output {
0 commit comments