@@ -26,7 +26,7 @@ public protocol Optimizer {
     /// The scalar parameter type.
     associatedtype Scalar: FloatingPoint
     /// The learning rate.
-    var learningRate: Scalar { get }
+    var learningRate: Scalar { get set }
     /// Updates the specified differentiable variables along the specified
     /// direction.
     mutating func update(_ variables: inout Model.AllDifferentiableVariables,
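Relaxing the protocol requirement from `{ get }` to `{ get set }` is what enables generic learning-rate schedules written against the protocol. A minimal sketch of what this allows (the schedule itself is illustrative, not part of this change):

```swift
import TensorFlow

/// Halves an optimizer's learning rate every `every` steps.
/// Illustrative only; step counting is left to the training loop.
func stepDecay<Opt: Optimizer>(_ optimizer: inout Opt, step: Int, every: Int) {
    if step > 0, step % every == 0 {
        optimizer.learningRate /= 2  // legal now that the requirement is { get set }
    }
}
```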
@@ -42,17 +42,17 @@ public protocol Optimizer {
 public class Adam<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
     where Model.AllDifferentiableVariables == Model.CotangentVector {
     /// The learning rate.
-    public let learningRate: Scalar
+    public var learningRate: Scalar
     /// A coefficient used to calculate the first and second moments of
     /// gradients.
     public var beta1: Scalar
     /// A coefficient used to calculate the first and second moments of
     /// gradients.
     public var beta2: Scalar
     /// A small scalar added to the denominator to improve numerical stability.
-    public let epsilon: Scalar
+    public var epsilon: Scalar
     /// The weight decay.
-    public let decay: Scalar
+    public var decay: Scalar

     public init(
         learningRate: Scalar = 1e-3,
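With Adam's stored hyperparameters relaxed from `let` to `var`, they can be adjusted on a live optimizer. A sketch assuming a hypothetical `Layer`-conforming model type `MyModel` and the initializer's remaining defaults:

```swift
import TensorFlow

// `MyModel` is a hypothetical model type conforming to `Layer`.
let adam = Adam<MyModel, Float>(learningRate: 1e-3)

// Each of these was `let` before this change; they can now be
// mutated mid-training (e.g. by a schedule or a callback).
adam.learningRate = 1e-4
adam.epsilon = 1e-7
adam.decay = 0.01
```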
@@ -122,13 +122,13 @@ public class Adam<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
 public class RMSProp<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
     where Model.AllDifferentiableVariables == Model.CotangentVector {
     /// The learning rate.
-    public let learningRate: Scalar
+    public var learningRate: Scalar
     // TODO: Document `rho`. Keras doesn't document `rho`.
-    public let rho: Scalar
+    public var rho: Scalar
     /// A small scalar added to the denominator to improve numerical stability.
-    public let epsilon: Scalar
+    public var epsilon: Scalar
     /// The weight decay.
-    public let decay: Scalar
+    public var decay: Scalar

     public init(
         learningRate: Scalar = 0.001,
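The same relaxation applies to RMSProp; in particular `rho` (conventionally, the decay rate of the running average of squared gradients) becomes tunable at runtime. A sketch under the same `MyModel` assumption:

```swift
import TensorFlow

// `MyModel` is again hypothetical; remaining init parameters are
// assumed to keep their defaults.
let rmsprop = RMSProp<MyModel, Float>(learningRate: 0.001)
rmsprop.rho = 0.95  // adjustable now that the property is `var`
```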
@@ -180,14 +180,14 @@ public class RMSProp<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
 public class SGD<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
     where Model.AllDifferentiableVariables == Model.CotangentVector {
     /// The learning rate.
-    public let learningRate: Scalar
+    public var learningRate: Scalar
     /// The momentum factor. It accelerates stochastic gradient descent in the relevant direction
     /// and dampens oscillations.
-    public let momentum: Scalar
+    public var momentum: Scalar
     /// The weight decay.
-    public let decay: Scalar
+    public var decay: Scalar
     /// Use Nesterov momentum if true.
-    public let nesterov: Bool
+    public var nesterov: Bool

     public init(
         learningRate: Scalar = 0.01,
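For SGD, mutability extends to the Boolean `nesterov` flag, so a training loop can switch strategies between phases. A sketch under the same assumptions:

```swift
import TensorFlow

// `MyModel` is hypothetical; remaining init parameters default.
let sgd = SGD<MyModel, Float>(learningRate: 0.01)

// E.g. plain SGD for warm-up, then Nesterov momentum afterwards.
sgd.momentum = 0.9
sgd.nesterov = true
```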