This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit 1e30f3a

Shashi456 authored and rxwei committed
Fix a minor mistake in Adam documentation (#195)
The decay in Adam is learning rate decay and not weight decay.
1 parent 5a3485d commit 1e30f3a

File tree

1 file changed: 3 additions and 3 deletions

Sources/TensorFlow/Optimizer.swift

Lines changed: 3 additions & 3 deletions
@@ -53,7 +53,7 @@ public class Adam<Model: Layer>: Optimizer
     public var beta2: Float
     /// A small scalar added to the denominator to improve numerical stability.
     public var epsilon: Float
-    /// The weight decay.
+    /// The learning rate decay.
     public var decay: Float
     /// The current step.
     public var step: Int = 0
@@ -73,7 +73,7 @@ public class Adam<Model: Layer>: Optimizer
         precondition(learningRate >= 0, "Learning rate must be non-negative")
         precondition(0 <= beta1 && beta1 <= 1, "Beta parameter must be between 0 and 1")
         precondition(0 <= beta2 && beta2 <= 1, "Beta parameter must be between 0 and 1")
-        precondition(decay >= 0, "Weight decay must be non-negative")
+        precondition(decay >= 0, "Learning rate decay must be non-negative")

         self.learningRate = learningRate
         self.beta1 = beta1
@@ -216,7 +216,7 @@ public class SGD<Model: Layer>: Optimizer
     public var velocity: Model.AllDifferentiableVariables
     /// The set of steps taken.
     public var step: Int = 0
-
+
     public init(
         for model: __shared Model,
         learningRate: Float = 0.01,
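For readers unfamiliar with the distinction the commit message draws: `decay` scales the learning rate down as training proceeds, whereas weight decay would shrink the model's parameters themselves. Below is a minimal, self-contained Swift sketch of that difference; the `1 / (1 + decay * step)` schedule and all numeric values are illustrative assumptions, not the swift-apis implementation.

// A minimal sketch contrasting learning rate decay with weight decay.
// All hyperparameter values and the toy gradient are made up for illustration.
let baseLearningRate: Float = 0.01
let decay: Float = 0.001 // learning rate decay: shrinks the step size over time

var parameter: Float = 1.0
let gradient: Float = 0.5

for step in 1...3 {
    // One common learning-rate-decay schedule: lr_t = lr / (1 + decay * t).
    let learningRate = baseLearningRate / (1 + decay * Float(step))

    // Plain gradient step using the decayed learning rate.
    parameter -= learningRate * gradient

    // Weight decay, by contrast, would also shrink the parameter itself, e.g.
    // parameter -= learningRate * 0.0001 * parameter
    print("step \(step): learningRate = \(learningRate), parameter = \(parameter)")
}

Note that only the step size changes under learning rate decay; the parameter is pulled toward zero only if an explicit weight-decay term is added, which is why the renamed documentation matters.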

0 commit comments