
Commit b4cfae8

'gradient' -> 'vector'
1 parent cc7c23d · commit b4cfae8

1 file changed: +8 −8 lines

Sources/DeepLearning/Optimizer.swift

Lines changed: 8 additions & 8 deletions
@@ -64,10 +64,10 @@ public class Adam<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
         let stepSize = learningRate * (sqrt(1 - pow(beta2, step)) / (1 - pow(beta1, step)))
         for kp in model.recursivelyAllWritableKeyPaths(to: Tensor<Scalar>.self) {
             firstMoments[keyPath: kp] =
-                firstMoments[keyPath: kp] * beta1 + (1 - beta1) * gradient[keyPath: kp]
+                firstMoments[keyPath: kp] * beta1 + (1 - beta1) * vector[keyPath: kp]
             secondMoments[keyPath: kp] =
                 secondMoments[keyPath: kp] * beta2 + (1 - beta2) *
-                gradient[keyPath: kp] * gradient[keyPath: kp]
+                vector[keyPath: kp] * vector[keyPath: kp]
             model[keyPath: kp] -=
                 stepSize * firstMoments[keyPath: kp] / (sqrt(secondMoments[keyPath: kp]) + epsilon)
         }
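
For reference, the loop in this hunk is Adam's standard moment update with a bias-corrected step size. Below is a minimal sketch of the same arithmetic over plain `[Double]` arrays instead of key paths; the hyperparameter and sample values are illustrative defaults, not taken from this file.

```swift
import Foundation

// Adam step, mirroring the loop above over plain arrays.
// beta1, beta2, epsilon, learningRate, and the data are illustrative.
let beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, learningRate = 1e-3
let step = 1.0

var parameters = [0.5, -0.3]
let vector = [0.1, -0.2]        // the cotangent vector (the gradient)
var firstMoments = [0.0, 0.0]
var secondMoments = [0.0, 0.0]

let stepSize = learningRate * (sqrt(1 - pow(beta2, step)) / (1 - pow(beta1, step)))
for i in parameters.indices {
    // Exponential moving averages of the gradient and its square.
    firstMoments[i] = firstMoments[i] * beta1 + (1 - beta1) * vector[i]
    secondMoments[i] = secondMoments[i] * beta2 + (1 - beta2) * vector[i] * vector[i]
    parameters[i] -= stepSize * firstMoments[i] / (sqrt(secondMoments[i]) + epsilon)
}
print(parameters)
```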
@@ -106,9 +106,9 @@ public class RMSProp<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
         let learningRate = self.learningRate * 1 / (1 + decay * step)
         for kp in model.recursivelyAllWritableKeyPaths(to: Tensor<Scalar>.self) {
             alpha[keyPath: kp] =
-                rho * alpha[keyPath: kp] + (1 - rho) * pow(gradient[keyPath: kp], 2)
+                rho * alpha[keyPath: kp] + (1 - rho) * pow(vector[keyPath: kp], 2)
             model[keyPath: kp] -=
-                learningRate * gradient[keyPath: kp] / (sqrt(alpha[keyPath: kp]) + epsilon)
+                learningRate * vector[keyPath: kp] / (sqrt(alpha[keyPath: kp]) + epsilon)
         }
     }
 }
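
This hunk is RMSProp's update: `alpha` accumulates a decaying average of squared gradients that scales the step per parameter. A minimal array-based sketch of the same computation, with illustrative values for `rho`, `epsilon`, and `learningRate` (not taken from this file):

```swift
import Foundation

// RMSProp step, mirroring the loop above over plain arrays.
let rho = 0.9, epsilon = 1e-8, learningRate = 1e-3

var parameters = [0.5, -0.3]
let vector = [0.1, -0.2]        // the cotangent vector (the gradient)
var alpha = [0.0, 0.0]          // decaying average of squared gradients

for i in parameters.indices {
    alpha[i] = rho * alpha[i] + (1 - rho) * pow(vector[i], 2)
    parameters[i] -= learningRate * vector[i] / (sqrt(alpha[i]) + epsilon)
}
print(parameters)
```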
@@ -140,15 +140,15 @@ public class SGD<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
     private var velocity = Model.AllDifferentiableVariables.zero
 
     public func update(_ model: inout Model.AllDifferentiableVariables,
-                       along vectors: Model.CotangentVector) {
+                       along vector: Model.CotangentVector) {
         step += 1
         let learningRate = self.learningRate * 1 / (1 + decay * step)
         for kp in model.recursivelyAllWritableKeyPaths(to: Tensor<Scalar>.self) {
             velocity[keyPath: kp] =
-                momentum * velocity[keyPath: kp] - learningRate * gradients[keyPath: kp]
+                momentum * velocity[keyPath: kp] - learningRate * vector[keyPath: kp]
             if nesterov {
                 model[keyPath: kp] +=
-                    momentum * velocity[keyPath: kp] - learningRate * gradients[keyPath: kp]
+                    momentum * velocity[keyPath: kp] - learningRate * vector[keyPath: kp]
             } else {
                 model[keyPath: kp] += velocity[keyPath: kp]
             }
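
This hunk is momentum SGD, with an optional Nesterov look-ahead branch. The sketch below reproduces both branches over plain arrays; `momentum`, `learningRate`, `nesterov`, and the sample data are illustrative values, not this file's defaults.

```swift
// Momentum SGD step, mirroring the loop above over plain arrays.
let momentum = 0.9, learningRate = 0.01
let nesterov = true

var parameters = [0.5, -0.3]
let vector = [0.1, -0.2]        // the cotangent vector (the gradient)
var velocity = [0.0, 0.0]

for i in parameters.indices {
    velocity[i] = momentum * velocity[i] - learningRate * vector[i]
    if nesterov {
        // Nesterov look-ahead: apply another momentum/gradient correction.
        parameters[i] += momentum * velocity[i] - learningRate * vector[i]
    } else {
        parameters[i] += velocity[i]
    }
}
print(parameters)
```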
@@ -168,6 +168,6 @@ public class RiemannSGD<Model: Layer, Scalar: FloatingPoint>: Optimizer
 
     public func update(_ model: inout Model.AllDifferentiableVariables,
                        along vector: Model.CotangentVector) {
-        model = model.moved(along: learningRate * (.zero - model.tangentVector(from: gradient)))
+        model = model.moved(along: learningRate * (.zero - model.tangentVector(from: vector)))
     }
 }
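
RiemannSGD phrases the step as moving the model along a scaled negative tangent vector. A minimal Euclidean stand-in for that step, where points are plain `[Double]`, the tangent vector coincides with the cotangent vector, and `moved(along:)` reduces to pointwise addition; all names here are hypothetical illustrations, not the library's API:

```swift
// Euclidean stand-in for the Riemannian step above.
func moved(_ point: [Double], along tangent: [Double]) -> [Double] {
    zip(point, tangent).map { $0 + $1 }
}

let learningRate = 0.1
var model = [0.5, -0.3]
let vector = [0.1, -0.2]                            // cotangent vector (gradient)
let step = vector.map { learningRate * (0 - $0) }   // learningRate * (.zero - v)
model = moved(model, along: step)
print(model)
```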

0 commit comments
