7 files changed: 8 additions, 8 deletions.

@@ -93,6 +93,6 @@ for epoch in 1...epochCount {
             return meanSquaredError(predicted: image, expected: x)
         }
 
-        optimizer.update(&autoencoder.allDifferentiableVariables, along: 𝛁model)
+        optimizer.update(&autoencoder, along: 𝛁model)
     }
 }
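
Every hunk in this change is the same one-line migration: optimizer.update now takes the model itself as its inout argument instead of model.allDifferentiableVariables. A minimal sketch of the new call site, assuming the Swift for TensorFlow Layer/SGD API of this era; TinyModel, its shapes, and the toy data are invented purely for illustration:

    import TensorFlow

    // Hypothetical toy model, defined only to show the new update call.
    struct TinyModel: Layer {
        var dense = Dense<Float>(inputSize: 2, outputSize: 1)

        @differentiable
        func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
            return dense(input)
        }
    }

    var model = TinyModel()
    let optimizer = SGD(for: model, learningRate: 0.01)
    let x = Tensor<Float>(randomNormal: [4, 2])  // toy inputs
    let y = Tensor<Float>(zeros: [4, 1])         // toy targets

    let 𝛁model = gradient(at: model) { model -> Tensor<Float> in
        meanSquaredError(predicted: model(x), expected: y)
    }

    // Before: optimizer.update(&model.allDifferentiableVariables, along: 𝛁model)
    // After: the optimizer mutates the model in place.
    optimizer.update(&model, along: 𝛁model)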
@@ -85,7 +85,7 @@ extension CatchAgent {
 
         let 𝛁loss = -log(Tensor<Float>(ŷ.max())).broadcasted(like: ŷ) * previousReward
         let (𝛁model, _) = backprop(𝛁loss)
-        optimizer.update(&model.allDifferentiableVariables, along: 𝛁model)
+        optimizer.update(&model, along: 𝛁model)
 
         return CatchAction(rawValue: Int(maxIndex))!
     }
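
In this hunk the gradient comes from a manually seeded pullback (backprop) rather than gradient(at:). A sketch of that pattern under the new API, assuming the S4TF-era appliedForBackpropagation(to:) method; the input tensor name here is hypothetical:

    // Run the model forward and keep a backpropagator (pullback).
    let (ŷ, backprop) = model.appliedForBackpropagation(to: input)
    // Seed the pullback with the policy-gradient loss derivative.
    let 𝛁loss = -log(Tensor<Float>(ŷ.max())).broadcasted(like: ŷ) * previousReward
    let (𝛁model, _) = backprop(𝛁loss)
    // New API: update the model directly.
    optimizer.update(&model, along: 𝛁model)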
@@ -39,7 +39,7 @@ for epoch in 1...100 {
         }
         trainingLossSum += loss.scalarized()
         trainingBatchCount += 1
-        optimizer.update(&model.allDifferentiableVariables, along: gradients)
+        optimizer.update(&model, along: gradients)
     }
 
     var testLossSum: Float = 0
@@ -52,7 +52,7 @@ for epoch in 1...epochCount {
         return loss
     }
     // Update the model's differentiable variables along the gradient vector.
-    optimizer.update(&classifier.allDifferentiableVariables, along: 𝛁model)
+    optimizer.update(&classifier, along: 𝛁model)
 }
 
 Context.local.learningPhase = .inference
@@ -44,7 +44,7 @@ for epoch in 1...10 {
         }
         trainingLossSum += loss.scalarized()
         trainingBatchCount += 1
-        optimizer.update(&model.allDifferentiableVariables, along: gradients)
+        optimizer.update(&model, along: gradients)
     }
     var testLossSum: Float = 0
     var testBatchCount = 0
@@ -154,7 +154,7 @@ for epoch in 1...epochCount {
             let loss = generatorLoss(fakeLogits: fakeLogits)
             return loss
         }
-        optG.update(&generator.allDifferentiableVariables, along: 𝛁generator)
+        optG.update(&generator, along: 𝛁generator)
 
         // Update discriminator.
         let realImages = dataset.trainingImages.minibatch(at: i, batchSize: batchSize)
@@ -167,7 +167,7 @@ for epoch in 1...epochCount {
             let loss = discriminatorLoss(realLogits: realLogits, fakeLogits: fakeLogits)
             return loss
         }
-        optD.update(&discriminator.allDifferentiableVariables, along: 𝛁discriminator)
+        optD.update(&discriminator, along: 𝛁discriminator)
     }
 
     // Start inference phase.
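
The GAN file gets the change twice, once per network. A sketch of the resulting two-optimizer step; optG, optD, generatorLoss, discriminatorLoss, and realImages come from the diff context, while noise stands in for the latent minibatch built earlier in the loop:

    // Generator step: push fake images toward fooling the discriminator.
    let 𝛁generator = gradient(at: generator) { generator -> Tensor<Float> in
        let fakeLogits = discriminator(generator(noise))
        return generatorLoss(fakeLogits: fakeLogits)
    }
    optG.update(&generator, along: 𝛁generator)

    // Discriminator step: separate real logits from fake ones.
    let 𝛁discriminator = gradient(at: discriminator) { discriminator -> Tensor<Float> in
        let realLogits = discriminator(realImages)
        let fakeLogits = discriminator(generator(noise))
        return discriminatorLoss(realLogits: realLogits, fakeLogits: fakeLogits)
    }
    optD.update(&discriminator, along: 𝛁discriminator)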
@@ -184,7 +184,7 @@ while true {
             return loss
         }
     }
-    optimizer.update(&net.allDifferentiableVariables, along: gradients)
+    optimizer.update(&net, along: gradients)
 
     print("It has episode count \(episodeCount) and mean reward \(meanReward)")