204 | 204 | "outputs": [],
205 | 205 | "source": [
206 | 206 | "var x: Float = 30\n",
207 |     | - "x.gradient { x -> Float in\n",
    | 207 | + "gradient(at: x) { x -> Float in\n",
208 | 208 | " // Print the partial derivative with respect to the result of `sin(x)`.\n",
209 | 209 | " let a = sin(x).withDerivative { print(\"∂+/∂sin = \\($0)\") } \n",
210 | 210 | " // Force the partial derivative with respect to `x` to be `0.5`.\n",
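For reference, the free-function form introduced by this hunk reads roughly as follows outside the notebook. This is a minimal sketch, assuming the notebook's `TensorFlow` import and a differentiable `sin(_:)` for `Float`; the squared-sine body is illustrative, not the notebook's exact cell.

```swift
import TensorFlow

let x: Float = 30
// `gradient(at:in:)` differentiates the trailing closure with respect to `x`.
let dfdx = gradient(at: x) { x -> Float in
    let a = sin(x)   // intermediate value whose derivative could be observed with `withDerivative`
    return a * a     // f(x) = sin²(x), so f'(x) = 2·sin(x)·cos(x)
}
print(dfdx)
```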
270 | 270 | "let y: Tensor<Float> = [0, 1, 1, 0]\n",
271 | 271 | "\n",
272 | 272 | "for _ in 0..<10 {\n",
273 |     | - " let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in\n",
    | 273 | + " let 𝛁model = gradient(at: classifier) { classifier -> Tensor<Float> in\n",
274 | 274 | " let ŷ = classifier(x).withDerivative { print(\"∂L/∂ŷ =\", $0) }\n",
275 | 275 | " let loss = (ŷ - y).squared().mean()\n",
276 | 276 | " print(\"Loss: \\(loss)\")\n",
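The same migration applies when differentiating with respect to a model. The sketch below shows the new call shape; the `Dense` layer and XOR-style data are stand-ins for the notebook's `classifier`, `x`, and `y`, chosen only so the snippet is self-contained.

```swift
import TensorFlow

var classifier = Dense<Float>(inputSize: 2, outputSize: 1, activation: sigmoid)
let x: Tensor<Float> = [[0, 0], [0, 1], [1, 0], [1, 1]]
let y: Tensor<Float> = [[0], [1], [1], [0]]

// `gradient(at:in:)` returns the model's tangent vector (𝛁model),
// which an optimizer can then apply to `classifier`.
let 𝛁model = gradient(at: classifier) { classifier -> Tensor<Float> in
    let ŷ = classifier(x)              // forward pass, shape [4, 1]
    return (ŷ - y).squared().mean()    // mean squared error
}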
378 | 378 | "}\n",
379 | 379 | "\n",
380 | 380 | "// Differentiate `f(x) = (cos(x))^2`.\n",
381 |     | - "let (output, backprop) = input.valueWithPullback { input -> Float in\n",
    | 381 | + "let (output, backprop) = valueWithPullback(at: input) { input -> Float in\n",
382 | 382 | " return square(cos(input))\n",
383 | 383 | "}\n",
384 | 384 | "print(\"Running backpropagation...\")\n",
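Pullbacks follow the same pattern. A self-contained sketch, with the squaring written inline instead of through the notebook's custom-VJP `square(_:)` helper and a made-up input value:

```swift
import TensorFlow

let input: Float = 0.5

// `valueWithPullback(at:in:)` runs the forward pass and returns the result
// together with a pullback closure that backpropagates a seed derivative.
let (output, backprop) = valueWithPullback(at: input) { input -> Float in
    let c = cos(input)
    return c * c          // f(x) = (cos x)²
}
print(output)       // (cos 0.5)²
print(backprop(1))  // f'(0.5) = -2·cos(0.5)·sin(0.5)
```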
598 | 598 | " print(\"Starting training step \\(i)\")\n",
599 | 599 | " print(\" Running original computation...\")\n",
600 | 600 | " let (logits, backprop) = model.appliedForBackpropagation(to: x)\n",
601 |     | - " let (loss, dL_dŷ) = logits.valueWithGradient { logits in\n",
    | 601 | + " let (loss, dL_dŷ) = valueWithGradient(at: logits) { logits in\n",
602 | 602 | " softmaxCrossEntropy(logits: logits, labels: y)\n",
603 | 603 | " }\n",
604 | 604 | " print(\" Loss: \\(loss)\")\n",
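Outside the training loop, the split forward/backward pattern this hunk touches looks roughly like the sketch below. The `Dense` model, random inputs, and labels are placeholders; `appliedForBackpropagation(to:)` and `softmaxCrossEntropy(logits:labels:)` are the Swift for TensorFlow APIs the notebook already uses.

```swift
import TensorFlow

var model = Dense<Float>(inputSize: 4, outputSize: 3)
let x = Tensor<Float>(randomNormal: [2, 4])
let y = Tensor<Int32>([0, 2])

// Forward pass that also hands back a backpropagation closure.
let (logits, backprop) = model.appliedForBackpropagation(to: x)

// Differentiate the loss with respect to the logits only.
let (loss, dL_dŷ) = valueWithGradient(at: logits) { logits in
    softmaxCrossEntropy(logits: logits, labels: y)
}

// Pull dL/dŷ back through the model to get parameter and input gradients.
let (𝛁model, _) = backprop(dL_dŷ)
print("Loss: \(loss)")
```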