Skip to content
This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit 731ce40

Browse files
committed
Add MNIST test.
1 parent 473fbff commit 731ce40

File tree

6 files changed

+134
-14
lines changed

6 files changed

+134
-14
lines changed

Package.swift

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -20,23 +20,20 @@ import PackageDescription
2020
let package = Package(
2121
name: "DeepLearning",
2222
products: [
23-
// Products define the executables and libraries produced by a package, and make them visible to other packages.
2423
.library(
2524
name: "DeepLearning",
2625
targets: ["DeepLearning"]),
2726
],
28-
dependencies: [
29-
// Dependencies declare other packages that this package depends on.
30-
// .package(url: /* package url */, from: "1.0.0"),
31-
],
27+
dependencies: [],
3228
targets: [
33-
// Targets are the basic building blocks of a package. A target can define a module or a test suite.
34-
// Targets can depend on other targets in this package, and on products in packages which this package depends on.
3529
.target(
3630
name: "DeepLearning",
3731
dependencies: []),
3832
.testTarget(
3933
name: "DeepLearningTests",
4034
dependencies: ["DeepLearning"]),
35+
.testTarget(
36+
name: "MNIST",
37+
dependencies: ["DeepLearning"]),
4138
]
4239
)

Tests/DeepLearningTests/TrivialModelTests.swift

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ final class TrivialModelTests: XCTestCase {
2424
l2 = Dense<Float>(inputSize: hiddenSize, outputSize: 1)
2525
}
2626
func applied(to input: Tensor<Float>) -> Tensor<Float> {
27-
let h1 = sigmoid(l1.applied(to: input))
28-
return sigmoid(l2.applied(to: h1))
27+
let h1 = relu(l1.applied(to: input))
28+
return relu(l2.applied(to: h1))
2929
}
3030
}
3131
let optimizer = SGD<Classifier, Float>(learningRate: 0.02)
@@ -35,11 +35,9 @@ final class TrivialModelTests: XCTestCase {
3535
for _ in 0..<1000 {
3636
let (loss, 𝛁model) = classifier.valueWithGradient { classifier -> Tensor<Float> in
3737
let ŷ = classifier.applied(to: x)
38-
return (y - ŷ).squared().mean()
38+
return meanSquaredError(predicted: ŷ, expected: y)
3939
}
40-
print(loss)
41-
optimizer.update(&classifier.allDifferentiableVariables,
42-
along: 𝛁model)
40+
optimizer.update(&classifier.allDifferentiableVariables, along: 𝛁model)
4341
}
4442
print(classifier.applied(to: [[0, 0], [0, 1], [1, 0], [1, 1]]))
4543
}

Tests/DeepLearningTests/XCTestManifests.swift

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,8 @@ import XCTest
1717
#if !os(macOS)
1818
public func allTests() -> [XCTestCaseEntry] {
1919
return [
20-
testCase(DeepLearningTests.allTests),
20+
testCase(TrivialModelTests.allTests),
21+
testCase(MNISTTests.allTests),
2122
]
2223
}
2324
#endif

Tests/MNIST/MNIST.swift

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
// Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
import XCTest
16+
import Python
17+
@testable import DeepLearning
18+
19+
let gzip = Python.import("gzip")
20+
let np = Python.import("numpy")
21+
22+
/// Loads a gzipped MNIST IDX image file and returns its pixels as floats in [0, 1].
/// The 16-byte IDX header is skipped via numpy's `offset` argument.
func readImagesFile(_ filename: String) -> [Float] {
    let contents = gzip.open(filename, "rb").read()
    let rawPixels = np.frombuffer(contents, dtype: np.uint8, offset: 16)
    // Scale byte values [0, 255] down to floats in [0, 1].
    let normalized = rawPixels.astype(np.float32) / 255
    return Array(numpyArray: normalized)!
}
28+
29+
/// Loads a gzipped MNIST IDX label file and returns its class labels.
/// The 8-byte IDX header is skipped via numpy's `offset` argument.
func readLabelsFile(_ filename: String) -> [Int32] {
    let contents = gzip.open(filename, "rb").read()
    let rawLabels = np.frombuffer(contents, dtype: np.uint8, offset: 8)
    let widened = rawLabels.astype(np.int32)
    return Array(numpyArray: widened)!
}
35+
36+
/// Reads MNIST images and labels from specified file paths.
///
/// - Parameters:
///   - imagesFile: Path to a gzipped IDX image file (e.g. "train-images-idx3-ubyte.gz").
///   - labelsFile: Path to the matching gzipped IDX label file.
/// - Returns: A `[rowCount, columnCount]` tensor of pixel values in [0, 1], and a
///   rank-1 tensor of `Int32` class labels with one entry per image row.
func readMNIST(imagesFile: String, labelsFile: String)
    -> (images: Tensor<Float>, labels: Tensor<Int32>) {
    print("Reading data.")
    let images = readImagesFile(imagesFile)
    let labels = readLabelsFile(labelsFile)
    // One image per label; the column count is the flattened per-image pixel count.
    let rowCount = Int32(labels.count)
    let columnCount = Int32(images.count) / rowCount

    print("Constructing data tensors.")
    // BUG FIX: `readImagesFile` already scales pixels into [0, 1]; dividing by 255
    // again here double-normalized the data into [0, 1/255].
    let imagesTensor = Tensor(shape: [rowCount, columnCount], scalars: images)
    let labelsTensor = Tensor(labels)
    return (imagesTensor, labelsTensor)
}
50+
51+
/// A two-layer fully-connected classifier over 784-element inputs, producing
/// log-probabilities across 10 classes.
struct MNISTClassifier: Layer {
    var l1, l2: Dense<Float>

    /// Builds the network: 784 inputs -> `hiddenSize` hidden units -> 10 outputs.
    init(hiddenSize: Int) {
        l1 = Dense<Float>(inputSize: 784, outputSize: hiddenSize)
        l2 = Dense<Float>(inputSize: hiddenSize, outputSize: 10)
    }

    /// Forward pass: sigmoid-activated hidden layer followed by log-softmax output.
    func applied(to input: Tensor<Float>) -> Tensor<Float> {
        let hidden = sigmoid(l1.applied(to: input))
        let logits = l2.applied(to: hidden)
        return logSoftmax(logits)
    }
}
62+
63+
/// Trains `MNISTClassifier` on the MNIST training set and prints per-epoch
/// accuracy and loss. Expects the gzipped IDX data files in the working directory.
final class MNISTTests: XCTestCase {
    func testMNIST() {
        // Get training data.
        let (images, numericLabels) = readMNIST(imagesFile: "train-images-idx3-ubyte.gz",
                                                labelsFile: "train-labels-idx1-ubyte.gz")
        let labels = Tensor<Float>(oneHotAtIndices: numericLabels, depth: 10)
        let batchSize = images.shape[0]

        // Hyper-parameters.
        let epochCount = 20
        let minibatchSize: Int32 = 10
        let learningRate: Float = 0.2

        // FIX: `learningRate` was declared but unused — the literal 0.2 was passed
        // directly. Wire the constant through so it is the single source of truth.
        let optimizer = RMSProp<MNISTClassifier, Float>(learningRate: learningRate)
        var classifier = MNISTClassifier(hiddenSize: 30)

        /// Slices out minibatch `index` (of `minibatchSize` rows) along the leading axis.
        func minibatch<Scalar>(_ x: Tensor<Scalar>, index: Int32) -> Tensor<Scalar> {
            let start = index * minibatchSize
            return x[start..<start+minibatchSize]
        }

        // Training loop.
        print("Begin training for \(epochCount) epochs.")

        // FIX: `0..<epochCount` (the original `0...epochCount` ran 21 epochs,
        // contradicting the message printed above).
        for epoch in 0..<epochCount {
            // Statistics accumulated for the per-epoch report.
            var correctPredictions = 0
            var totalLoss: Float = 0

            let iterationCount = batchSize / minibatchSize
            for i in 0..<iterationCount {
                let images = minibatch(images, index: i)
                let numericLabels = minibatch(numericLabels, index: i)
                let labels = minibatch(labels, index: i)

                let (loss, 𝛁model) = classifier.valueWithGradient { classifier -> Tensor<Float> in
                    let ŷ = classifier.applied(to: images)

                    // Update number of correct predictions.
                    let correctlyPredicted = ŷ.argmax(squeezingAxis: 1) .== numericLabels
                    correctPredictions += Int(Tensor<Int32>(correctlyPredicted).sum().scalarized())

                    // Cross-entropy of one-hot labels against log-probabilities,
                    // averaged over the minibatch. FIX: use `minibatchSize` rather
                    // than the magic constant 10 so the loss scale tracks the
                    // hyper-parameter.
                    return -(labels * ŷ).sum() / Tensor(Float(minibatchSize))
                }
                optimizer.update(&classifier.allDifferentiableVariables, along: 𝛁model)
                totalLoss += loss.scalarized()
            }
            print("""
                [Epoch \(epoch)] \
                Accuracy: \(correctPredictions)/\(batchSize) \
                (\(Float(correctPredictions) / Float(batchSize)))\t\
                Loss: \(totalLoss / Float(batchSize))
                """)
        }
        print("Done training MNIST.")
    }

    static var allTests = [
        ("testMNIST", testMNIST),
    ]
}

Tests/MNIST/train-images-idx3-ubyte

44.9 MB
Binary file not shown.

Tests/MNIST/train-labels-idx1-ubyte

58.6 KB
Binary file not shown.

0 commit comments

Comments
 (0)