This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Make seed type to be Int32 so that it is compatible with TPUs. #169

Merged: 1 commit, Jun 3, 2019
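The change itself is mechanical: every seed parameter, and the seed tensor handed to the stateless random ops, moves from 64-bit to 32-bit integers. A minimal before/after sketch from a caller's point of view (the shape and seed values are illustrative, not taken from the PR):

    import TensorFlow

    // Before this PR: seeds were 64-bit pairs.
    // let t = Tensor<Float>(randomUniform: [2, 2], seed: (Int64(1), Int64(2)))

    // After this PR: seeds are 32-bit pairs, which the stateless random ops accept on TPUs.
    let t = Tensor<Float>(randomUniform: [2, 2], seed: (Int32(1), Int32(2)))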
16 changes: 8 additions & 8 deletions Sources/TensorFlow/Initializers.swift
@@ -376,12 +376,12 @@ public extension Tensor where Scalar: BinaryFloatingPoint {
///
init(
randomUniform shape: TensorShape,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))
) {
self = Raw.statelessRandomUniform(
shape: Tensor<Int32>((0..<shape.rank).map { Int32(shape[$0]) }),
- seed: Tensor<Int64>([seed.0, seed.1])
+ seed: Tensor<Int32>([seed.0, seed.1])
)
}

@@ -394,12 +394,12 @@ public extension Tensor where Scalar: BinaryFloatingPoint {
///
init(
randomNormal shape: TensorShape,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))
) {
self = Raw.statelessRandomNormal(
shape: Tensor<Int32>((0..<shape.rank).map { Int32(shape[$0]) }),
- seed: Tensor<Int64>([seed.0, seed.1])
+ seed: Tensor<Int32>([seed.0, seed.1])
)
}
}
@@ -475,8 +475,8 @@ public extension Tensor where Scalar: TensorFlowFloatingPoint {
///
init(
glorotUniform shape: TensorShape,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))
) {
let uniform = Tensor(randomUniform: shape, seed: seed)
self = Tensor.glorot(fromStandardUniform: uniform, shape: shape)
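Since the stateless ops are deterministic for a given shape and seed, passing the same Int32 pair reproduces the same tensor. A short sketch of the three updated initializers (shapes and seed values are illustrative; omitting the seed falls back to the random Int32 default shown above):

    import TensorFlow

    let seed: (Int32, Int32) = (2019, 6)
    let uniform = Tensor<Float>(randomUniform: [3, 3], seed: seed)  // same seed -> same values
    let normal  = Tensor<Float>(randomNormal:  [3, 3], seed: seed)
    let glorot  = Tensor<Float>(glorotUniform: [3, 3], seed: seed)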
16 changes: 8 additions & 8 deletions Sources/TensorFlow/Layers/Convolutional.swift
@@ -116,8 +116,8 @@ public extension Conv1D {
stride: Int = 1,
padding: Padding = .valid,
activation: @escaping Activation = identity,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))
) {
let filterTensorShape = TensorShape([
filterShape.0, filterShape.1, filterShape.2])
@@ -232,8 +232,8 @@ public extension Conv2D {
strides: (Int, Int) = (1, 1),
padding: Padding = .valid,
activation: @escaping Activation = identity,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))
) {
let filterTensorShape = TensorShape([
filterShape.0, filterShape.1, filterShape.2, filterShape.3])
@@ -348,8 +348,8 @@ public extension Conv3D {
strides: (Int, Int, Int) = (1, 1, 1),
padding: Padding = .valid,
activation: @escaping Activation = identity,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))
) {
let filterTensorShape = TensorShape([
filterShape.0, filterShape.1, filterShape.2, filterShape.3, filterShape.4])
@@ -473,8 +473,8 @@ public extension TransposedConv2D {
strides: (Int, Int) = (1, 1),
padding: Padding = .valid,
activation: @escaping Activation = identity,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))
) {
let filterTensorShape = TensorShape([
filterShape.0, filterShape.1, filterShape.2, filterShape.3])
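Constructing a convolution layer with an explicit Int32 seed after this change might look like the sketch below. The filterShape label and its (height, width, input channels, output channels) order are inferred from the diff body rather than shown in the hunk, so treat them as assumptions; the other arguments mirror the signature above.

    import TensorFlow

    // filterShape label/order assumed: (height, width, inputChannels, outputChannels).
    let conv = Conv2D<Float>(
        filterShape: (3, 3, 16, 32),
        strides: (1, 1),
        padding: .valid,
        activation: relu,
        seed: (Int32(13), Int32(37)))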
4 changes: 2 additions & 2 deletions Sources/TensorFlow/Layers/Core.swift
@@ -214,8 +214,8 @@ public extension Dense {
inputSize: Int,
outputSize: Int,
activation: @escaping Activation = identity,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))
) {
self.init(weight: Tensor(glorotUniform: [inputSize, outputSize],
seed: seed),
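Dense forwards the seed straight into the glorotUniform weight initializer (visible in the hunk above), so an explicit Int32 seed pins down the initial weights, which is what the tests further down rely on. For example:

    import TensorFlow

    let dense = Dense<Float>(inputSize: 2, outputSize: 4, activation: relu,
                             seed: (0xfeed, 0xbeef))  // both literals fit in Int32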
8 changes: 4 additions & 4 deletions Sources/TensorFlow/Layers/Recurrent.swift
@@ -101,8 +101,8 @@ public struct SimpleRNNCell<Scalar: TensorFlowFloatingPoint>: RNNCell, VectorNumeric
/// - hiddenSize: The number of features in 2-D hidden states.
/// - seed: The random seed for initialization. The default value is random.
public init(inputSize: Int, hiddenSize: Int,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))) {
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))) {
let concatenatedInputSize = inputSize + hiddenSize
self.weight = Tensor(glorotUniform: [concatenatedInputSize, hiddenSize], seed: seed)
self.bias = Tensor(zeros: [hiddenSize])
@@ -144,8 +144,8 @@ public struct LSTMCell<Scalar: TensorFlowFloatingPoint>: RNNCell, VectorNumeric
/// - inputSize: The number of features in 2-D input tensors.
/// - hiddenSize: The number of features in 2-D hidden states.
public init(inputSize: Int, hiddenSize: Int,
- seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
- Int64.random(in: Int64.min..<Int64.max))) {
+ seed: (Int32, Int32) = (Int32.random(in: Int32.min..<Int32.max),
+ Int32.random(in: Int32.min..<Int32.max))) {
let concatenatedInputSize = inputSize + hiddenSize
let gateWeightShape = TensorShape([concatenatedInputSize, hiddenSize])
let gateBiasShape = TensorShape([hiddenSize])
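The recurrent cells take the seed the same way, forwarding it to their glorotUniform weight initializers (the forwarding is visible for SimpleRNNCell above). A sketch with a fixed Int32 seed (sizes are illustrative):

    import TensorFlow

    let rnnCell  = SimpleRNNCell<Float>(inputSize: 4, hiddenSize: 4, seed: (0xFeed, 0xBeef))
    let lstmCell = LSTMCell<Float>(inputSize: 4, hiddenSize: 4, seed: (0xFeed, 0xBeef))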
10 changes: 5 additions & 5 deletions Tests/TensorFlowTests/LayerTests.swift
@@ -214,15 +214,15 @@ final class LayerTests: XCTestCase {
let x = Tensor<Float>(rangeFrom: 0.0, to: 0.4, stride: 0.1).rankLifted()
let inputs: [Tensor<Float>] = Array(repeating: x, count: 4)
let rnn = RNN(SimpleRNNCell<Float>(inputSize: 4, hiddenSize: 4,
- seed: (0xFeedBeef, 0xDeadBeef)))
+ seed: (0xFeed, 0xBeef)))
let (outputs, _) = rnn.valueWithPullback(at: inputs) { rnn, inputs in
return rnn(inputs)
}
XCTAssertEqual(outputs.map { $0.value },
- [[[ -0.00262943, -0.005866742, 0.044919778, 0.20036437]],
- [[ 0.066890605, 0.049586136, 0.024610005, 0.09341654]],
- [[ 0.065792546, 0.009325638, 0.06439907, 0.114802904]],
- [[ 0.055909205, 0.00035158166, 0.054020774, 0.09812111]]])
+ [[[ 0.20775771, 0.20080023, -0.13768704, -0.18534681]],
+ [[ 0.22666009, 0.30019346, -0.19720285, -0.14683801]],
+ [[ 0.23758979, 0.32101023, -0.20359215, -0.1787096]],
+ [[ 0.24337786, 0.3389194, -0.21143384, -0.1675081]]])
// TODO: Figure out why the following is numerically unstable.
// let (𝛁rnn, _) = pullback(.init(inputs.map { SimpleRNNCell<Float>.State($0) }))
// XCTAssertEqual(𝛁rnn.cell.weight,
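The test seeds had to shrink because a Swift integer literal larger than Int32.max is rejected at compile time: 0xFeedBeef and 0xDeadBeef no longer fit once the parameter is (Int32, Int32), and the expected output values change because different seeds yield different initial weights. The same constraint explains the smaller seeds in SequentialTests and TrivialModelTests below. A small sketch of the constraint (values illustrative):

    // Int32.max is 2_147_483_647.
    let fits: Int32 = 0xFeed             // 65_261, within range
    // let tooBig: Int32 = 0xFeedBeef    // 4_276_993_775 overflows Int32; compile-time error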
4 changes: 2 additions & 2 deletions Tests/TensorFlowTests/SequentialTests.swift
@@ -21,7 +21,7 @@ final class SequentialTests: XCTestCase {
var dense1 = Dense<Float>(inputSize: 2, outputSize: 4, activation: relu,
seed: (0xfffffff, 0xfeeff))
var dense2 = Dense<Float>(inputSize: 4, outputSize: 1, activation: relu,
- seed: (0xfeffeffe, 0xfffe))
+ seed: (0xeffeffe, 0xfffe))

@differentiable
func call(_ input: Tensor<Float>) -> Tensor<Float> {
@@ -41,7 +41,7 @@ final class SequentialTests: XCTestCase {
optimizer.update(&model.allDifferentiableVariables, along: 𝛁model)
}
XCTAssertEqual(model.inferring(from: [[0, 0], [0, 1], [1, 0], [1, 1]]),
- [[ 0.491493], [ 0.5063815], [0.49968663], [0.50133944]])
+ [[ 0.4904838], [0.49942452], [0.49740878], [ 0.5106092]])
}

static var allTests = [
2 changes: 1 addition & 1 deletion Tests/TensorFlowTests/TrivialModelTests.swift
@@ -30,7 +30,7 @@ final class TrivialModelTests: XCTestCase {
inputSize: hiddenSize,
outputSize: 1,
activation: relu,
- seed: (0xfeffeffe, 0xfffe)
+ seed: (0xffeffe, 0xfffe)
)
}
@differentiable