
Commit 5b845e3

Fix deprecation warnings. (Follow-up to #543) (#544)
As a follow-up to #543, which renamed `Raw` to `_Raw`, this change updates all uses of `Raw` in the repository to `_Raw`, fixing all of the resulting deprecation warnings.
1 parent 6c20f74 · commit 5b845e3

18 files changed (+221, -221 lines)
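
Every hunk below is the same mechanical substitution: only the namespace changes; op names, argument labels, and behavior are untouched. A minimal before/after sketch (illustrative, not taken from the commit):

import TensorFlow

let x: Tensor<Float> = [[1, 2], [3, 4]]

// Before (deprecated; warns since #543):
// let rank = Raw.rank(x)

// After:
let rank = _Raw.rank(x)    // rank as a Tensor<Int32>
let shape = _Raw.shape(x)  // shape as a Tensor<Int32>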

Sources/TensorFlow/Core/ArrayOps.swift

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 
 import CTensorFlow
 
-public extension Raw {
+public extension _Raw {
     /// Saves tensors in V2 checkpoint format.
    ///
    /// By default, saves the named tensors in full. If the caller wishes to save specific slices

Sources/TensorFlow/Core/Tensor.swift

Lines changed: 9 additions & 9 deletions
@@ -81,7 +81,7 @@ public extension Tensor {
     var rankTensor: Tensor<Int32> {
         @_semantics("autodiff.nonvarying")
         get {
-            return Raw.rank(self)
+            return _Raw.rank(self)
         }
     }
 
@@ -90,7 +90,7 @@ public extension Tensor {
     var shapeTensor: Tensor<Int32> {
         @_semantics("autodiff.nonvarying")
         get {
-            return Raw.shape(self)
+            return _Raw.shape(self)
         }
     }
 
@@ -99,7 +99,7 @@ public extension Tensor {
     var scalarCountTensor: Tensor<Int32> {
         @_semantics("autodiff.nonvarying")
         get {
-            return Raw.size(self)
+            return _Raw.size(self)
         }
     }
 }
@@ -393,7 +393,7 @@ extension _TensorElementLiteral: ExpressibleByArrayLiteral {
     public typealias ArrayLiteralElement = _TensorElementLiteral<Scalar>
     @inlinable
     public init(arrayLiteral elements: _TensorElementLiteral<Scalar>...) {
-        tensor = Raw.pack(elements.map { $0.tensor })
+        tensor = _Raw.pack(elements.map { $0.tensor })
     }
 }
 
@@ -406,7 +406,7 @@ extension Tensor: ExpressibleByArrayLiteral {
     /// separate method because `ShapedArray` initializers need to call it.
     @inlinable
     internal init(_tensorElementLiterals elements: [_TensorElementLiteral<Scalar>]) {
-        self = Raw.pack(elements.map { $0.tensor })
+        self = _Raw.pack(elements.map { $0.tensor })
     }
 
     /// Creates a tensor initialized with the given elements.
@@ -541,15 +541,15 @@ extension Tensor: AdditiveArithmetic where Scalar: Numeric {
     @inlinable
     @differentiable(vjp: _vjpAdd(lhs:rhs:) where Scalar: TensorFlowFloatingPoint)
     public static func + (lhs: Tensor, rhs: Tensor) -> Tensor {
-        Raw.addV2(lhs, rhs)
+        _Raw.addV2(lhs, rhs)
     }
 
     /// Subtracts one tensor from another and produces their difference.
     /// - Note: `-` supports broadcasting.
     @inlinable
     @differentiable(vjp: _vjpSubtract(lhs:rhs:) where Scalar: TensorFlowFloatingPoint)
     public static func - (lhs: Tensor, rhs: Tensor) -> Tensor {
-        Raw.sub(lhs, rhs)
+        _Raw.sub(lhs, rhs)
     }
 }
 
@@ -559,7 +559,7 @@ internal extension Tensor where Scalar: TensorFlowFloatingPoint {
         (lhs + rhs, { [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in
             let lhsGrad = v
             let rhsGrad = lhsGrad
-            let (lhsAxes, rhsAxes) = Raw.broadcastGradientArgs(s0: lhsShape, s1: rhsShape)
+            let (lhsAxes, rhsAxes) = _Raw.broadcastGradientArgs(s0: lhsShape, s1: rhsShape)
             return (lhsGrad.sum(squeezingAxes: lhsAxes).reshaped(toShape: lhsShape),
                     rhsGrad.sum(squeezingAxes: rhsAxes).reshaped(toShape: rhsShape))
         })
@@ -570,7 +570,7 @@ internal extension Tensor where Scalar: TensorFlowFloatingPoint {
         (lhs - rhs, { [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in
             let lhsGrad = v
             let rhsGrad = -lhsGrad
-            let (lhsAxes, rhsAxes) = Raw.broadcastGradientArgs(s0: lhsShape, s1: rhsShape)
+            let (lhsAxes, rhsAxes) = _Raw.broadcastGradientArgs(s0: lhsShape, s1: rhsShape)
             return (lhsGrad.sum(squeezingAxes: lhsAxes).reshaped(toShape: lhsShape),
                     rhsGrad.sum(squeezingAxes: rhsAxes).reshaped(toShape: rhsShape))
         })

Sources/TensorFlow/Initializers.swift

Lines changed: 16 additions & 16 deletions
@@ -32,7 +32,7 @@ public extension Tensor {
     @inlinable
     @differentiable(vjp: _vjpInit(repeating:shape:) where Scalar: TensorFlowFloatingPoint)
     init(repeating repeatedValue: Scalar, shape: TensorShape) {
-        self = Raw.fill(
+        self = _Raw.fill(
             dims: Tensor<Int32>(shape.dimensions.map(Int32.init)),
             value: Tensor(repeatedValue))
     }
@@ -74,15 +74,15 @@ public extension Tensor where Scalar: Numeric {
     /// Perform an element-wise type conversion from a `Bool` tensor.
     @inlinable
     init(_ other: Tensor<Bool>) {
-        self = Raw.cast(other)
+        self = _Raw.cast(other)
     }
 
     /// Perform an element-wise conversion from another `Tensor`.
     @inlinable
     @differentiable(
         vjp: _vjpCast where Scalar: TensorFlowFloatingPoint, OtherScalar: TensorFlowFloatingPoint)
     init<OtherScalar: Numeric>(_ other: Tensor<OtherScalar>) {
-        self = Raw.cast(other)
+        self = _Raw.cast(other)
     }
 }
 
@@ -104,7 +104,7 @@ public extension Tensor {
     @inlinable
     @differentiable(vjp: _vjpInitElements where Scalar: TensorFlowFloatingPoint)
     init(_ elements: [Tensor]) {
-        self = Raw.pack(elements)
+        self = _Raw.pack(elements)
     }
 
     /// Stacks `tensors`, along the `axis` dimension, into a new tensor with rank one higher than
@@ -138,7 +138,7 @@ public extension Tensor {
     @inlinable
     @differentiable(vjp: _vjpStacking where Scalar: TensorFlowFloatingPoint)
     init(stacking tensors: [Tensor], alongAxis axis: Int = 0) {
-        self = Raw.pack(tensors, axis: Int64(axis))
+        self = _Raw.pack(tensors, axis: Int64(axis))
     }
 
     /// Concatenates `tensors` along the `axis` dimension.
@@ -177,7 +177,7 @@ public extension Tensor {
     @differentiable(vjp: _vjpConcatenating where Scalar: TensorFlowFloatingPoint)
     init(concatenating tensors: [Tensor], alongAxis axis: Int = 0) {
         precondition(tensors.count > 0)
-        self = Raw.concatV2(tensors, axis: Tensor<Int32>(Int32(axis)))
+        self = _Raw.concatV2(tensors, axis: Tensor<Int32>(Int32(axis)))
     }
 }
 
@@ -242,7 +242,7 @@ public extension Tensor where Scalar: Numeric {
     /// - Parameter other: Tensor whose shape and data type to use.
     @inlinable
     init(zerosLike other: Tensor) {
-        self = Raw.zerosLike(other)
+        self = _Raw.zerosLike(other)
     }
 
     /// Creates a tensor with all scalars set to one that has the same shape and type as the provided
@@ -251,7 +251,7 @@ public extension Tensor where Scalar: Numeric {
     /// - Parameter other: Tensor whose shape and data type to use.
     @inlinable
     init(onesLike other: Tensor) {
-        self = Raw.onesLike(other)
+        self = _Raw.onesLike(other)
     }
 
     /// Creates a 1-D tensor representing a sequence from a starting value to, but not including,
@@ -266,7 +266,7 @@ public extension Tensor where Scalar: Numeric {
     ///     positive.
     @inlinable
     init(rangeFrom start: Scalar, to end: Scalar, stride: Scalar) {
-        self = Raw.range(start: Tensor(start), limit: Tensor(end), delta: Tensor(stride))
+        self = _Raw.range(start: Tensor(start), limit: Tensor(end), delta: Tensor(stride))
     }
 
     /// Creates a 1-D tensor representing a sequence from a starting value to, but not including, an
@@ -280,7 +280,7 @@ public extension Tensor where Scalar: Numeric {
     ///   - stride: The amount to step by with each iteration. `stride` must be positive.
     @inlinable
     init(rangeFrom start: Tensor<Scalar>, to end: Tensor<Scalar>, stride: Tensor<Scalar>) {
-        self = Raw.range(start: start, limit: end, delta: stride)
+        self = _Raw.range(start: start, limit: end, delta: stride)
     }
 
     /// Creates a one-hot tensor at given indices. The locations represented by
@@ -318,7 +318,7 @@ public extension Tensor where Scalar: Numeric {
         offValue: Scalar = 0,
         axis: Int = -1
     ) {
-        self = Raw.oneHot(
+        self = _Raw.oneHot(
             indices: indices,
             depth: Tensor<Int32>(Int32(depth)),
             onValue: Tensor(onValue),
@@ -339,7 +339,7 @@ public extension Tensor where Scalar: TensorFlowFloatingPoint {
     ///   - count: The number of values in the resulting sequence. `count` must be positive.
     @inlinable
     init(linearSpaceFrom start: Scalar, to end: Scalar, count: Int) {
-        self = Raw.linSpace(
+        self = _Raw.linSpace(
             start: Tensor(start), stop: Tensor(end), num: Tensor<Int32>(Int32(count)))
     }
 
@@ -356,7 +356,7 @@ public extension Tensor where Scalar: TensorFlowFloatingPoint {
     /// - Precondition: `start`, `to`, and `count` must be Tensors containing a single Scalar value.
     @inlinable
     init(linearSpaceFrom start: Tensor<Scalar>, to end: Tensor<Scalar>, count: Tensor<Int32>) {
-        self = Raw.linSpace(start: start, stop: end, num: count)
+        self = _Raw.linSpace(start: start, stop: end, num: count)
     }
 }
 
@@ -379,7 +379,7 @@ public extension Tensor where Scalar: TensorFlowIndex {
         upperBound: Tensor<Scalar> = Tensor<Scalar>(1),
         seed: TensorFlowSeed = Context.local.randomSeed
     ) {
-        self = Raw.statelessRandomUniformInt(
+        self = _Raw.statelessRandomUniformInt(
             shape: Tensor<Int32>((0..<shape.rank).map { Int32(shape[$0]) }),
             seed: Tensor<Int32>([seed.graph, seed.op]),
             minval: lowerBound,
@@ -402,7 +402,7 @@ public extension Tensor where Scalar: TensorFlowFloatingPoint {
         upperBound: Tensor<Scalar> = Tensor<Scalar>(1),
         seed: TensorFlowSeed = Context.local.randomSeed
     ) {
-        let sample: Tensor<Scalar> = Raw.statelessRandomUniform(
+        let sample: Tensor<Scalar> = _Raw.statelessRandomUniform(
             shape: Tensor<Int32>((0..<shape.rank).map { Int32(shape[$0]) }),
             seed: Tensor<Int32>([seed.graph, seed.op]))
         self = (upperBound - lowerBound) * sample + lowerBound
@@ -422,7 +422,7 @@ public extension Tensor where Scalar: TensorFlowFloatingPoint {
         standardDeviation: Tensor<Scalar> = Tensor<Scalar>(1),
         seed: TensorFlowSeed = Context.local.randomSeed
     ) {
-        let sample: Tensor<Scalar> = Raw.statelessRandomNormal(
+        let sample: Tensor<Scalar> = _Raw.statelessRandomNormal(
            shape: Tensor<Int32>((0..<shape.rank).map { Int32(shape[$0]) }),
            seed: Tensor<Int32>([seed.graph, seed.op]))
        self = standardDeviation * sample + mean
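
Call sites of the initializers above are unaffected by the rename, since `_Raw` is an internal implementation detail. A short usage sketch of a few of the touched initializers (values are illustrative):

import TensorFlow

let filled = Tensor<Float>(repeating: 1, shape: [2, 3])        // routes to _Raw.fill
let steps = Tensor<Float>(rangeFrom: 0, to: 10, stride: 2)     // routes to _Raw.range
let line = Tensor<Float>(linearSpaceFrom: 0, to: 1, count: 5)  // routes to _Raw.linSpace
let stacked = Tensor(stacking: [filled, filled], alongAxis: 0) // routes to _Raw.pack
// stacked.shape == [2, 2, 3]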

Sources/TensorFlow/Layers/Upsampling.swift

Lines changed: 2 additions & 2 deletions
@@ -83,7 +83,7 @@ public struct UpSampling3D<Scalar: TensorFlowFloatingPoint>: ParameterlessLayer
     private func repeatingElements(
         _ input: Tensor<Scalar>, alongAxis axis: Int, count: Int
     ) -> Tensor<Scalar> {
-        let splits = Raw.split(
+        let splits = _Raw.split(
             splitDim: Tensor<Int32>(Int32(axis)),
             value: input,
             numSplit: Int64(input.shape[axis]))
@@ -96,7 +96,7 @@ public struct UpSampling3D<Scalar: TensorFlowFloatingPoint>: ParameterlessLayer
     ) -> (Tensor<Scalar>, (Tensor<Scalar>) -> (TangentVector, Tensor<Scalar>)) {
         let value = repeatingElements(input, alongAxis: axis, count: count)
         return (value, { v in
-            let splits = Raw.split(
+            let splits = _Raw.split(
                 splitDim: Tensor<Int32>(Int32(axis)),
                 value: v,
                 numSplit: Int64(input.shape[axis]))
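
`repeatingElements` first partitions the input into one slice per index along `axis`; the subsequent duplicate-and-concatenate step falls outside the hunks shown. A minimal sketch of the split step alone (values are illustrative):

import TensorFlow

let input = Tensor<Float>(rangeFrom: 0, to: 6, stride: 1).reshaped(to: [3, 2])

// Partition `input` into one slice per index along axis 0.
let splits = _Raw.split(
    splitDim: Tensor<Int32>(0),
    value: input,
    numSplit: Int64(input.shape[0]))
// `splits` is an array of three tensors, each of shape [1, 2].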

Sources/TensorFlow/Loss.swift

Lines changed: 4 additions & 4 deletions
@@ -224,15 +224,15 @@ func softmaxCrossEntropyHelper<Scalar: TensorFlowFloatingPoint>(
     logits: Tensor<Scalar>,
     labels: Tensor<Int32>
 ) -> Tensor<Scalar> {
-    Raw.sparseSoftmaxCrossEntropyWithLogits(features: logits, labels: labels).loss
+    _Raw.sparseSoftmaxCrossEntropyWithLogits(features: logits, labels: labels).loss
 }
 
 @inlinable
 func _vjpSoftmaxCrossEntropyHelper<Scalar: TensorFlowFloatingPoint>(
     logits: Tensor<Scalar>,
     labels: Tensor<Int32>
 ) -> (Tensor<Scalar>, (Tensor<Scalar>) -> Tensor<Scalar>) {
-    let (loss, grad) = Raw.sparseSoftmaxCrossEntropyWithLogits(features: logits, labels: labels)
+    let (loss, grad) = _Raw.sparseSoftmaxCrossEntropyWithLogits(features: logits, labels: labels)
     return (loss, { $0.expandingShape(at: -1) * grad })
 }
 
@@ -258,15 +258,15 @@ func softmaxCrossEntropyHelper<Scalar: TensorFlowFloatingPoint>(
     logits: Tensor<Scalar>,
     probabilities: Tensor<Scalar>
 ) -> Tensor<Scalar> {
-    Raw.softmaxCrossEntropyWithLogits(features: logits, labels: probabilities).loss
+    _Raw.softmaxCrossEntropyWithLogits(features: logits, labels: probabilities).loss
 }
 
 @inlinable
 func _vjpSoftmaxCrossEntropyHelper<Scalar: TensorFlowFloatingPoint>(
     logits: Tensor<Scalar>,
     probabilities: Tensor<Scalar>
 ) -> (Tensor<Scalar>, (Tensor<Scalar>) -> Tensor<Scalar>) {
-    let (loss, grad) = Raw.softmaxCrossEntropyWithLogits(features: logits, labels: probabilities)
+    let (loss, grad) = _Raw.softmaxCrossEntropyWithLogits(features: logits, labels: probabilities)
     return (loss, { $0.expandingShape(at: -1) * grad })
 }
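
Both helpers above rely on raw ops that return the loss and the logits gradient together, so the custom VJP can reuse the precomputed gradient instead of re-deriving it. A standalone sketch of the sparse variant (values are illustrative):

import TensorFlow

let logits: Tensor<Float> = [[2.0, 1.0, 0.1]]
let labels = Tensor<Int32>([0])

// One op computes both the per-example loss and d(loss)/d(logits).
let (loss, grad) = _Raw.sparseSoftmaxCrossEntropyWithLogits(
    features: logits, labels: labels)

// The pullback just scales the precomputed gradient by the cotangent:
let pullback = { (v: Tensor<Float>) in v.expandingShape(at: -1) * grad }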
