@@ -51,16 +51,16 @@ public extension Layer {
     @differentiating(inferring(from:))
     @usableFromInline
     internal func _vjpInferring(from input: Input)
-        -> (value: Output, pullback: (Output.CotangentVector)
-            -> (CotangentVector, Input.CotangentVector)) {
+        -> (value: Output, pullback: (Output.TangentVector)
+            -> (TangentVector, Input.TangentVector)) {
         return withLearningPhase(LearningPhase.inference) {
             let (output, pullback) = appliedForBackpropagation(to: input)
             return (output, { v in pullback(v) })
         }
     }

-    typealias Backpropagator = (_ direction: Output.CotangentVector)
-        -> (layerGradient: CotangentVector, inputGradient: Input.CotangentVector)
+    typealias Backpropagator = (_ direction: Output.TangentVector)
+        -> (layerGradient: TangentVector, inputGradient: Input.TangentVector)

     /// Returns the inference output and the backpropagation function obtained from applying the
     /// layer to the given input.
@@ -728,7 +728,7 @@ public struct BatchNorm<Scalar: TensorFlowFloatingPoint>: Layer {
     @usableFromInline
     func _vjpApplied(to input: Tensor<Scalar>) ->
         (Tensor<Scalar>, (Tensor<Scalar>) ->
-            (BatchNorm<Scalar>.CotangentVector, Tensor<Scalar>)) {
+            (BatchNorm<Scalar>.TangentVector, Tensor<Scalar>)) {
         switch Context.local.learningPhase {
         case .training:
             return valueWithPullback(at: input) {
@@ -1086,7 +1086,7 @@ public struct Dropout<Scalar: TensorFlowFloatingPoint>: Layer {
     @usableFromInline
     func _vjpApplied(to input: Tensor<Scalar>) ->
         (Tensor<Scalar>, (Tensor<Scalar>) ->
-            (Dropout<Scalar>.CotangentVector, Tensor<Scalar>)) {
+            (Dropout<Scalar>.TangentVector, Tensor<Scalar>)) {
         switch Context.local.learningPhase {
         case .training:
             return valueWithPullback(at: input) {
@@ -1435,8 +1435,8 @@ public struct RNN<Cell: RNNCell>: Layer {
     internal func _vjpCall(
         _ inputs: [Cell.TimeStepInput], initialState: Cell.State
     ) -> ([Cell.TimeStepOutput],
-          (Array<Cell.TimeStepOutput>.CotangentVector)
-              -> (CotangentVector, Array<Cell.TimeStepInput>.CotangentVector)) {
+          (Array<Cell.TimeStepOutput>.TangentVector)
+              -> (TangentVector, Array<Cell.TimeStepInput>.TangentVector)) {
         let timeStepCount = inputs.count
         var currentHiddenState = cell.zeroState
         var timeStepOutputs: [Cell.TimeStepOutput] = []
@@ -1454,9 +1454,9 @@ public struct RNN<Cell: RNNCell>: Layer {
         return (timeStepOutputs, { 𝛁outputs in
             precondition(𝛁outputs.base.count == timeStepCount,
                 "The number of output gradients must equal the number of time steps")
-            var 𝛁cell = Cell.CotangentVector.zero
-            var 𝛁state = Cell.State.CotangentVector.zero
-            var reversed𝛁inputs: [Cell.TimeStepInput.CotangentVector] = []
+            var 𝛁cell = Cell.TangentVector.zero
+            var 𝛁state = Cell.State.TangentVector.zero
+            var reversed𝛁inputs: [Cell.TimeStepInput.TangentVector] = []
             reversed𝛁inputs.reserveCapacity(timeStepCount)
             for (𝛁output, backpropagator) in zip(𝛁outputs.base, backpropagators).reversed() {
                 let (new𝛁cell, 𝛁input) = backpropagator(.init(output: 𝛁output, state: 𝛁state))
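For readers following the rename, the sketch below shows roughly how the `Backpropagator` signature above is consumed once gradients are typed as `TangentVector`. It is a minimal, illustrative example assuming the swift-apis surface of this era (`Dense`, `relu`, and `appliedForBackpropagation(to:)` as used in the hunks above); exact tuple labels and initializers may differ between toolchain snapshots.

```swift
import TensorFlow

// Illustrative layer and input; sizes are arbitrary.
let layer = Dense<Float>(inputSize: 4, outputSize: 2, activation: relu)
let input = Tensor<Float>(randomNormal: [1, 4])

// appliedForBackpropagation(to:) returns the forward output together with a
// Backpropagator; after this change its gradients are typed as TangentVector
// rather than CotangentVector.
let (output, backpropagator) = layer.appliedForBackpropagation(to: input)

// Seeding the backpropagator with ones yields the gradient of the summed
// output with respect to the layer's parameters and to the input.
let seed = Tensor<Float>(ones: output.shape)
let (layerGradient, inputGradient) = backpropagator(seed)

// layerGradient: Dense<Float>.TangentVector (previously CotangentVector)
// inputGradient: Tensor<Float> with the same shape as `input`
print(layerGradient)
print(inputGradient)
```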