@@ -32,26 +32,6 @@ import Glibc
 #endif
 import CTensorFlow
 
-// @_frozen // SR-9739
-public enum _ExecutionMode: Equatable {
-  /// CPU or GPU execution.
-  case auto
-  /// TPU execution.
-  // TODO: assess if we can pass this bit of info from compiler settings (when
-  // enableTPU() is called), and avoid having this additional runtime bit.
-  case tpu
-  /// XLA jit-compilation backend (will use GPU when available, and otherwise
-  /// CPU).
-  case xla
-
-  public var isTPU: Bool {
-    switch self {
-    case .tpu: return true
-    default: return false
-    }
-  }
-}
-
 /// TraceContext contains the state needed to build a trace graph function
 /// (TF_Function). As eager ops are executed in tracing mode, their
 /// corresponding nodes are added to the trace graph (via
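Reviewer note: the deleted `_ExecutionMode` enum was the runtime's only backend selector, and code elsewhere branched on `isTPU` to decide whether TPU-specific setup was needed. A minimal, self-contained sketch of that pattern, reconstructed from the removed lines (the `backendDescription` helper is hypothetical, for illustration only):

```swift
// Standalone sketch of the mode-selection pattern removed above.
enum ExecutionMode: Equatable {
  case auto  // CPU or GPU execution.
  case tpu   // TPU execution.
  case xla   // XLA JIT compilation (GPU when available, otherwise CPU).

  var isTPU: Bool {
    switch self {
    case .tpu: return true
    default: return false
    }
  }
}

// Hypothetical helper showing how callers typically used `isTPU`.
func backendDescription(for mode: ExecutionMode) -> String {
  return mode.isTPU ? "TPU backend (needs infeed setup)" : "CPU/GPU backend"
}
```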
@@ -444,10 +424,6 @@ public enum _RuntimeConfig {
   /// tensor program in this process.
   static public var tensorFlowRuntimeInitialized = false
 
-  /// For CPU and GPU execution without XLA, use the auto mode. For XLA and/or
-  /// TPU execution, set the enum value accordingly.
-  static public var executionMode: _ExecutionMode = .auto
-
   /// When true, let TensorFlow GPU memory allocation start small and grow as
   /// needed. Otherwise, the entire GPU memory region is pre-allocated.
   static public var gpuMemoryAllowGrowth = true
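With `executionMode` gone, `_RuntimeConfig` keeps only plain static knobs that are read when the execution context is created. A sketch of the intended usage, assuming (as the surrounding code suggests) that the context is initialized lazily on the first tensor operation; the `cpuDeviceCount` field appears later in this diff:

```swift
// Configure the runtime before the first tensor op; once the
// _ExecutionContext exists, the config proto has already been
// serialized and later changes to these statics have no effect.
_RuntimeConfig.gpuMemoryAllowGrowth = true
_RuntimeConfig.cpuDeviceCount = 1  // assumed integer-typed, per the call site below
// ... first tensor operation triggers context creation here ...
```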
@@ -505,12 +481,6 @@ private func configureRuntimeFromEnvironment() {
     debugLog("Setting TF logging verbose level to \(verboseLevel) from env.")
   }
 
-  if let value = getenv("SWIFT_TENSORFLOW_USE_TPU_INFEED"),
-    String(cString: value).lowercased() == "true" {
-    _RuntimeConfig.executionMode = .tpu
-    debugLog("Setting TPU execution with infeed from env.")
-  }
-
   if let value = getenv("SWIFT_TENSORFLOW_SERVER_ADDRESS") {
     let address = String(cString: value)
     debugLog("Env var SWIFT_TENSORFLOW_SERVER_ADDRESS has value \(address).")
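The surviving checks in `configureRuntimeFromEnvironment()` all follow the same shape as the deleted `SWIFT_TENSORFLOW_USE_TPU_INFEED` block: read the variable with `getenv`, normalize the C string, then set a `_RuntimeConfig` field. A generic sketch of the pattern, reusing this file's `debugLog` helper; the `SWIFT_TENSORFLOW_EXAMPLE_FLAG` name is made up for illustration:

```swift
#if canImport(Glibc)
import Glibc   // getenv on Linux
#else
import Darwin  // getenv on macOS
#endif

// Hypothetical flag, shown only to illustrate the env-parsing pattern.
if let value = getenv("SWIFT_TENSORFLOW_EXAMPLE_FLAG"),
   String(cString: value).lowercased() == "true" {
  // Flip the corresponding _RuntimeConfig field here.
  debugLog("Example flag enabled from env.")
}
```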
@@ -563,23 +533,6 @@ private func configureRuntimeFromEnvironment() {
   }
 }
 
-/// Initialize the TPU system.
-/// - Note: This should be called only once.
-/// - Precondition: The given session must contain the given graph.
-// TODO(b/77572335): Reassess how to reset TPU after execution error.
-private func initializeTPU(withSession session: CTFSession, graph: CTFGraph,
-                           status: CTFStatus) {
-  debugLog("Initializing TPU.")
-  let configOp = TF_GraphOperationByName(graph, "ConfigureDistributedTPU")
-  internalConsistencyCheck(configOp != nil)
-  var configNode = TF_Output(oper: configOp, index: 0)
-  var dummyOutput: CTensor?
-  TF_SessionRun(session, nil, nil, nil, 0, &configNode, &dummyOutput, 1, nil,
-                0, nil, status)
-  checkOk(status)
-  TF_DeleteTensor(dummyOutput)
-}
-
 /// The host of any tensor computation.
 @_fixed_layout
 public final class _ExecutionContext {
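For readers unfamiliar with the C API call being deleted: `initializeTPU` was a compact instance of the standard `TF_SessionRun` pattern, with no inputs or targets and a single fetched (and discarded) output. Its argument layout, annotated per the TensorFlow C API headers:

```swift
// TF_SessionRun argument layout as used by the deleted initializeTPU:
// TF_SessionRun(session,
//               nil,              // run_options: TF_Buffer*
//               nil, nil, 0,      // inputs, input_values, ninputs
//               &configNode,      // outputs: TF_Output* to fetch
//               &dummyOutput, 1,  // output_values, noutputs
//               nil, 0,           // target_opers, ntargets
//               nil,              // run_metadata: TF_Buffer*
//               status)
// The fetched tensor is unused, so it is deleted right after checkOk.
```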
@@ -594,9 +547,6 @@ public final class _ExecutionContext {
   /// Only set when there is some usable GPU.
   fileprivate let gpuDeviceNamePrefix: String?
 
-  /// Only set when there is some usable TPU.
-  fileprivate let tpuDeviceNamePrefix: String?
-
   /// The buffer storing a serialized TensorFlow config proto.
   public let tensorFlowConfig: UnsafeMutablePointer<TF_Buffer>
 
@@ -632,14 +582,11 @@ public final class _ExecutionContext {
     }
 
     // Create TF config object.
-    if _RuntimeConfig.executionMode == .xla {
-      debugLog("Enable XLA execution.")
-    }
     if _RuntimeConfig.gpuMemoryAllowGrowth {
       debugLog("Allowing growth for GPU memory allocator.")
     }
     self.tensorFlowConfig = TF_CreateConfig(
-      _RuntimeConfig.executionMode == .xla ? 1 : 0,
+      /* enable_xla_compilation */ 0,
       _RuntimeConfig.gpuMemoryAllowGrowth ? 1 : 0,
       _RuntimeConfig.cpuDeviceCount)
     TFE_ContextOptionsSetConfig(opts,
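The `+` line pins the first `TF_CreateConfig` argument to 0 instead of consulting the removed enum. Judging from this call site alone, the shim takes three scalars and returns a serialized ConfigProto in a `TF_Buffer` (the property type earlier in this diff confirms `UnsafeMutablePointer<TF_Buffer>`). A labeled sketch of the resulting call, assuming the standard `TFE_ContextOptionsSetConfig(options, proto, proto_len, status)` shape:

```swift
// Post-change config construction; argument roles inferred from this diff.
let config: UnsafeMutablePointer<TF_Buffer> = TF_CreateConfig(
  /* enable_xla_compilation */ 0,  // XLA JIT is now unconditionally off here
  /* gpu_memory_allow_growth */ _RuntimeConfig.gpuMemoryAllowGrowth ? 1 : 0,
  /* num_cpu_devices */ _RuntimeConfig.cpuDeviceCount)
// Hand the serialized proto to the eager context options.
TFE_ContextOptionsSetConfig(opts, config.pointee.data, config.pointee.length, status)
```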
@@ -666,9 +613,6 @@ public final class _ExecutionContext {
     }
 
     // Initialize GPU device.
-    // While the code here is only needed when _RuntimeConfig.executionMode is
-    // set to .gpu, running it in all code paths helps keep things simple
-    // (e.g. so that the cpuDeviceNamePrefix property is always set.)
     let devices = TFE_ContextListDevices(eagerContext, status)
     checkOk(status)
     defer { TF_DeleteDeviceList(devices!) }
@@ -679,7 +623,6 @@ public final class _ExecutionContext {
     debugLog("There are \(deviceCount) devices.")
     var foundCPU = false
     var gpuCount = 0
-    var tpuCount = 0
     for deviceId in 0..<deviceCount {
       let cDeviceName = TF_DeviceListName(devices, deviceId, status)
       checkOk(status)
@@ -696,9 +639,6 @@ public final class _ExecutionContext {
       if deviceType == "GPU" {
         gpuCount += 1
       }
-      if deviceType == "TPU" {
-        tpuCount += 1
-      }
     }
     guard foundCPU else {
       fatalError("CPU should always be an available device.")
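Taken together, the last three hunks reduce device discovery to CPU and GPU counting. A condensed sketch of the surviving loop inside the initializer, reusing names from the surrounding context (`eagerContext`, `status`, and `checkOk` are defined elsewhere in this file):

```swift
var gpuCount = 0
let devices = TFE_ContextListDevices(eagerContext, status)
checkOk(status)
defer { TF_DeleteDeviceList(devices!) }
for deviceId in 0..<TF_DeviceListCount(devices) {
  // TF_DeviceListType yields strings such as "CPU" or "GPU".
  let deviceType = String(cString: TF_DeviceListType(devices, deviceId, status))
  checkOk(status)
  if deviceType == "GPU" {
    gpuCount += 1
  }
}
```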
@@ -712,14 +652,6 @@ public final class _ExecutionContext {
       self.gpuDeviceNamePrefix = nil
     }
 
-    if tpuCount > 0 {
-      // According to server def generated when you set
-      // SWIFT_TENSORFLOW_SERVER_ADDRESS, the TPUs will all be on task 1.
-      self.tpuDeviceNamePrefix = "/job:localhost/replica:0/task:1/device:TPU:"
-    } else {
-      self.tpuDeviceNamePrefix = nil
-    }
-
     // Initialize the mutex.
     pthread_mutex_init(&mutex, nil)
   }
@@ -1063,8 +995,6 @@ internal extension _ExecutionContext {
         return "\(cpuDeviceNamePrefix)\(index)"
       case .gpu:
         return "\(gpuDeviceNamePrefix!)\(index)"
-      case .tpu:
-        return "\(tpuDeviceNamePrefix!)\(index)"
       }
     }
     return nil
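After this final hunk only the `.cpu` and `.gpu` cases remain in the device-kind switch. TensorFlow's fully qualified device names take the form `/job:<job>/replica:<r>/task:<t>/device:<TYPE>:<index>` (the removed TPU prefix above shows the same shape), and the stored prefixes end right before the index, which is why plain string interpolation yields a complete name. A small sketch with an illustrative prefix value; the runtime actually derives it from the device list at context-creation time:

```swift
// Sketch: how a stored prefix composes into a full TF device name.
let gpuDeviceNamePrefix = "/job:localhost/replica:0/task:0/device:GPU:"  // illustrative
let index = 0
let fullName = "\(gpuDeviceNamePrefix)\(index)"
// fullName == "/job:localhost/replica:0/task:0/device:GPU:0"
```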