 use std::fmt;
 use std::future::Future;
 use std::marker::PhantomData;
+use std::ops::Deref;
 use std::panic::{RefUnwindSafe, UnwindSafe};
 use std::rc::Rc;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
@@ -47,6 +48,7 @@ use async_task::{Builder, Runnable};
 use concurrent_queue::ConcurrentQueue;
 use futures_lite::{future, prelude::*};
 use slab::Slab;
+use thread_local::ThreadLocal;
 
 #[doc(no_inline)]
 pub use async_task::Task;
@@ -508,7 +510,7 @@ struct State {
     queue: ConcurrentQueue<Runnable>,
 
     /// Local queues created by runners.
-    local_queues: RwLock<Vec<Arc<ConcurrentQueue<Runnable>>>>,
+    local_queues: ThreadLocal<LocalQueue>,
 
     /// Set to `true` when a sleeping ticker is notified or no tickers are sleeping.
     notified: AtomicBool,
@@ -525,7 +527,7 @@ impl State {
     fn new() -> State {
         State {
             queue: ConcurrentQueue::unbounded(),
-            local_queues: RwLock::new(Vec::new()),
+            local_queues: ThreadLocal::new(),
             notified: AtomicBool::new(true),
             sleepers: Mutex::new(Sleepers {
                 count: 0,
@@ -756,9 +758,6 @@ struct Runner<'a> {
     /// Inner ticker.
     ticker: Ticker<'a>,
 
-    /// The local queue.
-    local: Arc<ConcurrentQueue<Runnable>>,
-
     /// Bumped every time a runnable task is found.
     ticks: AtomicUsize,
 }
@@ -769,38 +768,36 @@ impl Runner<'_> {
         let runner = Runner {
             state,
             ticker: Ticker::new(state),
-            local: Arc::new(ConcurrentQueue::bounded(512)),
             ticks: AtomicUsize::new(0),
         };
-        state
-            .local_queues
-            .write()
-            .unwrap()
-            .push(runner.local.clone());
         runner
     }
 
     /// Waits for the next runnable task to run.
     async fn runnable(&self, rng: &mut fastrand::Rng) -> Runnable {
+        let local_queue = self.state.local_queues.get_or_default();
+
         let runnable = self
            .ticker
            .runnable_with(|| {
+                let local_queue = self.state.local_queues.get_or_default();
+
                // Try the local queue.
-                if let Ok(r) = self.local.pop() {
+                if let Ok(r) = local_queue.pop() {
                    return Some(r);
                }
 
                // Try stealing from the global queue.
                if let Ok(r) = self.state.queue.pop() {
-                    steal(&self.state.queue, &self.local);
+                    steal(&self.state.queue, &local_queue);
                    return Some(r);
                }
 
                // Try stealing from other runners.
-                let local_queues = self.state.local_queues.read().unwrap();
+                let local_queues = &self.state.local_queues;
 
                // Pick a random starting point in the iterator list and rotate the list.
-                let n = local_queues.len();
+                let n = local_queues.iter().count();
                let start = rng.usize(..n);
                let iter = local_queues
                    .iter()
@@ -809,12 +806,12 @@ impl Runner<'_> {
                    .take(n);
 
                // Remove this runner's local queue.
-                let iter = iter.filter(|local| !Arc::ptr_eq(local, &self.local));
+                let iter = iter.filter(|local| !core::ptr::eq(local, &local_queue));
 
                // Try stealing from each local queue in the list.
                for local in iter {
-                    steal(local, &self.local);
-                    if let Ok(r) = self.local.pop() {
+                    steal(local, &local_queue);
+                    if let Ok(r) = local_queue.pop() {
                        return Some(r);
                    }
                }
@@ -828,7 +825,7 @@ impl Runner<'_> {
 
        if ticks % 64 == 0 {
            // Steal tasks from the global queue to ensure fair task scheduling.
-            steal(&self.state.queue, &self.local);
+            steal(&self.state.queue, &local_queue);
        }
 
        runnable
@@ -838,14 +835,10 @@ impl Runner<'_> {
 impl Drop for Runner<'_> {
    fn drop(&mut self) {
        // Remove the local queue.
-        self.state
-            .local_queues
-            .write()
-            .unwrap()
-            .retain(|local| !Arc::ptr_eq(local, &self.local));
+        let local_queue = self.state.local_queues.get_or_default();
 
        // Re-schedule remaining tasks in the local queue.
-        while let Ok(r) = self.local.pop() {
+        while let Ok(r) = local_queue.pop() {
            r.schedule();
        }
    }
@@ -937,11 +930,26 @@ fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_
    f.debug_struct(name)
        .field("active", &ActiveTasks(&state.active))
        .field("global_tasks", &state.queue.len())
-        .field("local_runners", &LocalRunners(&state.local_queues))
        .field("sleepers", &SleepCount(&state.sleepers))
        .finish()
 }
 
+struct LocalQueue(ConcurrentQueue<Runnable>);
+
+impl Default for LocalQueue {
+    fn default() -> Self {
+        Self(ConcurrentQueue::bounded(512))
+    }
+}
+
+impl Deref for LocalQueue {
+    type Target = ConcurrentQueue<Runnable>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
 /// Runs a closure when dropped.
 struct CallOnDrop<F: FnMut()>(F);
 
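For readers unfamiliar with the thread_local crate, here is a minimal standalone sketch (not part of this commit; the Counter type and the whole example are illustrative) of the pattern the new local_queues field relies on: ThreadLocal::get_or_default() lazily creates one value per accessing thread via the type's Default impl, and iter() lets any thread visit every per-thread value, which is what the work-stealing loop above does.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use thread_local::ThreadLocal;

// Illustrative per-thread value; stands in for the commit's `LocalQueue`.
#[derive(Default)]
struct Counter(AtomicUsize);

fn main() {
    let tls: Arc<ThreadLocal<Counter>> = Arc::new(ThreadLocal::new());

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let tls = Arc::clone(&tls);
            std::thread::spawn(move || {
                // First access on a thread creates that thread's Counter via Default,
                // mirroring `local_queues.get_or_default()` in the diff.
                tls.get_or_default().0.fetch_add(1, Ordering::Relaxed);
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }

    // Any thread can iterate over all per-thread values, which is the basis
    // for stealing from other runners' local queues.
    let total: usize = tls.iter().map(|c| c.0.load(Ordering::Relaxed)).sum();
    assert_eq!(total, 4);
}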