@@ -553,7 +553,7 @@ dispatch_assert_queue(dispatch_queue_t dq)
 	if (unlikely(_dq_state_drain_pended(dq_state))) {
 		goto fail;
 	}
-	if (likely(_dq_state_drain_owner(dq_state) == _dispatch_thread_port())) {
+	if (likely(_dq_state_drain_owner(dq_state) == _dispatch_tid_self())) {
 		return;
 	}
 	if (likely(dq->dq_width > 1)) {
@@ -580,7 +580,7 @@ dispatch_assert_queue_not(dispatch_queue_t dq)
 	if (_dq_state_drain_pended(dq_state)) {
 		return;
 	}
-	if (likely(_dq_state_drain_owner(dq_state) != _dispatch_thread_port())) {
+	if (likely(_dq_state_drain_owner(dq_state) != _dispatch_tid_self())) {
 		if (likely(dq->dq_width == 1)) {
 			// we can look at the width: if it is changing while we read it,
 			// it means that a barrier is running on `dq` concurrently, which
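
Both assertions in the hunks above boil down to the same primitive: load the queue's state word once, mask off the drain-owner field, and compare it with the calling thread's ID (what `_dispatch_tid_self()` returns). A minimal standalone sketch of that idea in C11 follows; `toy_queue`, `TOY_OWNER_MASK`, and `toy_tid_self` are hypothetical stand-ins, not libdispatch's real state layout or helpers.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_OWNER_MASK 0xffffffffull // low 32 bits: draining thread's ID

typedef struct {
	_Atomic uint64_t state; // owner TID in the low bits, flags above
} toy_queue;

static uint32_t toy_tid_self(void) {
	// Stand-in for a stable per-thread 32-bit ID; assumes pthread_t
	// fits in a pointer-sized integer on this platform.
	return (uint32_t)(uintptr_t)pthread_self();
}

static void toy_assert_queue(toy_queue *q) {
	uint64_t state = atomic_load_explicit(&q->state, memory_order_relaxed);
	if ((uint32_t)(state & TOY_OWNER_MASK) != toy_tid_self()) {
		fprintf(stderr, "BUG: not running on the expected queue\n");
		abort();
	}
}

int main(void) {
	toy_queue q = { 0 };
	// Pretend the current thread just took the drain lock:
	atomic_store_explicit(&q.state, (uint64_t)toy_tid_self(),
			memory_order_relaxed);
	toy_assert_queue(&q); // passes: the owner field matches our TID
	return 0;
}
```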
@@ -1602,7 +1602,7 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate)
 					DISPATCH_QUEUE_WIDTH_FULL_BIT) {
 				value = full_width;
 				value &= ~DISPATCH_QUEUE_DIRTY;
-				value |= _dispatch_thread_port();
+				value |= _dispatch_tid_self();
 			}
 		}
 	}
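
The branch above folds three updates into the single value that the surrounding atomic loop publishes: adopt the precomputed full-width state, clear the dirty bit, and record the resuming thread as drain owner. A hedged sketch of that composition, with illustrative (not real) bit positions:

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_QUEUE_DIRTY          (1ull << 60) // hypothetical "dirty" flag
#define TOY_QUEUE_WIDTH_FULL_BIT (1ull << 59) // hypothetical "width full" bit

// Build the replacement state word as one value, so a single atomic
// store/CAS both clears the dirty bit and installs the new owner.
static uint64_t toy_compose_owned_state(uint64_t full_width, uint32_t tid) {
	uint64_t value = full_width;  // adopt the precomputed full-width state
	value &= ~TOY_QUEUE_DIRTY;    // about to drain: no longer dirty
	value |= (uint64_t)tid;       // low bits: drain owner = current thread
	return value;
}

int main(void) {
	uint64_t v = toy_compose_owned_state(TOY_QUEUE_WIDTH_FULL_BIT, 0x1234u);
	printf("state = 0x%llx\n", (unsigned long long)v);
	return 0;
}
```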
@@ -2334,7 +2334,7 @@ _dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID(
 		DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type");
 	}
 	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-	return _dq_state_drain_locked_by(dq_state, _dispatch_thread_port());
+	return _dq_state_drain_locked_by(dq_state, _dispatch_tid_self());
 }
 #endif
 
@@ -2471,7 +2471,7 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp,
 		pflags |= _PTHREAD_SET_SELF_QOS_FLAG;
 	}
 	if (unlikely(DISPATCH_QUEUE_DRAIN_OWNER(&_dispatch_mgr_q) ==
-			_dispatch_thread_port())) {
+			_dispatch_tid_self())) {
 		DISPATCH_INTERNAL_CRASH(pp,
 				"Changing the QoS while on the manager queue");
 	}
@@ -2773,7 +2773,7 @@ _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd)
 		v = dbpd->dbpd_voucher;
 	}
 	ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags);
-	dbpd->dbpd_thread = _dispatch_thread_port();
+	dbpd->dbpd_thread = _dispatch_tid_self();
 	_dispatch_client_callout(dbpd->dbpd_block,
 			_dispatch_Block_invoke(dbpd->dbpd_block));
 	_dispatch_reset_priority_and_voucher(op, ov);
@@ -3123,7 +3123,7 @@ _dispatch_async_redirect_invoke(dispatch_continuation_t dc,
 	old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
 	op = dq->dq_override;
 	if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
-		_dispatch_wqthread_override_start(_dispatch_thread_port(), op);
+		_dispatch_wqthread_override_start(_dispatch_tid_self(), op);
 		// Ensure that the root queue sees that this thread was overridden.
 		_dispatch_set_defaultpriority_override();
 	}
@@ -3543,7 +3543,7 @@ _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt,
 		_dispatch_introspection_barrier_sync_begin(dq, func);
 	}
 #endif
-	uint32_t th_self = _dispatch_thread_port();
+	uint32_t th_self = _dispatch_tid_self();
 	struct dispatch_continuation_s dbss = {
 		.dc_flags = DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT,
 		.dc_func = _dispatch_barrier_sync_f_slow_invoke,
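
The slow path above captures the caller's thread ID in `th_self`, embeds it in a continuation pushed onto the queue, and parks the caller on a thread event until the drainer wakes it. Below is a self-contained sketch of that shape; the `toy_event_*` helpers are stand-ins built on pthreads, not the real `_dispatch_thread_event_*` internals.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t mu;
	pthread_cond_t  cv;
	int             signaled; // one-shot flag
} toy_event;

typedef struct {
	uint32_t  waiter_tid; // who is blocked (what th_self captures above)
	toy_event event;      // what the waiter parks on
} toy_sync_continuation;

static void toy_event_init(toy_event *e) {
	pthread_mutex_init(&e->mu, NULL);
	pthread_cond_init(&e->cv, NULL);
	e->signaled = 0;
}

static void toy_event_wait(toy_event *e) {
	pthread_mutex_lock(&e->mu);
	while (!e->signaled) pthread_cond_wait(&e->cv, &e->mu);
	pthread_mutex_unlock(&e->mu);
}

static void toy_event_signal(toy_event *e) {
	pthread_mutex_lock(&e->mu);
	e->signaled = 1;
	pthread_cond_signal(&e->cv);
	pthread_mutex_unlock(&e->mu);
}

static void *drainer(void *arg) {
	// The queue's drainer reaches the continuation and wakes the waiter.
	toy_event_signal(&((toy_sync_continuation *)arg)->event);
	return NULL;
}

int main(void) {
	toy_sync_continuation dc = {
		.waiter_tid = (uint32_t)(uintptr_t)pthread_self(),
	};
	toy_event_init(&dc.event);
	pthread_t t;
	pthread_create(&t, NULL, drainer, &dc);
	toy_event_wait(&dc.event); // block until the drainer signals
	pthread_join(t, NULL);
	printf("sync slow path woke waiter %u\n", dc.waiter_tid);
	return 0;
}
```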
@@ -3709,7 +3709,7 @@ _dispatch_non_barrier_complete(dispatch_queue_t dq)
 					DISPATCH_QUEUE_WIDTH_FULL_BIT) {
 				new_state = full_width;
 				new_state &= ~DISPATCH_QUEUE_DIRTY;
-				new_state |= _dispatch_thread_port();
+				new_state |= _dispatch_tid_self();
 			}
 		}
 	}
@@ -3736,7 +3736,7 @@ _dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
 	}
 	dispatch_thread_event_s event;
 	_dispatch_thread_event_init(&event);
-	uint32_t th_self = _dispatch_thread_port();
+	uint32_t th_self = _dispatch_tid_self();
 	struct dispatch_continuation_s dc = {
 		.dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT,
 #if DISPATCH_INTROSPECTION
@@ -4430,7 +4430,7 @@ _dispatch_main_queue_drain(void)
 				" after dispatch_main()");
 	}
 	mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq);
-	if (slowpath(owner != _dispatch_thread_port())) {
+	if (slowpath(owner != _dispatch_tid_self())) {
 		DISPATCH_CLIENT_CRASH(owner, "_dispatch_main_queue_callback_4CF called"
 				" from the wrong thread");
 	}
@@ -4655,7 +4655,7 @@ _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq,
 	}
 
 	if (dq) {
-		uint32_t self = _dispatch_thread_port();
+		uint32_t self = _dispatch_tid_self();
 		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
 			new_state = old_state;
 			if (!_dq_state_drain_pended(old_state) ||
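
`os_atomic_rmw_loop2o()` above expands to a compare-and-swap loop over `dq_state`: load the old value, compute the desired new value, and retry until the CAS succeeds or the body gives up. A portable C11 sketch of the same shape, with a hypothetical flag bit standing in for the real state layout:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_DRAIN_PENDED (1ull << 63) // hypothetical "drain pended" flag

static bool toy_try_clear_pended(_Atomic uint64_t *state, uint32_t self) {
	uint64_t old_state = atomic_load_explicit(state, memory_order_relaxed);
	uint64_t new_state;
	do {
		new_state = old_state;
		if (!(old_state & TOY_DRAIN_PENDED)) {
			return false; // nothing to transfer; give up on the loop
		}
		new_state &= ~TOY_DRAIN_PENDED;
		new_state |= (uint64_t)self; // install ourselves as drain owner
	} while (!atomic_compare_exchange_weak_explicit(state, &old_state,
			new_state, memory_order_release, memory_order_relaxed));
	// On failure, old_state is reloaded and the new value is recomputed.
	return true;
}
```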
@@ -5091,7 +5091,7 @@ _dispatch_queue_class_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
 		uint64_t pending_barrier_width =
 				(dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
 		uint64_t xor_owner_and_set_full_width_and_in_barrier =
-				_dispatch_thread_port() | DISPATCH_QUEUE_WIDTH_FULL_BIT |
+				_dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT |
 				DISPATCH_QUEUE_IN_BARRIER;
 
 #ifdef DLOCK_NOWAITERS_BIT
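
One hedged reading of why the wakeup path precomputes an `owner | full-width | in-barrier` constant for an XOR rather than a plain OR: when the owner bits are known to be zero, the same constant XOR'd in once installs all three fields, and XOR'd in again removes them, keeping both transitions a single atomic op. A toy demonstration with hypothetical bit positions, not libdispatch's real state word:

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define TOY_WIDTH_FULL_BIT (1ull << 59) // hypothetical "width full" bit
#define TOY_IN_BARRIER     (1ull << 58) // hypothetical "in barrier" bit

int main(void) {
	_Atomic uint64_t state = 0; // owner bits currently zero
	uint32_t tid = 0x1234u;
	uint64_t xor_owner_and_set_full_width_and_in_barrier =
			(uint64_t)tid | TOY_WIDTH_FULL_BIT | TOY_IN_BARRIER;

	// Acquire: set owner + full width + in-barrier in one atomic XOR.
	uint64_t old = atomic_fetch_xor_explicit(&state,
			xor_owner_and_set_full_width_and_in_barrier,
			memory_order_acquire);
	assert((old & 0xffffffffull) == 0); // precondition: no previous owner

	// Release: XOR-ing the same constant undoes all three fields.
	atomic_fetch_xor_explicit(&state,
			xor_owner_and_set_full_width_and_in_barrier,
			memory_order_release);
	assert(atomic_load(&state) == 0);
	return 0;
}
```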