Preface
In the previous post, iOS Multithreading (2): GCD Foundation & Source Code Analysis, we did some preliminary exploration of the GCD source code. Today we dig deeper into the source.
Preparation
- libdispatch source code
1. Deadlock source code analysis
Look at this code:
- (void)deadlock {
    dispatch_queue_t queue = dispatch_queue_create("SSL", DISPATCH_QUEUE_SERIAL);
    // Asynchronous function
    dispatch_async(queue, ^{
        // Synchronous function
        dispatch_sync(queue, ^{
            NSLog(@"deadlock");
        });
    });
}
- This code will deadlock, and the cause lies in the combination of a serial queue and dispatch_sync. Let's walk through the source code to see why.
Enter dispatch_sync -> _dispatch_sync_f -> _dispatch_sync_f_inline:
static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    if (likely(dq->dq_width == 1)) {
        return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
    }
    ...
}
- Because the queue is serial, dq->dq_width == 1 holds, so the barrier path _dispatch_barrier_sync_f is taken (see the sketch below for the contrasting concurrent-queue case).
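A minimal runnable sketch of that contrast, in plain C with clang blocks (compile with -fblocks on Apple platforms; the queue label and log message are made up): the same async + sync nesting on a concurrent queue, whose dq_width is greater than 1, never reaches _dispatch_barrier_sync_f and so does not deadlock.

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
    // dq_width > 1 here, so _dispatch_sync_f_inline does not take the
    // _dispatch_barrier_sync_f path for this queue
    dispatch_queue_t queue =
            dispatch_queue_create("ssl.concurrent", DISPATCH_QUEUE_CONCURRENT);
    dispatch_semaphore_t done = dispatch_semaphore_create(0);

    dispatch_async(queue, ^{
        dispatch_sync(queue, ^{
            printf("no deadlock on a concurrent queue\n");
        });
        dispatch_semaphore_signal(done);
    });

    dispatch_semaphore_wait(done, DISPATCH_TIME_FOREVER);
    return 0;
}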
Enter _dispatch_barrier_sync_f:
static void
_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func, uintptr_t dc_flags)
{
_dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
}
Enter _dispatch_barrier_sync_f_inline:
static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    dispatch_tid tid = _dispatch_tid_self();

    if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
        DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
    }

    dispatch_lane_t dl = upcast(dq)._dl;
    if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
        return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
                DC_FLAG_BARRIER | dc_flags);
    }

    if (unlikely(dl->do_targetq->do_targetq)) {
        return _dispatch_sync_recurse(dl, ctxt, func,
                DC_FLAG_BARRIER | dc_flags);
    }
    _dispatch_introspection_sync_begin(dl);
    _dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
            DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
                    dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}
- There are several places this function can return from, so it is not obvious from reading the code which branch is actually taken.
Add symbolic breakpoints and run the program:
- You can see that execution goes into _dispatch_sync_f_slow and then into the __DISPATCH_WAIT_FOR_QUEUE__ function.
Check _dispatch_sync_f_slow:
static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
        dispatch_function_t func, uintptr_t top_dc_flags,
        dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
    ...
    dispatch_queue_t dq = dqu._dq;
    pthread_priority_t pp = _dispatch_get_priority();
    struct dispatch_sync_context_s dsc = {
        .dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
        .dc_func     = _dispatch_async_and_wait_invoke,
        .dc_ctxt     = &dsc,
        .dc_other    = top_dq,
        .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
        .dc_voucher  = _voucher_get(),
        .dsc_func    = func,
        .dsc_ctxt    = ctxt,
        .dsc_waiter  = _dispatch_tid_self(),
    };

    _dispatch_trace_item_push(top_dq, &dsc);
    __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);
    ...
}
Enter __DISPATCH_WAIT_FOR_QUEUE__:
static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
    uint64_t dq_state = _dispatch_wait_prepare(dq);
    // If the queue we are about to wait on is already drain-locked by the
    // waiter's own thread, this is a deadlock: crash with a clear message
    if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
        DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
                "dispatch_sync called on queue "
                "already owned by current thread");
    }
    ...
}
- The origin of dsc->dsc_waiter: _dispatch_sync_f_slow -> .dsc_waiter = _dispatch_tid_self() -> #define _dispatch_tid_self() ((dispatch_tid)_dispatch_thread_port()) -> the current thread's id.
Go to _dq_state_drain_locked_by -> _dispatch_lock_is_locked_by:
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{
// equivalent to _dispatch_lock_owner(lock_value) == tid
return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}
DLOCK_OWNER_MASK is a very large value: #define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc). Because DLOCK_OWNER_MASK is big enough, as long as (lock_value ^ tid) is not 0, the whole expression is non-zero. Only when lock_value and tid are equal does (lock_value ^ tid) become 0, that is, when the thread currently draining (executing) the queue is the same thread that is now waiting on it via dispatch_sync. That is exactly the deadlock case, and GCD crashes with the "dispatch_sync called on queue already owned by current thread" message above.
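To make the check concrete, here is a minimal standalone sketch of the ownership test (the mask value is copied from the definition above; the sample tid values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define DLOCK_OWNER_MASK ((uint32_t)0xfffffffc)

// Same shape as _dispatch_lock_is_locked_by: XOR clears the bits that match,
// the mask ignores the two low flag bits, so the result is 0 only when the
// lock owner and the waiter are the same thread.
static int is_locked_by(uint32_t lock_value, uint32_t tid) {
    return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}

int main(void) {
    uint32_t owner_tid = 0x1a2b3c00; // hypothetical thread draining the queue
    uint32_t same_tid  = 0x1a2b3c00; // that same thread now calling dispatch_sync
    uint32_t other_tid = 0x2c3d4e00; // a different thread

    printf("same thread:  %d -> deadlock, crash\n", is_locked_by(owner_tid, same_tid));
    printf("other thread: %d -> just waits\n", is_locked_by(owner_tid, other_tid));
    return 0;
}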
2. Asynchronous function analysis
Asynchronous function + concurrent queue:
dispatch_queue_t queue = dispatch_queue_create("ssl", DISPATCH_QUEUE_CONCURRENT);
dispatch_async(queue, ^{
    NSLog(@"SSL function analysis");
});
dispatch_async -> _dispatch_continuation_async -> dx_push -> dq_push. The dq_push slot is set differently for each queue type:
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane,
.do_type = DISPATCH_QUEUE_SERIAL_TYPE,
.do_dispose = _dispatch_lane_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_lane_invoke,
.dq_activate = _dispatch_lane_activate,
.dq_wakeup = _dispatch_lane_wakeup,
.dq_push = _dispatch_lane_push,
);
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
.do_type = DISPATCH_QUEUE_CONCURRENT_TYPE,
.do_dispose = _dispatch_lane_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_lane_invoke,
.dq_activate = _dispatch_lane_activate,
.dq_wakeup = _dispatch_lane_wakeup,
.dq_push = _dispatch_lane_concurrent_push,
);
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
.do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
.do_dispose = _dispatch_object_no_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_object_no_invoke,
.dq_activate = _dispatch_queue_no_activate,
.dq_wakeup = _dispatch_root_queue_wakeup,
.dq_push = _dispatch_root_queue_push,
);
- _dispatch_root_queue_push is the dq_push assignment for the global (root) concurrent queues, which we analyzed in the previous post; _dispatch_lane_concurrent_push is the assignment for an ordinary concurrent queue, and that is what we analyze next. (A minimal sketch of this vtable-style dispatch follows.)
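The dq_push slot is looked up through the queue's vtable, which is why dx_push ends up in a different function for each queue type. Below is a stripped-down sketch of that dispatch pattern (the types and names are invented for illustration, not the real libdispatch definitions):

#include <stdio.h>

// Each "queue class" fills in its own dq_push function pointer; a dx_push-like
// macro just calls whatever the instance's vtable points to.
typedef struct queue_s queue_t;

typedef struct {
    const char *do_type;
    void (*dq_push)(queue_t *q);
} queue_vtable_t;

struct queue_s {
    const queue_vtable_t *vtable;
    const char *label;
};

static void lane_push(queue_t *q)            { printf("%s -> _dispatch_lane_push\n", q->label); }
static void lane_concurrent_push(queue_t *q) { printf("%s -> _dispatch_lane_concurrent_push\n", q->label); }
static void root_queue_push(queue_t *q)      { printf("%s -> _dispatch_root_queue_push\n", q->label); }

static const queue_vtable_t serial_vt     = { "queue_serial",     lane_push };
static const queue_vtable_t concurrent_vt = { "queue_concurrent", lane_concurrent_push };
static const queue_vtable_t global_vt     = { "queue_global",     root_queue_push };

#define dx_push(q) ((q)->vtable->dq_push(q))

int main(void) {
    queue_t serial     = { &serial_vt,     "serial queue" };
    queue_t concurrent = { &concurrent_vt, "concurrent queue" };
    queue_t global     = { &global_vt,     "global root queue" };
    dx_push(&serial);
    dx_push(&concurrent);
    dx_push(&global);
    return 0;
}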
Enter _dispatch_lane_concurrent_push:
void
_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
        dispatch_qos_t qos)
{
    // <rdar://problem/24738102&24743140> reserving non barrier width
    // doesn't fail if only the ENQUEUED bit is set (unlike its barrier
    // width equivalent), so we have to check that this thread hasn't
    // enqueued anything ahead of this call or we can break ordering
    if (dq->dq_items_tail == NULL &&
            !_dispatch_object_is_waiter(dou) &&
            !_dispatch_object_is_barrier(dou) &&
            _dispatch_queue_try_acquire_async(dq)) {
        return _dispatch_continuation_redirect_push(dq, dou, qos);
    }

    _dispatch_lane_push(dq, dou, qos);
}
Enter _dispatch_lane_push:
DISPATCH_NOINLINE
void
_dispatch_lane_push(dispatch_lane_t dq, dispatch_object_t dou,
        dispatch_qos_t qos)
{
    dispatch_wakeup_flags_t flags = 0;
    struct dispatch_object_s *prev;

    if (unlikely(_dispatch_object_is_waiter(dou))) {
        return _dispatch_lane_push_waiter(dq, dou._dsc, qos);
    }

    dispatch_assert(!_dispatch_object_is_global(dq));
    qos = _dispatch_queue_push_qos(dq, qos);

    // If we are going to call dx_wakeup(), the queue must be retained before
    // the item we're pushing can be dequeued, which means:
    // - before we exchange the tail if we have to override
    // - before we set the head if we made the queue non empty.
    // Otherwise, if preempted between one of these and the call to dx_wakeup()
    // the blocks submitted to the queue may release the last reference to the
    // queue when invoked by _dispatch_lane_drain. <rdar://problem/6932776>

    prev = os_mpsc_push_update_tail(os_mpsc(dq, dq_items), dou._do, do_next);
    if (unlikely(os_mpsc_push_was_empty(prev))) {
        _dispatch_retain_2_unsafe(dq);
        flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY;
    } else if (unlikely(_dispatch_queue_need_override(dq, qos))) {
        // There's a race here, _dispatch_queue_need_override may read a stale
        // dq_state value.
        //
        // If it's a stale load from the same drain streak, given that
        // the max qos is monotonic, too old a read can only cause an
        // unnecessary attempt at overriding which is harmless.
        //
        // We'll assume here that a stale load from an a previous drain streak
        // never happens in practice.
        _dispatch_retain_2_unsafe(dq);
        flags = DISPATCH_WAKEUP_CONSUME_2;
    }
    os_mpsc_push_update_prev(os_mpsc(dq, dq_items), prev, dou._do, do_next);
    if (flags) {
        return dx_wakeup(dq, qos, flags);
    }
}
- You can see there are two possible returns here: _dispatch_lane_push_waiter and dx_wakeup.
Check dx_wakeup:
#define dx_wakeup(x, y, z) dx_vtable(x)->dq_wakeup(x, y, z)
Check dq_wakeup for a concurrent queue:
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
.do_type = DISPATCH_QUEUE_CONCURRENT_TYPE,
.do_dispose = _dispatch_lane_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_lane_invoke,
.dq_activate = _dispatch_lane_activate,
.dq_wakeup = _dispatch_lane_wakeup,
.dq_push = _dispatch_lane_concurrent_push,
);
Add symbolic breakpoints for _dispatch_lane_push_waiter and _dispatch_lane_wakeup and run the program:
- You can see that the program ends up calling the _dispatch_lane_wakeup function.
Enter _dispatch_lane_wakeup:
void
_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags)
{
dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
return _dispatch_lane_barrier_complete(dqu, qos, flags);
}
if (_dispatch_queue_class_probe(dqu)) {
target = DISPATCH_QUEUE_WAKEUP_TARGET;
}
return _dispatch_queue_wakeup(dqu, qos, flags, target);
}
Enter _dispatch_queue_wakeup:
void
_dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
        dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
{
    dispatch_queue_t dq = dqu._dq;
    uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED;

    dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT);

    if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) {
        _dispatch_retain_2(dq);
        flags |= DISPATCH_WAKEUP_CONSUME_2;
    }

    if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
        dispatch_assert(dx_metatype(dq) == _DISPATCH_SOURCE_TYPE);
        qos = _dispatch_queue_wakeup_qos(dq, qos);
        return _dispatch_lane_class_barrier_complete(upcast(dq)._dl, qos,
                flags, target, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED);
    }
    ...
}
Enter _dispatch_lane_class_barrier_complete:
static void
_dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos,
        dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target,
        uint64_t owned)
{
again:
    os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
        if (unlikely(_dq_state_needs_ensure_ownership(old_state))) {
            _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq);
            _dispatch_queue_move_to_contended_sync(dq->_as_dq);
            os_atomic_rmw_loop_give_up(goto again);
        }
        new_state  = _dq_state_merge_qos(old_state - owned, qos);
        new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
        if (unlikely(_dq_state_is_suspended(old_state))) {
            if (likely(_dq_state_is_base_wlh(old_state))) {
                new_state &= ~DISPATCH_QUEUE_ENQUEUED;
            }
        } else if (enqueue) {
            if (!_dq_state_is_enqueued(old_state)) {
                new_state |= enqueue;
            }
        } else if (unlikely(_dq_state_is_dirty(old_state))) {
            os_atomic_rmw_loop_give_up({
                os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
                flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE;
                return dx_wakeup(dq, qos, flags);
            });
        } else {
            new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
        }
    });

    if (tq) {
        if (likely((old_state ^ new_state) & enqueue)) {
            dispatch_assert(_dq_state_is_enqueued(new_state));
            dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2);
            return _dispatch_queue_push_queue(tq, dq, new_state);
        }
    }
    ...
}
_dispatch_queue_push_queue pushes the queue onto its target (root) queue via dx_push, and for a root queue the dq_push slot is _dispatch_root_queue_push. Enter _dispatch_root_queue_push:
void
_dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou,
dispatch_qos_t qos)
{
...
_dispatch_root_queue_push_inline(rq, dou, dou, 1);
}
Enter _dispatch_root_queue_push_inline:
static inline void
_dispatch_root_queue_push_inline(dispatch_queue_global_t dq,
        dispatch_object_t _head, dispatch_object_t _tail, int n)
{
    struct dispatch_object_s *hd = _head._do, *tl = _tail._do;
    if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) {
        return _dispatch_root_queue_poke(dq, n, 0);
    }
}
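Note that _dispatch_root_queue_poke is only reached when os_mpsc_push_list reports that the queue's item list used to be empty, i.e. when a worker thread may need to be woken up or created. A rough model of that idea, sketched with C11 atomics (a simplified stand-in for illustration, not the real os_mpsc macros):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

// Simplified multi-producer/single-consumer push: atomically swap the tail,
// link the old tail to the new item, and report whether the list was empty,
// because only then does the caller need to poke the root queue for a worker.
typedef struct item_s {
    struct item_s *_Atomic next;
    int payload;
} item_t;

typedef struct {
    item_t *_Atomic head;
    item_t *_Atomic tail;
} mpsc_queue_t;

static bool mpsc_push_was_empty(mpsc_queue_t *q, item_t *it) {
    atomic_store(&it->next, NULL);
    item_t *prev = atomic_exchange(&q->tail, it);
    if (prev == NULL) {
        atomic_store(&q->head, it);
        return true;   // list was empty: poke
    }
    atomic_store(&prev->next, it);
    return false;      // work was already queued: no poke needed
}

int main(void) {
    mpsc_queue_t q = { NULL, NULL };
    item_t a = { NULL, 1 }, b = { NULL, 2 };
    printf("push a, was empty: %d\n", mpsc_push_was_empty(&q, &a)); // 1
    printf("push b, was empty: %d\n", mpsc_push_was_empty(&q, &b)); // 0
    return 0;
}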
Enter _dispatch_root_queue_poke:
void
_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor)
{
return _dispatch_root_queue_poke_slow(dq, n, floor);
}
Enter _dispatch_root_queue_poke_slow:
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
    _dispatch_root_queues_init();
    ...
}
Enter _dispatch_root_queues_init:
static inline void
_dispatch_root_queues_init(void)
{
dispatch_once_f(&_dispatch_root_queues_pred, NULL,
_dispatch_root_queues_init_once);
}
_dispatch_root_queues_init goes through dispatch_once_f, so the root queues are only ever initialized once. Enter _dispatch_root_queues_init_once:
static void
_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
{
    _dispatch_fork_becomes_unsafe();
#if DISPATCH_USE_INTERNAL_WORKQUEUE
    size_t i;
    for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
        _dispatch_root_queue_init_pthread_pool(&_dispatch_root_queues[i], 0,
                _dispatch_root_queues[i].dq_priority);
    }
#else
    int wq_supported = _pthread_workqueue_supported();
    int r = ENOTSUP;

#if DISPATCH_USE_KEVENT_SETUP
    struct pthread_workqueue_config cfg = {
        .version = PTHREAD_WORKQUEUE_CONFIG_VERSION,
        .flags = 0,
        .workq_cb = 0,
        .kevent_cb = 0,
        .workloop_cb = 0,
        .queue_serialno_offs = dispatch_queue_offsets.dqo_serialnum,
#if PTHREAD_WORKQUEUE_CONFIG_VERSION >= 2
        .queue_label_offs = dispatch_queue_offsets.dqo_label,
#endif
    };
#endif

    ...
#if DISPATCH_USE_KEVENT_SETUP
        cfg.workq_cb = _dispatch_worker_thread2;
        r = pthread_workqueue_setup(&cfg, sizeof(cfg));
#else
        r = _pthread_workqueue_init(_dispatch_worker_thread2,
                offsetof(struct dispatch_queue_s, dq_serialnum), 0);
#endif // DISPATCH_USE_KEVENT_SETUP
#if DISPATCH_USE_KEVENT_WORKLOOP
    } else if (wq_supported & WORKQ_FEATURE_WORKLOOP) {
#if DISPATCH_USE_KEVENT_SETUP
        cfg.workq_cb = _dispatch_worker_thread2;
        cfg.kevent_cb = (pthread_workqueue_function_kevent_t)
                _dispatch_kevent_worker_thread;
        cfg.workloop_cb = (pthread_workqueue_function_workloop_t)
                _dispatch_workloop_worker_thread;
        r = pthread_workqueue_setup(&cfg, sizeof(cfg));
#else
        // The worker entry points are handed to the pthread workqueue layer;
        // the OS/kernel decides when to bring up worker threads to run them
        r = _pthread_workqueue_init_with_workloop(_dispatch_worker_thread2,
                (pthread_workqueue_function_kevent_t)
                _dispatch_kevent_worker_thread,
                (pthread_workqueue_function_workloop_t)
                _dispatch_workloop_worker_thread,
                offsetof(struct dispatch_queue_s, dq_serialnum), 0);
#endif // DISPATCH_USE_KEVENT_SETUP
#endif // DISPATCH_USE_KEVENT_WORKLOOP
    }
    ...
}
After _dispatch_root_queues_init finishes, go back to _dispatch_root_queue_poke_slow:
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
    int remaining = n;
    _dispatch_root_queues_init();
    _dispatch_debug_root_queue(dq, __func__);
    _dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n);
#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
    // Global (root) queues
    if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)
#endif
    {
        // Ask the workqueue layer to bring up worker threads
        _dispatch_root_queue_debug("requesting new worker thread for global "
                "queue: %p", dq);
        r = _pthread_workqueue_addthreads(remaining,
                _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));
        (void)dispatch_assume_zero(r);
        return;
    }
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL
    dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt;
    if (likely(pqc->dpq_thread_mediator.do_vtable)) {
        while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) {
            _dispatch_root_queue_debug("signaled sleeping worker for "
                    "global queue: %p", dq);
            if (!--remaining) {
                return;
            }
        }
    }
    int can_request, t_count;
    // seq_cst with atomic store to tail <rdar://problem/16932833>
    t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered);
    do {
        // floor is 0 or 1
        can_request = t_count < floor ? 0 : t_count - floor;
        if (remaining > can_request) {
            // Asking for more threads than the pool can provide: reduce the request
            _dispatch_root_queue_debug("pthread pool reducing request from %d to %d",
                    remaining, can_request);
            os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed);
            remaining = can_request;
        }
        if (remaining == 0) {
            // The thread pool is exhausted
            _dispatch_root_queue_debug("pthread pool is full for root queue: "
                    "%p", dq);
            return;
        }
        // Thread-pool bookkeeping; dgq_thread_pool_size has an initial value of 1
    } while (!os_atomic_cmpxchgv2o(dq, dgq_thread_pool_size, t_count,
            t_count - remaining, &t_count, acquire));
#if !defined(_WIN32)
    pthread_attr_t *attr = &pqc->dpq_thread_attr;
    pthread_t tid, *pthr = &tid;
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
    if (unlikely(dq == &_dispatch_mgr_root_queue)) {
        pthr = _dispatch_mgr_root_queue_init();
    }
#endif
    do {
        _dispatch_retain(dq); // released in _dispatch_worker_thread
        while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
            if (r != EAGAIN) {
                (void)dispatch_assume_zero(r);
            }
            _dispatch_temporary_resource_shortage();
        }
    } while (--remaining);
#else // defined(_WIN32)
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
    if (unlikely(dq == &_dispatch_mgr_root_queue)) {
        _dispatch_mgr_root_queue_init();
    }
#endif
    do {
        _dispatch_retain(dq); // released in _dispatch_worker_thread
#if DISPATCH_DEBUG
        unsigned dwStackSize = 0;
#else
        unsigned dwStackSize = 64 * 1024;
#endif
        uintptr_t hThread = 0;
        while (!(hThread = _beginthreadex(NULL, dwStackSize,
                _dispatch_worker_thread_thunk, dq,
                STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) {
            if (errno != EAGAIN) {
                (void)dispatch_assume(hThread);
            }
            _dispatch_temporary_resource_shortage();
        }
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
        if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) {
            (void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread,
                    _dispatch_mgr_sched.prio) == TRUE);
        }
#endif
        CloseHandle((HANDLE)hThread);
    } while (--remaining);
#endif // defined(_WIN32)
#else
    (void)floor;
#endif // DISPATCH_USE_PTHREAD_POOL
    ...
}
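The key constraint for thread creation in this function is the clamp on remaining: a request can never exceed what is left in dgq_thread_pool_size minus floor. A standalone sketch of just that arithmetic, with made-up numbers:

#include <stdio.h>

// Mirrors the clamping logic above: 'remaining' threads are requested, but at
// most (pool_size - floor) can be granted; a zero result means the pool is full.
static int clamp_request(int remaining, int pool_size, int floor) {
    int can_request = pool_size < floor ? 0 : pool_size - floor;
    if (remaining > can_request) {
        remaining = can_request; // the "reducing request from %d to %d" case
    }
    return remaining;
}

int main(void) {
    printf("%d\n", clamp_request(3, 255, 0)); // 3: plenty of room in the pool
    printf("%d\n", clamp_request(10, 4, 1));  // 3: clamped to pool_size - floor
    printf("%d\n", clamp_request(5, 0, 1));   // 0: pool exhausted, no new threads
    return 0;
}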
Look at where dgq_thread_pool_size gets its value, and find the definition of DISPATCH_WORKQ_MAX_PTHREAD_COUNT:
#ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT
#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255
#endif
- 255 is the theoretical maximum size of the thread pool.
Open the official documentation and look at the following table:
- The stack space of a worker thread is 512KB, while the minimum stack a thread can occupy is 16KB. In other words, for a fixed amount of memory, the more memory each thread needs, the fewer threads can be created.
- Suppose an iOS device has 4GB of memory, divided between kernel space and user space. If the entire 1GB of kernel space were used to create threads, then in theory it could hold 1GB / 16KB = 65,536 threads.
3. The underlying principle of singletons
Typical use of dispatch_once for a singleton:
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
});
Enter dispatch_once:
void
dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}
Enter dispatch_once_f:
void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
    dispatch_once_gate_t l = (dispatch_once_gate_t)val;

#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    uintptr_t v = os_atomic_load(&l->dgo_once, acquire);
    // Already executed once: return immediately
    if (likely(v == DLOCK_ONCE_DONE)) {
        return;
    }
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    if (likely(DISPATCH_ONCE_IS_GEN(v))) {
        return _dispatch_once_mark_done_if_quiesced(l, v);
    }
#endif
#endif
    // First call: try to acquire the gate and run the block
    if (_dispatch_once_gate_tryenter(l)) {
        return _dispatch_once_callout(l, ctxt, func);
    }
    // Another thread is running the block: wait for it to finish
    return _dispatch_once_wait(l);
}
Enter _dispatch_once_gate_tryenter:
static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
    // Atomic compare-and-swap: only the first thread to arrive succeeds and
    // stores its own lock value into the gate
    return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
            (uintptr_t)_dispatch_lock_value_for_self(), relaxed);
}
Enter _dispatch_once_callout:
static void
_dispatch_once_callout(dispatch_once_gate_t l, void *ctxt,
        dispatch_function_t func)
{
    // Execute the block
    _dispatch_client_callout(ctxt, func);
    // Mark the gate as done and wake up any waiters
    _dispatch_once_gate_broadcast(l);
}
Enter _dispatch_once_gate_broadcast:
static inline void
_dispatch_once_gate_broadcast(dispatch_once_gate_t l)
{
dispatch_lock value_self = _dispatch_lock_value_for_self();
uintptr_t v;
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
v = _dispatch_once_mark_quiescing(l);
#else
v = _dispatch_once_mark_done(l);
#endif
if (likely((dispatch_lock)v == value_self)) return;
_dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)v);
}
Enter _dispatch_once_mark_done:
static inline uintptr_t
_dispatch_once_mark_done(dispatch_once_gate_t dgo)
{
    // Atomically store DLOCK_ONCE_DONE so that later callers hit the fast path
    return os_atomic_xchg(&dgo->dgo_once, DLOCK_ONCE_DONE, release);
}
This corresponds to the v == DLOCK_ONCE_DONE check at the top of dispatch_once_f: once the marker is set, every subsequent call returns immediately and the block is never executed again.
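Putting the pieces together, the once gate behaves roughly like the following C11 sketch (a simplified model for illustration: the real libdispatch parks waiters on a lock and uses careful memory ordering instead of spinning):

#include <stdatomic.h>
#include <stdio.h>

// Three paths, mirroring dispatch_once_f:
//  - DONE: return immediately (fast path)
//  - UNLOCKED -> LOCKED via compare-and-swap: run the block, then mark DONE
//  - otherwise: another thread is running the block, wait until it is DONE
enum { ONCE_UNLOCKED = 0, ONCE_LOCKED = 1, ONCE_DONE = 2 };

static void once_f(atomic_int *gate, void (*func)(void)) {
    if (atomic_load(gate) == ONCE_DONE) return;           // v == DLOCK_ONCE_DONE

    int expected = ONCE_UNLOCKED;
    if (atomic_compare_exchange_strong(gate, &expected, ONCE_LOCKED)) {
        func();                                           // _dispatch_once_callout
        atomic_store(gate, ONCE_DONE);                    // _dispatch_once_mark_done
        return;
    }
    while (atomic_load(gate) != ONCE_DONE) { /* spin */ } // _dispatch_once_wait
}

static void init_once(void) { printf("initialized exactly once\n"); }

int main(void) {
    static atomic_int gate;      // zero-initialized, i.e. ONCE_UNLOCKED
    once_f(&gate, init_once);
    once_f(&gate, init_once);    // second call hits the DONE fast path
    return 0;
}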