Software components required to implement the Dispatch Queue (code)

libdispatch

The following is just an analysis of the Dispatch Queue.

GCD queues are organized hierarchically:

There are several main data structures in GCD:

The Dispatch Queue in GCD is implemented with linked lists and structs.

Queue inheritance:

Structs and macros commonly used in GCD

// Declare a structure. We can see the relationship between name_t and name_s inside GCD. A thing#define DISPATCH_DECL(name) typedef struct name##_s *name##_t
Copy the code
Typedef union {struct _os_object_s *_os_obj; // struct _os_object_s *_os_obj; Struct dispatch_object_s *_do; Os_object struct dispatch_continuation_s *_dc; Struct dispatch_queue_s *_dq; Struct dispatch_queue_attr_s *_dqa; Struct dispatch_group_s *_dg; Struct dispatch_semaphore_s *_dsema; } dispatch_object_t DISPATCH_TRANSPARENT_UNION;Copy the code
// Struct _os_object_s {_OS_OBJECT_HEADER(const _os_object_vtable_s *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); } _os_object_s; // The system object header defines the macro#define _OS_OBJECT_HEADER(isa, ref_cnt, xref_cnt) isa; // isa pointer int volatile ref_cnt; Int volatile xref_cnt volatile xref_cnt volatile xref_cnt volatile xref_cnt volatile xref_cnt volatile xref_cnt volatile xref_cntCopy the code
struct dispatch_object_s {
	_DISPATCH_OBJECT_HEADER(object);
};
#define _DISPATCH_OBJECT_HEADER(x) struct _os_object_s _as_os_obj[0]; Os_object OS_OBJECT_STRUCT_HEADER(dispatch_##x); 
	struct dispatch_##x##_s *volatile do_next; // List next
	struct dispatch_queue_s *do_targetq; 
	void *do_ctxt; 
	void *do_finalizer
Copy the code
// Tasks in GCD are encapsulated as dispatch_CONTINUations and then submitted to queues. This Dispatch_CONTINUATION is used to store the Dispatch_group of the task and some other information, equivalent to what is commonly called the execution context. typedef struct dispatch_continuation_s { struct dispatch_object_s _as_do[0]; // Continuation from dispatch_object DISPATCH_CONTINUATION_HEADER(continuation); // Some properties of continuation} *dispatch_continuation_t;#define DISPATCH_CONTINUATION_HEADER(x) 
	union { 
		const void *do_vtable; 
		uintptr_t dc_flags; 
	}; 
	union { 
		pthread_priority_t dc_priority; 
		int dc_cache_cnt; 
		uintptr_t dc_pad; 
	}; 
	struct voucher_s *dc_voucher; 
	struct dispatch_##x##_s *volatile do_next; dispatch_function_t dc_func; // Task function (block is converted tofunction) void * dc_ctxt; Void *dc_data; void *dc_otherCopy the code
Struct dispatch_queue_s {_DISPATCH_QUEUE_HEADER(queue); DISPATCH_QUEUE_CACHELINE_PADDING; //for static queues only
} DISPATCH_ATOMIC64_ALIGN;

#define _DISPATCH_QUEUE_HEADER(x) struct os_mpsc_queue_s _as_oq[0]; DISPATCH_OBJECT_HEADER(x); Uint32_t dq_side_suspend_cnt; DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, const uint16_t dq_width, Uint16_t __dq_opaque); DISPATCH_INTROSPECTION_QUEUE_HEADER#define DISPATCH_INTROSPECTION_QUEUE_HEADER TAILQ_ENTRY(dispatch_queue_s) diq_list; dispatch_unfair_lock_s diq_order_top_head_lock; dispatch_unfair_lock_s diq_order_bottom_head_lock; TAILQ_HEAD(, dispatch_queue_order_entry_s) diq_order_top_head; // TAILQ_HEAD(, Diq_order_bottom_head struct dispatch_queue_attr_s { OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr); };Copy the code
// Vtable type for continuations: the OS-object class header plus the
// invokable entries (do_type, do_kind, do_invoke, do_push — see the
// DISPATCH_INVOKABLE_VTABLE_HEADER macro below).
typedef struct dispatch_continuation_vtable_s {
	_OS_OBJECT_CLASS_HEADER();
	DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation);
} const *dispatch_continuation_vtable_t;

#define DISPATCH_INVOKABLE_VTABLE_HEADER(x) \unsigned long const do_type; // type const char *const do_kind; Void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \dispatch_invoke_flags_t); Void (*const do_push)(struct x##_s *, dispatch_object_t, \Dispatch_qos_t)//  DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, queue, .do_type = DISPATCH_QUEUE_SERIAL_TYPE, .do_kind ="serial-queue",
	.do_dispose = _dispatch_queue_dispose,
	.do_suspend = _dispatch_queue_suspend,
	.do_resume = _dispatch_queue_resume,
	.do_finalize_activation = _dispatch_queue_finalize_activation,
	.do_push = _dispatch_queue_push,
	.do_invoke = _dispatch_queue_invoke,
	.do_wakeup = _dispatch_queue_wakeup,
	.do_debug = dispatch_queue_debug,
	.do_set_targetq = _dispatch_queue_set_target_queue,
);


Copy the code

There are several main apis in GCD:

FIFO queues are managed by functions such as dispatch_async (which operate on the head and tail nodes of the list). In GCD an executable task can be expressed in two ways — as a block or as a function pointer — so the task-related APIs generally come in two forms:

// Block-based variant: the block is wrapped and forwarded to dispatch_async_f.
dispatch_async(dispatch_queue_t queue, dispatch_block_t block);
// Function-pointer variant: work(context) is invoked on the queue.
dispatch_async_f(dispatch_queue_t queue,
	void *_Nullable context,
	dispatch_function_t work);
Copy the code

Get global queue

  1. The implementation code
Dispatch_queue_t dispatch_get_global_queue(long priority, unsigned long flags) {// Cause of null value returnedif (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) {
		returnDISPATCH_BAD_INPUT; } dispatch_qOS_T qos = _dispatch_qOS_FROM_queue_priority (priority);#if ! HAVE_PTHREAD_WORKQUEUE_QOS
	if (qos == QOS_CLASS_MAINTENANCE) {
		qos = DISPATCH_QOS_BACKGROUND;
	} else if (qos == QOS_CLASS_USER_INTERACTIVE) {
		qos = DISPATCH_QOS_USER_INITIATED;
	}
#endif
	if (qos == DISPATCH_QOS_UNSPECIFIED) {
		returnDISPATCH_BAD_INPUT; } // overcommit = NO // Get the priority from the system queue arrayreturn _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT);
}
Copy the code
static inline dispatch_queue_t _dispatch_get_root_queue(dispatch_qos_t qos, Bool overCOMMIT) {// _dispatch_root_queues is an array of queues with the specified index. Overcommit =0/1return&_dispatch_root_queues[2 * (qos - 1) + overcommit]; } struct dispatch_queue_s _dispatch_root_queues[]; / / the skip zero 1 - main_q master queue / / / / / / 3 2 - mgr_q GCD internal management queue - mgr_root_q / / 4,5,6,7,8,9,10,11,12,13,14,15 - global the queues // This is followed by manually created queuesCopy the code
Dispatch_qos_t:#define DISPATCH_QOS_UNSPECIFIED ((dispatch_qos_t)0)
#define DISPATCH_QOS_MAINTENANCE ((dispatch_qos_t)1)
#define DISPATCH_QOS_BACKGROUND ((dispatch_qos_t)2)
#define DISPATCH_QOS_UTILITY ((dispatch_qos_t)3)
#define DISPATCH_QOS_DEFAULT ((dispatch_qos_t)4)
#define DISPATCH_QOS_USER_INITIATED ((dispatch_qos_t)5)
#define DISPATCH_QOS_USER_INTERACTIVE ((dispatch_qos_t)6)Priority enumeration of queues in the system:  enum { DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0, DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT, DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS, DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT, DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS, DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT, DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS, DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT, DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS, DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT, _DISPATCH_ROOT_QUEUE_IDX_COUNT, };Copy the code
  1. The global queue has 8 priorities

    In addition to the normal four, there are four overcommit

/ *! * @enum A flag bit of the queue * * @constant DISPATCH_QUEUE_OVERCOMMIT * indicates that when the thread pool runs out of threads (eg:64 threads), this queue forces the creation of a new thread to perform the task, no matter how busy the system is and no longer waiting. This means that more threads can be created than the number of cores. */ enum { DISPATCH_QUEUE_OVERCOMMIT = 0x2ull, };Copy the code
  1. Code test: Overcommit = YES

    overcommit = NO

The main queue

  1. Implementation code:
struct dispatch_queue_s _dispatch_main_q = {
	DISPATCH_GLOBAL_OBJECT_HEADER(queue_main),
#if ! DISPATCH_USE_RESOLVERS// The target queue of the main queue is: Default priority, globalqueue.do_targetq = &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT],#endif
	.dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) |
			DISPATCH_QUEUE_ROLE_BASE_ANON,
	.dq_label = "com.apple.main-thread",
	.dq_atomic_flags = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC | DQF_WIDTH(1),
	.dq_serialnum = 1,
};
Copy the code

Create a queue

  1. Code implementation
dispatch_queue_t
dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
{
	return _dispatch_queue_create_with_target(label, attr,
			DISPATCH_TARGET_QUEUE_DEFAULT(NULL), true);
}

static dispatch_queue_t
_dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
		dispatch_queue_t tq, bool legacy)
{
	//
	// Step 1: Normalize arguments (qos, overcommit, tq)
	//

	dispatch_qos_t qos = _dispatch_priority_qos(dqa->dqa_qos_and_relpri);

	_dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit;
	if(overcommit ! = _dispatch_queue_attr_overcommit_unspecified && tq) {if (tq->do_targetq) {
			DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and "
					"a non-global target queue"); }}if(tq && ! tq->do_targetq && tq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { // Handle discrepancies between attr and target queue, attributes winif (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
			if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
				overcommit = _dispatch_queue_attr_overcommit_enabled;
			} else{ overcommit = _dispatch_queue_attr_overcommit_disabled; }}if (qos == DISPATCH_QOS_UNSPECIFIED) {
			dispatch_qos_t tq_qos = _dispatch_priority_qos(tq->dq_priority);
			tq = _dispatch_get_root_queue(tq_qos,
					overcommit == _dispatch_queue_attr_overcommit_enabled);
		} else{ tq = NULL; }}else if(tq && ! tq->do_targetq) { // target is a pthread or runloop root queue, setting QoS or overcommit // is disallowedif(overcommit ! = _dispatch_queue_attr_overcommit_unspecified) { DISPATCH_CLIENT_CRASH(tq,"Cannot specify an overcommit attribute "
					"and use this kind of target queue"); }}else {
		if(overcommit == _dispatch_queue_attr_overcommit_unspecified) { // overcommit = dqa->dqa_concurrent ? _dispatch_queue_attr_overcommit_disabled : _dispatch_queue_attr_overcommit_enabled; }}if(! Tq) {// Create a queue manually, without setting the target queue, then obtain a queue from the system queue (the default priority globalQueue) as the target queue, if the serial queue is created, then the target queue is overcommit. Otherwise, it is not. (👆) Tq = _dispatch_get_root_queue(qos == DISPATCH_QOS_UNSPECIFIED? DISPATCH_QOS_DEFAULT : qos, overcommit == _dispatch_queue_attr_overcommit_enabled); } // // Step 2: Initialize the queue //if (legacy) {
		// if any of these attributes is specified, use non legacy classes
		if (dqa->dqa_inactive || dqa->dqa_autorelease_frequency) {
			legacy = false;
		}
	}

	const void *vtable;
	dispatch_queue_flags_t dqf = 0;
	if (legacy) {
		vtable = DISPATCH_VTABLE(queue);
	} else if (dqa->dqa_concurrent) {
		vtable = DISPATCH_VTABLE(queue_concurrent);
	} else {
		vtable = DISPATCH_VTABLE(queue_serial);
	}
	switch (dqa->dqa_autorelease_frequency) {
	case DISPATCH_AUTORELEASE_FREQUENCY_NEVER:
		dqf |= DQF_AUTORELEASE_NEVER;
		break;
	case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM:
		dqf |= DQF_AUTORELEASE_ALWAYS;
		break;
	}
	if (legacy) {
		dqf |= DQF_LEGACY;
	}
	if (label) {
		const char *tmp = _dispatch_strdup_if_mutable(label);
		if(tmp ! = label) { dqf |= DQF_LABEL_NEEDS_FREE; label = tmp; }} dispatch_queue_t dq = _dispatch_object_alloc(vtable, sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); _dispatch_queue_init(dq, DQF, dQA ->dqa_concurrent? DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER | (dqa->dqa_inactive ? DISPATCH_QUEUE_INACTIVE : 0)); dq->dq_label = label; dq->dq_priority = dqa->dqa_qos_and_relpri;if(! Dq ->dq_priority) {// The priority of an unset queue is inherited by default from its destination queue _dispatch_queue_priority_inherit_from_target(dq, tq); }else if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
		dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
	}
	if(! dqa->dqa_inactive) { _dispatch_queue_inherit_wlh_from_target(dq, tq); } _dispatch_retain(tq); // set target queue dq->do_targetq = tq; _dispatch_object_debug(dq,"%s", __func__);
	return_dispatch_introspection_queue_create(dq); } // dispatch_queue_t _dispatch_introspection_queue_create(dispatch_queue_t dq) { TAILQ_INIT(&dq->diq_order_top_head); TAILQ_INIT(&dq->diq_order_bottom_head); _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); // Add TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, dq, diq_list); // Insert queue into queue array _dispatch_unfair_lock_UNLOCK (&_dispatch_introspection.queues_lock); // unlock DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, Dq);if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) {
		_dispatch_introspection_queue_create_hook(dq);
	}
	return dq;
}

Copy the code

Setting the destination queue

  1. Implementation code:
void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) { // global/main queue dispatch_assert(dq->do_ref_cnt ! = DISPATCH_OBJECT_GLOBAL_REFCNT && dq->do_targetq); // If tq is null, set the default target queue to createQif(unlikely(! tq)) { bool is_concurrent_q = (dq->dq_width > 1); tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, ! is_concurrent_q); }if (_dispatch_queue_try_inactive_suspend(dq)) {
		_dispatch_object_set_target_queue_inline(dq, tq);
		return dx_vtable(dq)->do_resume(dq, false);
	}

	if(unlikely(! _dispatch_queue_is_legacy(dq))) {#if 1
		if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) {
			DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue "
					"already targeted by other dispatch objects");
		}
#endif
		DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object "
				"after it has been activated");
	}

	unsigned long type = dx_type(dq);
	switch (type) {
	case DISPATCH_QUEUE_LEGACY_TYPE:
#if 1
		if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) {
			_dispatch_bug_deprecated("Changing the target of a queue "
					"already targeted by other dispatch objects");
		}
#endif
		break;
	case DISPATCH_SOURCE_KEVENT_TYPE:
	case DISPATCH_MACH_CHANNEL_TYPE:
		_dispatch_ktrace1(DISPATCH_PERF_post_activate_retarget, dq);
		_dispatch_bug_deprecated("Changing the target of a source "
				"after it has been activated");
		break;
	default:
		DISPATCH_CLIENT_CRASH(type."Unexpected dispatch object type");
	}

	_dispatch_retain(tq);
	return_dispatch_barrier_trysync_or_async_f(dq, tq, _dispatch_queue_legacy_set_target_queue, DISPATCH_BARRIER_TRYSYNC_SUSPEND);  }Copy the code

Add tasks to the queue synchronously

  1. Implementation code:
// The task is block,functionVoid dispatch_sync(dispatch_queue_t dq, dispatch_block_t work) {dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); / / _dispatch_Block_invoke: blockfunction
}

Copy the code
Void dispatch_sync_f(dispatch_queue_t dq, void * CTXT) {dispatch_function_t func (dispatch_queue_t dq); The barrier waits until the previous task has completed before it can start executing, which it does through semaphoresif (likely(dq->dq_width == 1)) {
		returndispatch_barrier_sync_f(dq, ctxt, func); } // If the current queue blocks the barrier or the number of threads exceeds the queue capacity, thread switching may be requiredif(unlikely(! _dispatch_queue_try_reserve_sync_width(dq))) {return_dispatch_sync_f_slow(dq, ctxt, func, 0); } // If you want to add a task deadlock to the current serial queue at the same time _dispatch_introspection_sync_BEGIN (dq);if (unlikely(dq->do_targetq->do_targetq)) {
		return_dispatch_sync_recurse(dq, ctxt, func, 0); } _dispatch_sync_invoke_and_complete(dq, ctxt, func); // execute the function}Copy the code

Note deadlocks:

Asynchronously adds a task to the queue

  1. Implementation code:
dispatch_async(dispatch_queue_t dq, Dispatch_continuation_t dc = _dispatch_continuation_alloc(); uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; _dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags); _dispatch_async_f2(dq, dc); } static void _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) {if(slowpath(dq->dq_items_tail)) {// If the slowpath(dq->dq_items_tail) exists, the task is in the current queue, and the task needs to be submitted directly to the current queue.return 	dx_push(dq, dc, _dispatch_continuation_override_qos(dq, dc));
	}

	if(slowpath(! _dispatch_queue_try_acquire_async(dq))) {returndx_push(dq, dc, _dispatch_continuation_override_qos(dq, dc)); } // Otherwise you need to submit tasks up the chainreturn_dispatch_async_f_redirect(dq, dc, _dispatch_continuation_override_qos(dq, dc)); } // dispatch_async_f_redirect(dispatch_queue_t dq, dispatch_object_t dou, dispatch_qos_t qos) { dq = dq->do_targetq; // According to the queue inheritance chain, the task is submitted layer by layer up until the target queue is serial queue, which means that the task is submitted to the target queue. //#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
		({ uint16_t _width = (width); \
		_width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
		
	whileSlowpath (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) {// omit middle content dq = dq->do_targetq; } dx_push(dq, dou, qos); } dx_push: static inline void _dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, Struct dispatch_object_s *tail = _tail._do; dispatch_wakeup_flags_t flags = 0; bool overriding = _dispatch_queue_need_override_retain(dq, qos);if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
		if(! overriding) _dispatch_retain_2(dq->_as_os_obj); _dispatch_queue_push_update_head(dq, tail); flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY; }else if (overriding) {
		flags = DISPATCH_WAKEUP_CONSUME_2;
	} else {
		return;
	}
	returndx_wakeup(dq, qos, flags); // Invoke the next task}Copy the code

Get the current queue

dispatch_get_current_queue()

Queues in the system are organized according to the hierarchical relationship. To obtain the current queue, that is, to obtain the queue associated with the thread executing the current code. Due to the inheritance relationship of the target queue, tasks will be submitted layer by layer to the root queue. So dispatch_get_current_queue() may return a different result from any of the queues associated with the current execution environment, so using it may cause errors or even deadlock problems.

If dispatch_get_current_queue() returns a queue other than the one the code is logically running on, the comparison below fails and control falls into the else branch. dispatch_sync() then waits synchronously (the wait is implemented with a semaphore) on a queue that is actually part of the current execution chain, and the semaphore is never signaled — a deadlock.

void func(dispatch_queue_t queue, dispatch_block_t block)
{
    if (dispatch_get_current_queue() == queue) {
        block();
    }else{ dispatch_sync(queue, block); }}Copy the code
How to get the current queue:

dispatch_queue_set_specific(queue, key, value, destructor) — associates a value with a key on a queue.

dispatch_get_specific(key) — looks up the value for the key in the current task's execution environment, i.e. along the current queue and its chain of target queues. This is the reliable way to test "is this code running on (or targeting) a given queue?".

    dispatch_queue_t q1 = dispatch_queue_create("", NULL);
    dispatch_queue_t q3 = dispatch_queue_create("", NULL);
    dispatch_set_target_queue(q3, q1);

    static int specificKey;
    CFStringRef specificValue = CFSTR("queue1");
    dispatch_queue_set_specific(q1,
                                &specificKey,
                                (void*)specificValue,
                                (dispatch_function_t)CFRelease);
    
    
    dispatch_sync(q3, ^{
        dispatch_block_t block = ^{
            //dosomething }; Q1 CFStringRef retrievedValue = dispatch_get_specific(&specificKey); q1 CFStringRef retrievedValue = dispatch_get_specific(&specificKey);if(retrievedValue) {// Yes, just execute block block(); }else{// no, dispatch_sync(q1, block) does not cause synchronization block; }});Copy the code

Implementation code:

void *
dispatch_get_specific(const void *key)
{
	if(slowpath(! key)) {returnNULL; } void *ctxt = NULL; dispatch_queue_t dq = _dispatch_queue_get_current(); // Get the queue chain in which the current task is executedwhileCTXT = _dispatch_queue_get_specific_inline(dq, key);if (ctxt) break;
		dq = dq->do_targetq;
	}
	return ctxt;
}
Copy the code

Reentrant concept

Notes on using GCD

Typically, downloads are processed asynchronously so that the main thread is not blocked. Does GCD spin up a new thread for every network download? No — if each download spawned its own thread, the thread pool and memory would quickly be exhausted. Instead, the system's asynchronous networking API creates a single dedicated download thread, starts that thread's runloop, and adds a port source to keep the runloop alive so the thread is not destroyed. When a download task arrives the thread is woken to execute it; when there is no work the thread simply sleeps.