OC底层原理20-GCD底层分析

iOS--OC底层原理文章汇总

前一文章介绍了GCD的概念、使用,以及函数与队列搭配情况,本文将就GCD底层进行分析。

队列创建

准备libdispatch工程。(进入Apple的工程下载找到libdispatch

分析队列创建就不得不在底层源码中分析dispatch_queue_create,我们查询源码工程找到了以下定义:

dispatch_queue_t
dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
{
        /**
         label: the queue's label, e.g. a custom "com.myQueue" or the
         system's "com.apple.main-thread"; attr selects serial/concurrent.
        */
    return _dispatch_lane_create_with_target(label, attr,
            DISPATCH_TARGET_QUEUE_DEFAULT, true);
}
DISPATCH_NOINLINE
static dispatch_queue_t
_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
        dispatch_queue_t tq, bool legacy)
{
    // Decode the attribute into dqai: records whether the queue is serial
    // or concurrent, plus its QOS, autorelease frequency, inactive flag, etc.
    dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);

    //
    // Step 1: Normalize arguments (qos, overcommit, tq)
    //

    dispatch_qos_t qos = dqai.dqai_qos;
#if !HAVE_PTHREAD_WORKQUEUE_QOS
    // Without kernel workqueue-QOS support, map the two unsupported QOS
    // classes onto the nearest supported ones.
    if (qos == DISPATCH_QOS_USER_INTERACTIVE) {
        dqai.dqai_qos = qos = DISPATCH_QOS_USER_INITIATED;
    }
    if (qos == DISPATCH_QOS_MAINTENANCE) {
        dqai.dqai_qos = qos = DISPATCH_QOS_BACKGROUND;
    }
#endif // !HAVE_PTHREAD_WORKQUEUE_QOS

    _dispatch_queue_attr_overcommit_t overcommit = dqai.dqai_overcommit;
    if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
        if (tq->do_targetq) {
            DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and "
                    "a non-global target queue");
        }
    }
    //......... (lines elided in this excerpt)

    //
    // Step 2: Initialize the queue
    //
    const void *vtable;
    dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
    if (dqai.dqai_concurrent) {
        // Concurrent: class is OS_dispatch_queue_concurrent
        vtable = DISPATCH_VTABLE(queue_concurrent);
    } else {
       // Serial: class is OS_dispatch_queue_serial
        vtable = DISPATCH_VTABLE(queue_serial);
    }
    switch (dqai.dqai_autorelease_frequency) {
    case DISPATCH_AUTORELEASE_FREQUENCY_NEVER:
        dqf |= DQF_AUTORELEASE_NEVER;
        break;
    case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM:
        dqf |= DQF_AUTORELEASE_ALWAYS;
        break;
    }
    if (label) {
        // Copy the label if it is mutable so the queue owns stable storage.
        const char *tmp = _dispatch_strdup_if_mutable(label);
        if (tmp != label) {
            dqf |= DQF_LABEL_NEEDS_FREE;
            label = tmp;
        }
    }
    // Allocate the queue object (isa is installed inside)
    dispatch_lane_t dq = _dispatch_object_alloc(vtable,
            sizeof(struct dispatch_lane_s));
    // Initialize the queue; dqai.dqai_concurrent decides the width:
    // DISPATCH_QUEUE_WIDTH_MAX for concurrent, 1 for serial
    _dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
            DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
            (dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
         //......... (lines elided in this excerpt)
        // Record the queue's label and priority
    dq->dq_label = label;
    dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos,
            dqai.dqai_relpri);
    if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
        dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
    }
    if (!dqai.dqai_inactive) {
        _dispatch_queue_priority_inherit_from_target(dq, tq);
        _dispatch_lane_inherit_wlh_from_target(dq, tq);
    }
    _dispatch_retain(tq);
    dq->do_targetq = tq;
    _dispatch_object_debug(dq, "%s", __func__);
    // Hand dq to the trace/introspection layer, which returns the same dq
    return _dispatch_trace_queue_create(dq)._dq;
}

_dispatch_trace_queue_create -> _dispatch_introspection_queue_create(dqu);
_dispatch_introspection_queue_create对上一层创建之后的dq经过两层包装、处理再返回了dq

// NOTE(review): the return-type line is elided in this excerpt (in
// libdispatch this returns dispatch_queue_class_t). Wraps a freshly
// created queue in an introspection context, registers it on the global
// introspection list, and returns the same dq.
_dispatch_introspection_queue_create(dispatch_queue_t dq)
{
    dispatch_queue_introspection_context_t dqic;
    size_t sz = sizeof(struct dispatch_queue_introspection_context_s);

    // If queue-inversion debugging is off, allocate only the prefix of the
    // context struct (up to __dqic_no_queue_inversion).
    if (!_dispatch_introspection.debug_queue_inversions) {
        sz = offsetof(struct dispatch_queue_introspection_context_s,
                __dqic_no_queue_inversion);
    }
    dqic = _dispatch_calloc(1, sz);
    dqic->dqic_queue._dq = dq;
    if (_dispatch_introspection.debug_queue_inversions) {
        LIST_INIT(&dqic->dqic_order_top_head);
        LIST_INIT(&dqic->dqic_order_bottom_head);
    }
    dq->do_finalizer = dqic;

    // Register the queue on the global introspection list under its lock.
    _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock);
    LIST_INSERT_HEAD(&_dispatch_introspection.queues, dqic, dqic_list);
    _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock);

    DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq);
    if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) {
        _dispatch_introspection_queue_create_hook(dq);
    }
    return upcast(dq)._dqu;
}

需要研究_dispatch_lane_create_with_target中做了哪些操作,都是围绕dq展开。

  • 1、_dispatch_queue_attr_to_info创建类型对象,用来存储队列的相关信息。

  • 2、设置队列的关联属性qos

  • 3、初始化队列;通过DISPATCH_VTABLE拼接队列名称

#define DISPATCH_VTABLE(name) DISPATCH_OBJC_CLASS(name)
#define DISPATCH_OBJC_CLASS(name)   (&DISPATCH_CLASS_SYMBOL(name))
#define DISPATCH_CLASS_SYMBOL(name) OS_dispatch_##name##_class
#define DISPATCH_CLASS(name) OS_dispatch_##name

判断dqai.dqai_concurrent决定是串行队列还是并发队列。

串行队列:OS_dispatch_queue_serial
并发队列:OS_dispatch_queue_concurrent

  • 4、_dispatch_object_alloc创建队列
// Allocate a dispatch object of `size` bytes whose class is `vtable`.
void *
_dispatch_object_alloc(const void *vtable, size_t size)
{
#if OS_OBJECT_HAVE_OBJC1
    // Legacy ObjC1 layout: the ObjC isa and do_vtable are separate fields.
    const struct dispatch_object_vtable_s *_vtable = vtable;
    dispatch_object_t dou;
    dou._os_obj = _os_object_alloc_realized(_vtable->_os_obj_objc_isa, size);
    dou._do->do_vtable = vtable;
    return dou._do;
#else
    return _os_object_alloc_realized(vtable, size);
#endif
}
👇
// Zero-allocate an os_object and install its class pointer.
inline _os_object_t
_os_object_alloc_realized(const void *cls, size_t size)
{
    _os_object_t obj;
    dispatch_assert(size >= sizeof(struct _os_object_s));
    // Retry until calloc succeeds instead of failing the allocation.
    while (unlikely(!(obj = calloc(1u, size)))) {
        _dispatch_temporary_resource_shortage();
    }
    obj->os_obj_isa = cls;  // set the isa pointer - queues are objects
    return obj;
}

创建队列过程中,有更改isa指向操作,说明队列是对象。

  • 5、_dispatch_queue_init初始化dq,设置一些属性
// Note to later developers: ensure that any initialization changes are
// made for statically allocated queues (i.e. _dispatch_main_q).
//
// Initializes a freshly allocated queue: reference counts, width
// (1 = serial, DISPATCH_QUEUE_WIDTH_MAX = concurrent), state bits,
// and a unique serial number.
static inline dispatch_queue_class_t
_dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf,
        uint16_t width, uint64_t initial_state_bits)
{
    uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
    dispatch_queue_t dq = dqu._dq;

    // Only the role and inactive bits may be passed in.
    dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK |
            DISPATCH_QUEUE_INACTIVE)) == 0);

    if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) {
        dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume
        if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) {
            dq->do_ref_cnt++; // released when DSF_DELETED is set
        }
    }

    dq_state |= initial_state_bits;
    dq->do_next = DISPATCH_OBJECT_LISTLESS;
    dqf |= DQF_WIDTH(width);
    os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
    dq->dq_state = dq_state;
    // Every queue gets a monotonically increasing serial number.
    dq->dq_serialnum =
            os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
    return dqu;
}

初始化完成之后设置dq_label、dq_priority

  • 6、_dispatch_trace_queue_create -> _dispatch_introspection_queue_create -> return upcast(dq)._dqu; ,通过对队列进行操作并将处理完的dq返回。

全局队列

// Returns one of the pre-built global (root) queues for the requested
// priority; the only supported flag is DISPATCH_QUEUE_OVERCOMMIT.
dispatch_queue_global_t
dispatch_get_global_queue(long priority, unsigned long flags)
{
    dispatch_assert(countof(_dispatch_root_queues) ==
            DISPATCH_ROOT_QUEUE_COUNT);

    // Reject any flag other than DISPATCH_QUEUE_OVERCOMMIT.
    if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) {
        return DISPATCH_BAD_INPUT;
    }
    dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority);
#if !HAVE_PTHREAD_WORKQUEUE_QOS
    // Map unsupported QOS classes onto supported ones.
    if (qos == QOS_CLASS_MAINTENANCE) {
        qos = DISPATCH_QOS_BACKGROUND;
    } else if (qos == QOS_CLASS_USER_INTERACTIVE) {
        qos = DISPATCH_QOS_USER_INITIATED;
    }
#endif
    if (qos == DISPATCH_QOS_UNSPECIFIED) {
        return DISPATCH_BAD_INPUT;
    }
    return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT);
}

// ----------------------

DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_global_t
_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
    if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) {
        DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
    }
    // Two root queues per QOS level: index 2*(qos-1) is the regular queue,
    // +1 selects its overcommit variant.
    return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}

从底层获取合适的队列,这可以佐证它的并发性。
通过dispatch_queue_global_s这个结构体获取全局各种属性的global_queue。

获取底层全局队列

4个队列

主队列

/*!
 * @function dispatch_get_main_queue
 *
 * @abstract
 * Returns the default queue that is bound to the main thread.
 *
 * @discussion
 * In order to invoke blocks submitted to the main queue, the application must
 * call dispatch_main(), NSApplicationMain(), or use a CFRunLoop on the main
 * thread.
 *
 * The main queue is meant to be used in application context to interact with  the main thread and the main runloop.
 *
 * Because the main queue doesn't behave entirely like a regular serial queue,
 * it may have unwanted side-effects when used in processes that are not UI apps
 * (daemons). For such processes, the main queue should be avoided.
 *
 * @see dispatch_queue_main_t
 *
 * @result
 * Returns the main queue. This queue is created automatically on behalf of the main thread before main() is called.
 */
// Returns the statically allocated main-queue object _dispatch_main_q;
// nothing is created here - the queue exists before main() runs.
DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_CONST DISPATCH_NOTHROW
dispatch_queue_main_t
dispatch_get_main_queue(void)
{
    return DISPATCH_GLOBAL_OBJECT(dispatch_queue_main_t, _dispatch_main_q);
}

可以得到几点信息:
1.它是绑定到主线程的默认队列;
2.主队列用于在应用程序上下文中与主线程和主运行循环进行交互
3.主队列的行为不完全像常规的串行队列,在非UI应用程序(如守护进程)中使用可能产生不好的副作用,应该避免
4.它是自动创建的队列,且发生在main()调用之前。

通过查找dispatch_queue_main_t,我们找到了它的定义,它是绑定到主线程的默认队列的类型。也指出了主队列是一个串行队列。主队列是一个众所周知的全局对象,该对象在进程初始化期间代表主线程自动创建,由dispatch_get_main_queue()返回,并且无法修改该对象。

dispatch_queue_main_t定义

由上面的信息:创建队列发生在main()调用之前,这可以联想到dyld的流程,在OC底层原理10—应用程序加载中流程图中简单介绍到main调用之前,初始化时会走底层的libdispatch_init.

libdispatch_init关于主队列的创建

函数:异步函数 & 同步函数

异步函数

先通过打印异步函数的堆栈来看看异步函数调用过程中底层的调用顺序


异步函数调用的堆栈顺序

_dispatch_root_queue_drain -> _dispatch_continuation_pop_inline -> _dispatch_continuation_invoke_inline -> _dispatch_client_callout -> _dispatch_call_block_and_release

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_invoke_inline(dispatch_object_t dou,
        dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu)
{
    dispatch_continuation_t dc = dou._dc, dc1;
    dispatch_invoke_with_autoreleasepool(flags, {
        uintptr_t dc_flags = dc->dc_flags;
        // Add the item back to the cache before calling the function. This
        // allows the 'hot' continuation to be used for a quick callback.
        //
        // The ccache version is per-thread.
        // Therefore, the object has not been reused yet.
        // This generates better assembly.
        _dispatch_continuation_voucher_adopt(dc, dc_flags);
        if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
            _dispatch_trace_item_pop(dqu, dou);
        }
        if (dc_flags & DC_FLAG_CONSUME) {
            dc1 = _dispatch_continuation_free_cacheonly(dc);
        } else {
            dc1 = NULL;
        }
        if (unlikely(dc_flags & DC_FLAG_GROUP_ASYNC)) {
            _dispatch_continuation_with_group_invoke(dc);
        } else {
            // Invokes f(ctxt); for dispatch_async blocks dc_func is
            // _dispatch_call_block_and_release.
            _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
            _dispatch_trace_item_complete(dc);
        }
        if (unlikely(dc1)) {
            _dispatch_continuation_free_to_cache_limit(dc1);
        }
    });
    _dispatch_perfmon_workitem_inc();
}

// ---------
#undef _dispatch_client_callout
// Runs the client's callback, terminating the process if it throws.
void
_dispatch_client_callout(void *ctxt, dispatch_function_t f)
{
    @try {
        // f is the saved callback; for async blocks this is
        // _dispatch_call_block_and_release
        return f(ctxt);
    }
    @catch (...) {
        objc_terminate();
    }
}

有了堆栈调用顺序的初步了解,再探究dispatch_async源码,如下:

// dispatch_async: wrap `work` in a continuation and push it onto dq.
void
dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
{
    // Allocate a continuation (from the per-thread cache when possible)
    dispatch_continuation_t dc = _dispatch_continuation_alloc();
    uintptr_t dc_flags = DC_FLAG_CONSUME;
    dispatch_qos_t qos;
    /**
     Initialize the continuation: copies the work block, records the
     function to invoke, and returns the QOS to push with.
    */
    qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
    // Push the continuation onto the queue (dx_push dispatches by type)
    _dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}
// --------------
DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_continuation_alloc(void)
{
    // Fast path: take a continuation from the per-thread cache;
    // fall back to a heap allocation when the cache is empty.
    dispatch_continuation_t dc =
            _dispatch_continuation_alloc_cacheonly();
    if (unlikely(!dc)) {
        return _dispatch_continuation_alloc_from_heap();
    }
    return dc;
}
  • _dispatch_continuation_init :任务包装器
DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_continuation_init(dispatch_continuation_t dc,
        dispatch_queue_class_t dqu, dispatch_block_t work,
        dispatch_block_flags_t flags, uintptr_t dc_flags)
{
    // Copy the work block off the stack
    void *ctxt = _dispatch_Block_copy(work);

    dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
    if (unlikely(_dispatch_block_has_private_data(work))) {
        // Block carries private data (priority/voucher/flags)
        dc->dc_flags = dc_flags;
        dc->dc_ctxt = ctxt; // store the copied block as the context
        // the slow path initializes all remaining fields, but requires
        // dc_flags and dc_ctxt to be set already
        return _dispatch_continuation_init_slow(dc, dqu, flags);
    }
    // Default invoke function: the block's own invoke pointer (sync path)
    dispatch_function_t func = _dispatch_Block_invoke(work);
    if (dc_flags & DC_FLAG_CONSUME) {
        // Async (consumed) continuations run the block then release it
        func = _dispatch_call_block_and_release;
    }
    // Record ctxt/func on the continuation and compute the push QOS
    return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
}

// 0----------0
// Extracts a block's invoke function pointer from its Block_layout,
// cast to dispatch_function_t.
#define _dispatch_Block_invoke(bb) \
        ((dispatch_function_t)((struct Block_layout *)bb)->invoke)

//0---------0
// Callback installed for async blocks: invoke the block, then release
// the copy made in _dispatch_continuation_init.
void
_dispatch_call_block_and_release(void *block)
{
    void (^b)(void) = block;
    b();
    Block_release(b);
}

小结:_dispatch_continuation_init包装任务,设置线程的回调函数,初始化调度对象,并返回对应的qos。

  • _dispatch_continuation_async
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
        dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
    if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
        _dispatch_trace_item_push(dqu, dc);
    }
#else
    (void)dc_flags;
#endif
    // dx_push resolves through the queue's vtable to dq_push, so the
    // actual enqueue routine depends on the queue's type.
    return dx_push(dqu._dq, dc, qos);
}

并发执行block,其中dx_push(dqu._dq, dc, qos)定义如下

// dx_push: look up the queue's vtable and call its dq_push entry point.
#define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z)

dq_push则根据传入的队列类型,分别执行以下等不同的函数

dq_push根据队列执行对应函数

总结:异步函数被调用时,会先创建一个持续调度队列的对象dc,然后初始化调度对象,并将待执行任务包装,pthread开辟线程再通过invoke执行回调block,并发执行不同的调度函数是由队列的类型决定。

同步函数

dispatch_sync 源码如下

DISPATCH_NOINLINE
void
dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
    uintptr_t dc_flags = DC_FLAG_BLOCK;
    // Blocks carrying private data (priority/voucher) take a slower path.
    if (unlikely(_dispatch_block_has_private_data(work))) {
        return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
    }
    _dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}
  • _dispatch_sync_f 流程
DISPATCH_NOINLINE
static void
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
        uintptr_t dc_flags)
{
    // Thin out-of-line wrapper around the inline implementation.
    _dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
}
👇
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    if (likely(dq->dq_width == 1)) {
        // Width 1 means serial (one item at a time): sync on a serial
        // queue is implemented as a barrier sync.
        return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
    }
    // Queue types that are not lanes do not support dispatch_sync
    if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
        DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
    }

    dispatch_lane_t dl = upcast(dq)._dl;
    // Global concurrent queues and queues bound to non-dispatch threads
    // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
    if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
        return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
    }

    if (unlikely(dq->do_targetq->do_targetq)) {
        // Target-queue chain is more than one level deep: recurse down
        // the hierarchy, acquiring each level in turn.
        return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
    }
    // Begin the synchronous invoke
    _dispatch_introspection_sync_begin(dl);
    // Invoke the function (the block's callback) and complete the sync
    _dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
            _dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}
👇
DISPATCH_NOINLINE
static void
_dispatch_sync_recurse(dispatch_lane_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    dispatch_tid tid = _dispatch_tid_self();
    dispatch_queue_t tq = dq->do_targetq;

    // Walk down the target-queue chain, acquiring each level: a barrier
    // on serial targets (width 1), a width reservation on concurrent
    // targets. Any failed acquisition diverts to the slow (waiting) path.
    do {
        if (likely(tq->dq_width == 1)) {
            if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) {
                return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq,
                        DC_FLAG_BARRIER);
            }
        } else {
            dispatch_queue_concurrent_t dl = upcast(tq)._dl;
            if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
                return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq, 0);
            }
        }
        tq = tq->do_targetq;
    } while (unlikely(tq->do_targetq));

    _dispatch_introspection_sync_begin(dq);
    _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags
            DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
                    dq, ctxt, func, dc_flags)));
}
👇
DISPATCH_NOINLINE
static void
_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_class_t dq,
        void *ctxt, dispatch_function_t func, uintptr_t dc_flags
        DISPATCH_TRACE_ARG(void *dc))
{
    // Run the client function, then release what was acquired at every
    // level of the target-queue hierarchy.
    _dispatch_sync_function_invoke_inline(dq, ctxt, func);
    _dispatch_trace_item_complete(dc);
    _dispatch_sync_complete_recurse(dq._dq, NULL, dc_flags);
}

这是 _dispatch_sync_f -> _dispatch_sync_f_inline流程,

  1. _dispatch_sync_f_inline中首先判断dq_width是否为1,返回执行栅栏同步函数实现
  2. 判断队列类型是否为dispatch_sync
  3. 判断如果为全局并发队列或者绑定到非调度线程的队列,就进行缓慢同步执行
  4. 判断是否执行递归调用
    5.开始执行同步执行,执行完成,回调block。

结合堆栈分析


dispatch_sync堆栈执行

不论串行队列下的同步还是并发队列下的同步,调用堆栈顺序相同。相比异步函数堆栈,少了许多,但在其底层还是做了许多的判断与操作的。

  • _dispatch_sync_block_with_privdata 流程
#ifdef __BLOCKS__
// NOTE(review): the matching #endif is outside this excerpt.
// Slow path of dispatch_sync for blocks created with private data
// (dispatch_block_create): adopts the block's priority/voucher, then
// funnels into the barrier sync or plain sync path.
DISPATCH_NOINLINE
static void
_dispatch_sync_block_with_privdata(dispatch_queue_t dq, dispatch_block_t work,
        uintptr_t dc_flags)
{
    dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work);
    pthread_priority_t op = 0, p = 0;
    dispatch_block_flags_t flags = dbpd->dbpd_flags;

    if (flags & DISPATCH_BLOCK_BARRIER) {
        dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA | DC_FLAG_BARRIER;
    } else {
        dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA;
    }

    op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority);
    if (op) {
        p = dbpd->dbpd_priority;
    }
    voucher_t ov, v = DISPATCH_NO_VOUCHER;
    if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
        v = dbpd->dbpd_voucher;
    }
    ov = _dispatch_set_priority_and_voucher(p, v, 0);

    // balanced in d_block_sync_invoke or d_block_wait
    if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) {
        _dispatch_retain_2(dq);
    }
    // Barrier blocks go through the barrier sync path; everything else
    // still ends up in _dispatch_sync_f.
    if (dc_flags & DC_FLAG_BARRIER) {
        _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke,
                dc_flags);
    } else {
        _dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, dc_flags);
    }
    _dispatch_reset_priority_and_voucher(op, ov);
}

这是带私有数据的同步函数流程,最终根据队列标记判断:若不是栅栏函数,还是要走_dispatch_sync_f流程。

补充「栅栏函数」

目的:控制任务同步执行,即使是异步函数+并发队列
方法:dispatch_barrier_async、dispatch_barrier_sync: 异步、同步栅栏函数都是能「阻拦」任务执行。后者的区别是会阻塞线程
注意:栅栏函数只能控制同一并发队列
使用:

    dispatch_queue_t concurrentQueue = dispatch_queue_create("cooci", DISPATCH_QUEUE_CONCURRENT);
    /* 1.异步函数 */
    dispatch_async(concurrentQueue, ^{
        sleep(1);
        NSLog(@"异步1:休息一下,再做事!");
    });
    
    /* 2. 栅栏函数 */
    dispatch_barrier_async(concurrentQueue, ^{
        NSLog(@"栅栏:== %@",[NSThread currentThread]);
    });
    /* 3. 异步函数 */
    dispatch_async(concurrentQueue, ^{
        
        NSLog(@"异步2:终于可以到我干活了吗");
    });
    NSLog(@"主线程:来活了");

//=========打印=========
[34550:1419250] 主线程:来活了
[34550:1419411] 异步1:休息一下,再做事!
[34550:1419412] 栅栏:== <NSThread: 0x600001bc5900>{number = 3, name = (null)}
[34550:1419412] 异步2:终于可以到我干活了吗

单例

在平常开发中,经常会使用到单例设计模式,保证一个类仅有一个实例,并提供一个访问它的全局访问点。主要解决的问题就是:避免一个全局使用的类频繁地创建与销毁。
一般编写单例的形式如下:

static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
     //TODO:初始化
});

// --可以查看dispatch_once_t的类型,它是一个长整型,通过取地址&做传参--- 
typedef long dispatch_once_t;

在这里探究当然是因为它的实现就是发生在libdispatch.dylib中的,所以溯源找到了以下定义

// once.c
// dispatch_once: run `block` exactly once for the lifetime of *val.
void
dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
    dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}
//------
// Pull the invoke pointer out of the block's layout, cast to
// dispatch_function_t (the common void (*)(void *) callback type).
#define _dispatch_Block_invoke(bb) \
        ( (dispatch_function_t) ((struct Block_layout *)bb)->invoke )
typedef void (*dispatch_function_t)(void *_Nullable);
//------

DISPATCH_NOINLINE
void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
    // Treat the caller's once token as a once-gate.
    dispatch_once_gate_t l = (dispatch_once_gate_t)val;
#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    // Fast path: if the token already reads DLOCK_ONCE_DONE, the block
    // has run - return immediately.
    uintptr_t v = os_atomic_load(&l->dgo_once, acquire);
    if (likely(v == DLOCK_ONCE_DONE)) {
        return;
    }
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    // Quiescent-counter mode: the token holds a generation value;
    // promote it to DONE once the quiescent period has passed.
    if (likely(DISPATCH_ONCE_IS_GEN(v))) {
        return _dispatch_once_mark_done_if_quiesced(l, v);
    }
#endif
#endif
    // Try to enter the gate: succeeds only when the token (onceToken) is
    // still DLOCK_ONCE_UNLOCKED, i.e. this is the first caller.
    if (_dispatch_once_gate_tryenter(l)) {
        return _dispatch_once_callout(l, ctxt, func);
    }
    // Another thread is running the block: block until it finishes.
    return _dispatch_once_wait(l);
}

// ----dispatch_gate_s + dispatch_once_gate_s -----
typedef struct dispatch_gate_s {
    dispatch_lock dgl_lock;
} dispatch_gate_s, *dispatch_gate_t;

typedef struct dispatch_once_gate_s {
    union {
        dispatch_gate_s dgo_gate;
        uintptr_t dgo_once; // once-token state: UNLOCKED / owner lock / DONE
    };
} dispatch_once_gate_s, *dispatch_once_gate_t;


  • dispatch_once_t是长整型,定义成了一个静态变量,首先就它而已是具有唯一性。强转类型之后得到l, &l->dgo_once就作为判断是否已经创建实例化的标志。
  • #define DISPATCH_ONCE_IS_GEN(gen) (((gen) & 3) == DLOCK_FAILED_TRYLOCK_BIT) ,在采用静默计数器方法情况下,尝试加锁,加锁失败之后会执行_dispatch_once_mark_done_if_quiesced
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_once_mark_done_if_quiesced(dispatch_once_gate_t dgo, uintptr_t gen)
{
    if (_dispatch_once_generation() - gen >= DISPATCH_ONCE_GEN_SAFE_DELTA) {
        /*
         * See explanation above, when the quiescing counter approach is taken
         * then this store needs only to be relaxed as it is used as a witness
         * that the required barriers have happened.
         */
        // Enough generations have passed: store DLOCK_ONCE_DONE so future
        // callers take the fast path.
        os_atomic_store(&dgo->dgo_once, DLOCK_ONCE_DONE, relaxed);
    }
}
  • 正常单例初始化创建,第一次就会走这个流程_dispatch_once_gate_tryenter,尝试进入执行创建
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
    // Atomically compare l->dgo_once against DLOCK_ONCE_UNLOCKED:
    // 1. if equal, store this thread's lock value (taking ownership of
    //    the gate) and return true - this thread runs the block;
    // 2. otherwise leave the token unchanged and return false - another
    //    thread got there first.
    return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
            (uintptr_t)_dispatch_lock_value_for_self(), relaxed);
}
#define os_atomic_cmpxchg(p, e, v, m) \
        ({ _os_atomic_basetypeof(p) _r = (e); \
        atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
        &_r, v, memory_order_##m, memory_order_relaxed); })

尝试对比l->dgo_once与DLOCK_ONCE_UNLOCKED是否相同,根据对比结果返回YES/NO。如果为YES,则_dispatch_once_callout执行block回调,同时向外广播状态,拒绝再次访问创建;如果为NO,则执行_dispatch_once_wait,即在当前实例创建过程中会加锁,当有需求再次创建时会一直等待,直到锁被释放。

DISPATCH_NOINLINE
static void
_dispatch_once_callout(dispatch_once_gate_t l, void *ctxt,
        dispatch_function_t func)
{
    // Invoke the once block (func wraps the client's block)
    _dispatch_client_callout(ctxt, func);
    // Mark the token done and wake any threads waiting on the gate,
    // so no caller can ever run the block again
    _dispatch_once_gate_broadcast(l);
}

// --_dispatch_client_callout--
#undef _dispatch_client_callout
// Runs the client's callback; any exception terminates the process.
void
_dispatch_client_callout(void *ctxt, dispatch_function_t f)
{
    @try {
        return f(ctxt);
    }
    @catch (...) {
        objc_terminate();
    }
}

// --_dispatch_once_gate_broadcast--
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_once_gate_broadcast(dispatch_once_gate_t l)
{
    dispatch_lock value_self = _dispatch_lock_value_for_self();
    uintptr_t v;
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    v = _dispatch_once_mark_quiescing(l); // mark with a generation value
#else
    v = _dispatch_once_mark_done(l); // store DLOCK_ONCE_DONE, get old value
#endif
    // v is the token's previous value: if it equals our own lock value,
    // no other thread ever queued behind the gate - nothing to wake.
    if (likely((dispatch_lock)v == value_self)) return;
    // Otherwise wake the threads blocked in _dispatch_once_wait.
    _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)v);
}

// ----_dispatch_once_mark_done--
DISPATCH_ALWAYS_INLINE
static inline uintptr_t
_dispatch_once_mark_done(dispatch_once_gate_t dgo)
{
    // Atomically exchange dgo_once with DLOCK_ONCE_DONE (release order)
    // and return the previous value; the token (onceToken) now reads DONE.
    return os_atomic_xchg(&dgo->dgo_once, DLOCK_ONCE_DONE, release);
}
//
#define os_atomic_xchg(p, v, m) \
        atomic_exchange_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m)

总结

【单例流程】先判断l是否第一次,如果为第一次来,则将DLOCK_ONCE_UNLOCKED赋值给 l->dgo_once,先加锁_dispatch_lock_value_for_self();返回YES执行_dispatch_once_callout调用block创建;
第二次时,即实例已经创建,判断os_atomic_load ()即 v是否等于一个 DLOCK_ONCE_DONE,如果等于,直接return。

【线程安全】有多个线程进来时,dispatch_lock_value_for_self()加锁了,保证了同一时间仅有一个线程在处理。当前线程未处理外,后来者将保持等待状态_dispatch_once_wait(),直到锁解除。

推荐阅读更多精彩内容

  • iOS 底层原理 文章汇总[https://www.jianshu.com/p/412b20d9a0f6] 本文是...
    Style_月月阅读 4,096评论 14 15
  • libdispatch 源码下载地址[https://opensource.apple.com/tarballs/...
    _zhang__阅读 323评论 0 1
  • GCD 介绍 什么是GCD? 全称是Grand Central Dispatch 纯C语⾔,提供了⾮常多强⼤的函数...
    北京_小海阅读 168评论 1 1
  • 久违的晴天,家长会。 家长大会开好到教室时,离放学已经没多少时间了。班主任说已经安排了三个家长分享经验。 放学铃声...
    飘雪儿5阅读 6,983评论 16 21
  • 创业是很多人的梦想,多少人为了理想和不甘选择了创业来实现自我价值,我就是其中一个。 创业后,我由女人变成了超人,什...
    亦宝宝阅读 1,566评论 4 1
  • 今天感恩节哎,感谢一直在我身边的亲朋好友。感恩相遇!感恩不离不弃。 中午开了第一次的党会,身份的转变要...
    迷月闪星情阅读 10,049评论 0 11
  • 可爱进取,孤独成精。努力飞翔,天堂翱翔。战争美好,孤独进取。胆大飞翔,成就辉煌。努力进取,遥望,和谐家园。可爱游走...
    赵原野阅读 2,458评论 1 1