CacheLookup falling through to the slow lookup

  • Earlier we explored the fast-path (cache) lookup process
  • When the selector is not found in the cache, the `MissLabelDynamic` label is taken
  • For objc_msgSend, MissLabelDynamic = `__objc_msgSend_uncached`
.endmacro

// Slow-path dispatch: reached from CacheLookup's MissLabelDynamic when the
// selector was not found in the class's method cache.
STATIC_ENTRY __objc_msgSend_uncached
UNWIND __objc_msgSend_uncached, FrameWithNoSaves

// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band p15 is the class to search

// Run the C-level method-table search for (receiver, selector);
// MethodTableLookup leaves the resulting IMP in x17.
MethodTableLookup
// Tail-call the IMP (branch without link); the original message
// arguments in x0... are still intact for the method to consume.
TailCallFunctionPointer x17

END_ENTRY __objc_msgSend_uncached

//
.macro TailCallFunctionPointer
	// $0 = function pointer value
	// Branch without linking (tail call): the callee returns
	// directly to objc_msgSend's original caller.
	br	$0
.endmacro

//
.macro MethodTableLookup
// Slow method search via the C runtime.
// In:  x0 = receiver, x1 = selector, x16 = class to search
// Out: x17 = IMP found by lookUpImpOrForward
// All argument registers are saved/restored around the C call.

    SAVE_REGS MSGSEND

    // lookUpImpOrForward(obj, sel, cls, LOOKUP_INITIALIZE | LOOKUP_RESOLVER)
    // receiver and selector already in x0 and x1
    mov	x2, x16
    mov	x3, #3                          // #3 = LOOKUP_INITIALIZE | LOOKUP_RESOLVER
    // Call (bl, link register set) into the C-level lookup
    bl	_lookUpImpOrForward

    // IMP in x0
    mov	x17, x0

    RESTORE_REGS MSGSEND

.endmacro

Copy the code

Analysis of lookUpImpOrForward

/***********************************************************************
* lookUpImpOrForward
* Slow path of message dispatch: walk the class hierarchy's method
* lists (and preoptimized caches) looking for `sel`, filling the
* method cache on success.
*
* inst:     the receiver instance
* sel:      the selector being sent
* cls:      the class to start searching from
* behavior: LOOKUP_* flags (INITIALIZE, RESOLVER, NIL, NOCACHE)
*
* Returns the IMP to call, or the forwarding IMP / nil when no
* implementation is found.
**********************************************************************/
NEVER_INLINE
IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior)
{
    // IMP used when the search falls through to message forwarding.
    const IMP forward_imp = (IMP)_objc_msgForward_impcache;
    IMP imp = nil;
    Class curClass;   // class currently being searched

    // The runtime lock must not be held on entry.
    runtimeLock.assertUnlocked();

    // If the class has not been initialized yet, suppress caching.
    if (slowpath(!cls->isInitialized())) {
        // The first message sent to a class is often +new or +alloc, or +self
        // which goes through objc_opt_* or various optimized entry points.
        //
        // However, the class isn't realized/initialized yet at this point,
        // and the optimized entry points fall down through objc_msgSend,
        // which ends up here.
        //
        // We really want to avoid caching these, as it can cause IMP caches
        // to be made with a single entry forever.
        //
        // Note that this check is racy as several threads might try to
        // message a given class for the first time at the same time,
        // in which case we might cache anyway.
        behavior |= LOOKUP_NOCACHE;
    }

    // runtimeLock is held during isRealized and isInitialized checking
    // to prevent races against concurrent realization.

    // runtimeLock is held during method search to make
    // method-lookup + cache-fill atomic with respect to method addition.
    // Otherwise, a category could be added but ignored indefinitely because
    // the cache was re-filled with the old value after the cache flush on
    // behalf of the category.
    runtimeLock.lock();

    // We don't want people to be able to craft a binary blob that looks like
    // a class but really isn't one and do a CFI attack.
    //
    // To make these harder we want to make sure this is a class that was
    // either built into the binary or legitimately registered through
    // objc_duplicateClass, objc_initializeClassPair or objc_allocateClassPair.
    checkIsKnownClass(cls);

    // Realize (and, if requested, initialize) the class before searching it.
    cls = realizeAndInitializeIfNeeded_locked(inst, cls, behavior & LOOKUP_INITIALIZE);
    // runtimeLock may have been dropped but is now locked again
    runtimeLock.assertLocked();
    curClass = cls;

    // Unbounded loop — exits only via goto done / goto done_unlock / break.
    // `attempts` exists solely to detect a corrupted (cyclic) class chain.
    for (unsigned attempts = unreasonableClassCount();;) {
        if (curClass->cache.isConstantOptimizedCache(/* strict */true)) {
#if CONFIG_USE_PREOPT_CACHES
            // Preoptimized (shared-cache) caches are authoritative for this
            // class: consult them via the cache_getImp assembly fast path.
            imp = cache_getImp(curClass, sel);
            if (imp) goto done_unlock;
            curClass = curClass->cache.preoptFallbackClass();
#endif
        } else {
            // Search curClass's own method lists for sel.
            Method meth = getMethodNoSuper_nolock(curClass, sel);
            if (meth) {
                // Found: take the imp and finish (done caches the result).
                imp = meth->imp(false);
                goto done;
            }

            // Not in this class: step to the superclass. nil means we've
            // walked off the top of the hierarchy without finding sel.
            if (slowpath((curClass = curClass->getSuperclass()) == nil)) {
                // No implementation found, and method resolver didn't help.
                // Use forwarding.
                imp = forward_imp;
                break;
            }
        }

        // Halt if there is a cycle in the superclass chain.
        if (slowpath(--attempts == 0)) {
            _objc_fatal("Memory corruption in class list.");
        }

        // Superclass cache. The _cache_getImp assembly entry runs CacheLookup
        // in GETIMP mode: on a miss it returns 0 instead of falling into the
        // uncached path, so control comes back to this loop.
        /* STATIC_ENTRY _cache_getImp
           GetClassFromIsa_p16 p0, 0
           CacheLookup GETIMP, _cache_getImp, LGetImpMissDynamic, LGetImpMissConstant
           LGetImpMissDynamic:
           mov p0, #0
           ret */
        imp = cache_getImp(curClass, sel);

        if (slowpath(imp == forward_imp)) {
            // Found a forward:: entry in a superclass.
            // Stop searching, but don't cache yet; call method
            // resolver for this class first.
            break;
        }

        if (fastpath(imp)) {
            // Found the method in a superclass. Cache it in this class.
            goto done;
        }
    }

    // No implementation found. Try method resolver once.
    if (slowpath(behavior & LOOKUP_RESOLVER)) {
        // Clear the flag so resolution is attempted at most once.
        behavior ^= LOOKUP_RESOLVER;
        return resolveMethod_locked(inst, sel, cls, behavior);
    }

 done:
    // Cache the result unless caching was suppressed above.
    if (fastpath((behavior & LOOKUP_NOCACHE) == 0)) {
#if CONFIG_USE_PREOPT_CACHES
        // Never fill a preoptimized cache; descend to its fallback class.
        while (cls->cache.isConstantOptimizedCache(/* strict */true)) {
            cls = cls->cache.preoptFallbackClass();
        }
#endif
        // Insert (sel -> imp) into cls's method cache (and log if enabled).
        log_and_fill_cache(cls, imp, sel, inst, curClass);
    }
 done_unlock:
    runtimeLock.unlock();
    // LOOKUP_NIL callers want nil instead of the forwarding IMP.
    if (slowpath((behavior & LOOKUP_NIL) && imp == forward_imp)) {
        return nil;
    }
    return imp;
}

Copy the code
  • `_objc_msgForward_impcache` (an assembly entry point) is used to create `forward_imp`
  • Check whether the class is initialized
  • Check whether the class is realized
  • If it is not realized, realize (and optionally initialize) the class
  • Enter an unbounded loop
  • If the class has a shared (preoptimized) cache, search that cache
  • Otherwise call `getMethodNoSuper_nolock` to search the class's own method lists
  • If a `method` is found, take its `imp`, jump to `done`, and then call `log_and_fill_cache` to insert it into the cache

Returning the imp

  • If no `method` is found in the class itself, `curClass = superclass` and `cache_getImp(curClass, sel)` is called — assembly that runs the fast CacheLookup on the superclass. If it finds an imp it jumps to `done`; if it returns nil, the loop continues up the superclass chain until the superclass is `nil`
  • When no class in the chain has an implementation, `resolveMethod_locked` is called to enter the method-resolution phase

How getMethodNoSuper_nolock finds a method


/***********************************************************************
* getMethodNoSuper_nolock
* Search only `cls`'s own method lists (no superclass walk) for `sel`.
* runtimeLock must be held by the caller; `cls` must be realized.
* Returns the matching method_t, or nil if not found.
**********************************************************************/
static method_t *
getMethodNoSuper_nolock(Class cls, SEL sel)
{
    runtimeLock.assertLocked();

    ASSERT(cls->isRealized());
    // fixme nil cls?
    // fixme nil sel?

    // The class's (possibly multi-segment) list of method lists.
    auto const methods = cls->data()->methods();

    // Walk each method-list segment in order.
    for (auto mlists = methods.beginLists(),
              end = methods.endLists();
         mlists != end;
         ++mlists)
    {
        // <rdar://problem/46904873> getMethodNoSuper_nolock is the hottest
        // caller of search_method_list, inlining it turns
        // getMethodNoSuper_nolock into a frame-less function and eliminates
        // any store from this codepath.
        // Search this segment for sel.
        method_t *m = search_method_list_inline(*mlists, sel);
        if (m) return m;
    }

    return nil;
}
Copy the code

search_method_list_inline

/***********************************************************************
* search_method_list_inline
* Search one method list for `sel`: binary search when the list is
* sorted (fixed up) and has the expected entry size, linear otherwise.
* Returns the matching method_t, or nil.
**********************************************************************/
ALWAYS_INLINE static method_t *
search_method_list_inline(const method_list_t *mlist, SEL sel)
{
    // Ignore any flags in the top bits, just look at the bottom two.
    int methodListIsFixedUp = mlist->isFixedUp();
    int methodListHasExpectedSize = mlist->isExpectedSize();

    if (fastpath(methodListIsFixedUp && methodListHasExpectedSize)) {
        // Sorted list: binary search.
        return findMethodInSortedMethodList(sel, mlist);
    } else {
        // Linear search of unsorted method list
        if (auto *m = findMethodInUnsortedMethodList(sel, mlist))
            return m;
    }

#if DEBUG
    // sanity-check negative results
    if (mlist->isFixedUp()) {
        for (auto& meth : *mlist) {
            if (meth.name() == sel) {
                _objc_fatal("linear search worked when binary search did not");
            }
        }
    }
#endif

    return nil;
}
Copy the code

findMethodInSortedMethodList

/***********************************************************************
* findMethodInSortedMethodList (dispatcher)
* Select the selector-name accessor matching the list's storage format
* (small/relative entries vs. big entries) and run the binary search.
**********************************************************************/
ALWAYS_INLINE static method_t *
findMethodInSortedMethodList(SEL key, const method_list_t *list)
{
    // Small (relative) method lists are used on newer targets (e.g. arm64).
    if (list->isSmallList()) {
        if (CONFIG_SHARED_CACHE_RELATIVE_DIRECT_SELECTORS && objc::inSharedCache((uintptr_t)list)) {
            // Shared-cache lists store direct selector references.
            return findMethodInSortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSEL(); });
        } else {
            return findMethodInSortedMethodList(key, list, [](method_t &m) { return m.getSmallNameAsSELRef(); });
        }
    } else {
        return findMethodInSortedMethodList(key, list, [](method_t &m) { return m.big().name; });
    }
}

findMethodInSortedMethodList — the binary search algorithm

/***********************************************************************
* findMethodInSortedMethodList (binary search core)
* Binary-search `list` (sorted by selector address) for `key`.
* `getName` extracts the selector from a method_t, abstracting over the
* list's storage format. Returns the FIRST matching entry, or nil.
*
* Worked example: list->count = 9, searching for the 9th entry (keyValue = i)
*   index: 0 1 2 3 4 5 6 7 8
*   value: a b c d e f g h i
*   1. base = 0, count = 9, probe = 0 + 9/2 = 4, probeValue = e
*      keyValue != probeValue; keyValue > probeValue
*      base = probe + 1 = 5, count = 9 - 1 = 8, then count >>= 1 -> 4
*   2. probe = 5 + 4/2 = 7 (h); keyValue > probeValue
*      base = 7 + 1 = 8, count = 4 - 1 = 3, then count >>= 1 -> 1
*   3. probe = 8 + 1/2 = 8 (i); keyValue == probeValue -> found
*
* Searching for the 1st entry (keyValue = a):
*   1. base = 0, count = 9, probe = 0 + 9/2 = 4 (e); keyValue < probeValue
*   2. count >>= 1 -> 4, probe = 0 + 4/2 = 2 (c); keyValue < probeValue
*   3. count >>= 1 -> 2, probe = 0 + 2/2 = 1 (b); keyValue < probeValue
*   4. count >>= 1 -> 1, probe = 0 + 1/2 = 0 (a); keyValue == probeValue
**********************************************************************/
template<class getNameFunc>
ALWAYS_INLINE static method_t *
findMethodInSortedMethodList(SEL key, const method_list_t *list, const getNameFunc &getName)
{
    ASSERT(list);

    auto first = list->begin();
    auto base = first;              // left edge of the remaining search range
    decltype(first) probe;          // midpoint probe

    uintptr_t keyValue = (uintptr_t)key;
    uint32_t count;

    // count halves each iteration; probe = base + count/2.
    for (count = list->count; count != 0; count >>= 1) {
        probe = base + (count >> 1);

        uintptr_t probeValue = (uintptr_t)getName(probe);

        if (keyValue == probeValue) {
            // `probe` is a match.
            // Rewind looking for the *first* occurrence of this value.
            // This is required for correct category overrides:
            // a category method placed before the class's own method
            // with the same selector must win.
            while (probe > first && keyValue == (uintptr_t)getName((probe - 1))) {
                probe--;
            }
            return &*probe;
        }

        if (keyValue > probeValue) {
            // Key is in the right half; skip probe itself.
            base = probe + 1;
            count--;
        }
    }

    return nil;
}
Copy the code
  • A `probe` iterator acts as the midpoint probe
  • `base` starts at the first element (index 0)
  • `probe = base + count / 2`, i.e. the middle position of the remaining range
  • When the target value is greater than `probe`'s value, `base` is moved past `probe` — the left half can be discarded because everything there is smaller
  • When it is less, the target is on the left: `base` stays put, `count` is halved, and a new `probe = base + count / 2` is checked; this repeats until `count` reaches 0 or the key is found