Cache_t structure

Here is the underlying structure of the class's method cache. cache_t is defined as follows:

/**
 
 Architecture: arm64
 Simulator: i386
 mac: x86_64
 
 */
struct cache_t {
private:
    explicit_atomic<uintptr_t> _bucketsAndMaybeMask; // 8
    union {
        struct {
            explicit_atomic<mask_t>    _maybeMask; // 4
#if __LP64__
            uint16_t                   _flags;  // 2
#endif
            uint16_t                   _occupied; // 2
        };
        explicit_atomic<preopt_cache_t *> _originalPreoptCache; // 8
    };

#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
    // _bucketsAndMaybeMask is a buckets_t pointer
    // _maybeMask is the buckets mask

    static constexpr uintptr_t bucketsMask = ~0ul;
    static_assert(!CONFIG_USE_PREOPT_CACHES, "preoptimized caches not supported");
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS
    static constexpr uintptr_t maskShift = 48;
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << maskShift) - 1;
    
    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
#if CONFIG_USE_PREOPT_CACHES
    static constexpr uintptr_t preoptBucketsMarker = 1ul;
    static constexpr uintptr_t preoptBucketsMask = bucketsMask & ~preoptBucketsMarker;
#endif
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    // _bucketsAndMaybeMask is a buckets_t pointer in the low 48 bits
    // _maybeMask is unused, the mask is stored in the top 16 bits.

    // How much the mask is shifted by.
    static constexpr uintptr_t maskShift = 48;

    // Additional bits after the mask which must be zero. msgSend
    // takes advantage of these additional bits to construct the value
    // `mask << 4` from `_maskAndBuckets` in a single instruction.
    static constexpr uintptr_t maskZeroBits = 4;

    // The largest mask value we can store.
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;
    
    // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;
    
    // Ensure we have enough bits for the buckets pointer.
    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS,
            "Bucket field doesn't have enough bits for arbitrary pointers.");

#if CONFIG_USE_PREOPT_CACHES
    static constexpr uintptr_t preoptBucketsMarker = 1ul;
#if __has_feature(ptrauth_calls)
    // 63..60: hash_mask_shift
    // 59..55: hash_shift
    // 54.. 1: buckets ptr + auth
    //      0: always 1
    static constexpr uintptr_t preoptBucketsMask = 0x007ffffffffffffe;
    static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t *cache) {
        uintptr_t value = (uintptr_t)cache->shift << 55;
        // masks have 11 bits but can be 0, so we compute
        // the right shift for 0x7fff rather than 0xffff
        return value | ((objc::mask16ShiftBits(cache->mask) - 1) << 60);
    }
#else
    // 63..53: hash_mask
    // 52..48: hash_shift
    // 47.. 1: buckets ptr
    //      0: always 1
    static constexpr uintptr_t preoptBucketsMask = 0x0000fffffffffffe;
    static inline uintptr_t preoptBucketsHashParams(const preopt_cache_t *cache) {
        return (uintptr_t)cache->hash_params << 48;
    }
#endif
#endif // CONFIG_USE_PREOPT_CACHES
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    // _bucketsAndMaybeMask is a buckets_t pointer in the top 28 bits
    // _maybeMask is unused, the mask length is stored in the low 4 bits

    static constexpr uintptr_t maskBits = 4;
    static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
    static constexpr uintptr_t bucketsMask = ~maskMask;
    static_assert(!CONFIG_USE_PREOPT_CACHES, "preoptimized caches not supported");
#else
#error Unknown cache mask storage type.
#endif

    bool isConstantEmptyCache() const;
    bool canBeFreed() const;
    mask_t mask() const;

#if CONFIG_USE_PREOPT_CACHES
    void initializeToPreoptCacheInDisguise(const preopt_cache_t *cache);
    const preopt_cache_t *disguised_preopt_cache() const;
#endif

    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);

    void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
    void collect_free(bucket_t *oldBuckets, mask_t oldCapacity);

    static bucket_t *emptyBuckets();
    static bucket_t *allocateBuckets(mask_t newCapacity);
    static bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);
    void bad_cache(id receiver, SEL sel) __attribute__((noreturn, cold));

public:
    // The following four fields are public for objcdt's use only.
    // objcdt reaches into fields while the process is suspended
    // hence doesn't care for locks and pesky little details like this
    // and can safely use these.
    unsigned capacity() const;
    struct bucket_t *buckets() const;
    Class cls() const;

#if CONFIG_USE_PREOPT_CACHES
    const preopt_cache_t *preopt_cache() const;
#endif

    mask_t occupied() const;
    void initializeToEmpty();

#if CONFIG_USE_PREOPT_CACHES
    bool isConstantOptimizedCache(bool strict = false, uintptr_t empty_addr = (uintptr_t)&_objc_empty_cache) const;
    bool shouldFlush(SEL sel, IMP imp) const;
    bool isConstantOptimizedCacheWithInlinedSels() const;
    Class preoptFallbackClass() const;
    void maybeConvertToPreoptimized();
    void initializeToEmptyOrPreoptimizedInDisguise();
#else
    inline bool isConstantOptimizedCache(bool strict = false, uintptr_t empty_addr = 0) const { return false; }
    inline bool shouldFlush(SEL sel, IMP imp) const {
        return cache_getImp(cls(), sel) == imp;
    }
    inline bool isConstantOptimizedCacheWithInlinedSels() const { return false; }
    inline void initializeToEmptyOrPreoptimizedInDisguise() { initializeToEmpty(); }
#endif

    void insert(SEL sel, IMP imp, id receiver);
    void copyCacheNolock(objc_imp_cache_entry *buffer, int len);
    void destroy();
    void eraseNolock(const char *func);

    static void init();
    static void collectNolock(bool collectALot);
    static size_t bytesForCapacity(uint32_t cap);

#if __LP64__
    bool getBit(uint16_t flags) const {
        return _flags & flags;
    }
    void setBit(uint16_t set) {
        __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
    }
    void clearBit(uint16_t clear) {
        __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
    }
#endif

#if FAST_CACHE_ALLOC_MASK
    bool hasFastInstanceSize(size_t extra) const
    {
        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        }
        return _flags & FAST_CACHE_ALLOC_MASK;
    }

    size_t fastInstanceSize(size_t extra) const
    {
        ASSERT(hasFastInstanceSize(extra));

        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        } else {
            size_t size = _flags & FAST_CACHE_ALLOC_MASK;
            // remove the FAST_CACHE_ALLOC_DELTA16 that was added
            // by setFastInstanceSize
            return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
        }
    }

    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
        uint16_t sizeBits;

        // Adding FAST_CACHE_ALLOC_DELTA16 allows for FAST_CACHE_ALLOC_MASK16
        // to yield the proper 16byte aligned allocation size with a single mask
        sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
        sizeBits &= FAST_CACHE_ALLOC_MASK;
        if (newSize <= sizeBits) {
            newBits |= sizeBits;
        }
        _flags = newBits;
    }
#else
    bool hasFastInstanceSize(size_t extra) const {
        return false;
    }
    size_t fastInstanceSize(size_t extra) const {
        abort();
    }
    void setFastInstanceSize(size_t extra) {
        // nothing
    }
#endif
};

_bucketsAndMaybeMask holds a pointer to the buckets, an array of structures of type bucket_t:

struct bucket_t {
private:
    // IMP-first is better for arm64e ptrauth and no worse for arm64.
    // SEL-first is better for armv7* and i386 and x86_64.
#if __arm64__
    explicit_atomic<uintptr_t> _imp;
    explicit_atomic<SEL> _sel;
#else
    explicit_atomic<SEL> _sel;
    explicit_atomic<uintptr_t> _imp;
#endif

    // Compute the ptrauth signing modifier from &_imp, newSel, and cls.
    uintptr_t modifierForSEL(bucket_t *base, SEL newSel, Class cls) const {
        return (uintptr_t)base ^ (uintptr_t)newSel ^ (uintptr_t)cls;
    }

    // Sign newImp, with &_imp, newSel, and cls as modifiers.
    uintptr_t encodeImp(UNUSED_WITHOUT_PTRAUTH bucket_t *base, IMP newImp,
                        UNUSED_WITHOUT_PTRAUTH SEL newSel, Class cls) const {
        if (!newImp) return 0;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        return (uintptr_t)
            ptrauth_auth_and_resign(newImp,
                                    ptrauth_key_function_pointer, 0,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(base, newSel, cls));
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (uintptr_t)newImp ^ (uintptr_t)cls;
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (uintptr_t)newImp;
#else
#error Unknown method cache IMP encoding.
#endif
    }

public:
    static inline size_t offsetOfSel() { return offsetof(bucket_t, _sel); }
    inline SEL sel() const { return _sel.load(memory_order_relaxed); }

#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
#define MAYBE_UNUSED_ISA
#else
#define MAYBE_UNUSED_ISA __attribute__((unused))
#endif
    inline IMP rawImp(MAYBE_UNUSED_ISA objc_class *cls) const {
        uintptr_t imp = _imp.load(memory_order_relaxed);
        if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        imp ^= (uintptr_t)cls;
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
#else
#error Unknown method cache IMP encoding.
#endif
        return (IMP)imp;
    }

    inline IMP imp(UNUSED_WITHOUT_PTRAUTH bucket_t *base, Class cls) const {
        uintptr_t imp = _imp.load(memory_order_relaxed);
        if (!imp) return nil;
#if CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_PTRAUTH
        SEL sel = _sel.load(memory_order_relaxed);
        return (IMP)
            ptrauth_auth_and_resign((const void *)imp,
                                    ptrauth_key_process_dependent_code,
                                    modifierForSEL(base, sel, cls),
                                    ptrauth_key_function_pointer, 0);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_ISA_XOR
        return (IMP)(imp ^ (uintptr_t)cls);
#elif CACHE_IMP_ENCODING == CACHE_IMP_ENCODING_NONE
        return (IMP)imp;
#else
#error Unknown method cache IMP encoding.
#endif
    }

    template <Atomicity, IMPEncoding>
    void set(bucket_t *base, SEL newSel, IMP newImp, Class cls);
};

From the properties and methods of bucket_t, we can see that it revolves around IMP: a bucket_t is literally a bucket holding a method implementation (imp) and its key (sel); in other words, the cache stores sel-imp pairs. The overall structure is shown in the figure below.

Finding sel-imp in the cache

  • Finding sel-imp through the source code with LLDB debugging
  • Looking for the source code in the project

Create an LGPerson class, and define a property plus an instance method with its implementation.

LLDB debugging
(lldb) p/x pClass
(Class) $0 = 0x0000000100008430 LGPerson
(lldb) p (cache_t *)0x0000000100008430
(cache_t *) $1 = 0x0000000100008430
(lldb) p *$1
(cache_t) $2 = {
  _bucketsAndMaybeMask = {
    std::__1::atomic<unsigned long> = {
      Value = 4295001096
    }
  }
   = {
     = {
      _maybeMask = {
        std::__1::atomic<unsigned int> = {
          Value = 3580224
        }
      }
      _flags = 1
      _occupied = 0
    }
    _originalPreoptCache = {
      std::__1::atomic<preopt_cache_t *> = {
        Value = 0x000000010036a140
      }
    }
  }
}
(lldb) p [p sayHello]
2021-06-28 09:35:47.301192+0800 KCObjcBuild[1254:26104] -[LGPerson sayHello]
(lldb) p pClass
(Class) $3 = LGPerson
(lldb) p/x pClass
(Class) $4 = 0x0000000100008430 LGPerson
(lldb) p (cache_t *)0x0000000100008430
(cache_t *) $5 = 0x0000000100008430
(lldb) p 0x0000000100008430+0x10
(long) $6 = 4295001152
(lldb) p *$6
error: <user expression 8>:1:1: indirection requires pointer operand ('long' invalid)
*$6
^~~
(lldb) p/x 0x0000000100008430+0x10
(long) $7 = 0x0000000100008440
(lldb) p *$7
error: <user expression 10>:1:1: indirection requires pointer operand ('long' invalid)
*$7
^~~
(lldb) p (cache_t *)0x0000000100008440
(cache_t *) $8 = 0x0000000100008440
(lldb) p *$8
(cache_t) $9 = {
  _bucketsAndMaybeMask = {
    std::__1::atomic<unsigned long> = {
      Value = 4302790864
    }
  }
   = {
     = {
      _maybeMask = {
        std::__1::atomic<unsigned int> = {
          Value = 7
        }
      }
      _flags = 32808
      _occupied = 1
    }
    _originalPreoptCache = {
      std::__1::atomic<preopt_cache_t *> = {
        Value = 0x0001802800000007
      }
    }
  }
}
(lldb) p $8.buckets()
(bucket_t *) $10 = 0x00000001007760d0
  Fix-it applied, fixed expression was:
    $8->buckets()
(lldb) p $10.sel()
(SEL) $11 = (null)
  Fix-it applied, fixed expression was:
    $10->sel()
(lldb) p $10.imp(nil,pClass)
(IMP) $12 = 0x0000000000000000
  Fix-it applied, fixed expression was:
    $10->imp(nil,pClass)
(lldb) p *$10
(bucket_t) $13 = {
  _sel = {
    std::__1::atomic<objc_selector *> = (null) {
      Value = nil
    }
  }
  _imp = {
    std::__1::atomic<unsigned long> = {
      Value = 0
    }
  }
}
(lldb) p $8.buckets()[1]
(bucket_t) $14 = {
  _sel = {
    std::__1::atomic<objc_selector *> = "" {
      Value = ""
    }
  }
  _imp = {
    std::__1::atomic<unsigned long> = {
      Value = 47104
    }
  }
}
  Fix-it applied, fixed expression was:
    $8->buckets()[1]
(lldb) p $14.imp(nil,pClass)
(IMP) $15 = 0x0000000100003c30 (KCObjcBuild`-[LGPerson sayHello])
  • To obtain the cache member, offset the first address of pClass by 16 bytes (8 bytes for the isa pointer plus 8 bytes for the superclass pointer); that is, first address + 0x10 gives the cache address (see the layout sketch after this list)

  • From the source analysis, we know that sel-imp is stored in the buckets of cache_t (here on macOS), and that cache_t provides the buckets() method to retrieve them

  • Once we have the buckets, we can extract sel-imp: the bucket_t structure provides the corresponding sel() and imp(UNUSED_WITHOUT_PTRAUTH bucket_t *base, Class cls) methods
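
To make the +0x10 offset concrete, here is a minimal layout sketch (mock types, not the runtime's) showing that on a 64-bit platform the cache sits 16 bytes past the class address:

#include <cstddef>
#include <cstdint>

struct mock_cache_t {
    uintptr_t bucketsAndMaybeMask; // 8 bytes
    uint64_t  maskFlagsOccupied;   // 8 bytes: stands in for the anonymous union
};

struct mock_objc_class {
    void        *isa;        // offset 0x00
    void        *superclass; // offset 0x08
    mock_cache_t cache;      // offset 0x10: what the LLDB session casts to cache_t *
};

static_assert(offsetof(mock_objc_class, cache) == 0x10,
              "cache sits 16 bytes past the class address");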

A method is cached only once it has been called; until the first call, nothing is stored in the cache for it.

Look for the source code in the project
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits

struct kc_bucket_t {
    SEL _sel;
    IMP _imp;
};

struct kc_cache_t {
    struct kc_bucket_t *_bukets; // 8
    mask_t _maybeMask;           // 4
    uint16_t _flags;             // 2
    uint16_t _occupied;          // 2
};

struct kc_class_data_bits_t {
    uintptr_t bits;
};

// mirror of objc_class
struct kc_objc_class {
    Class isa;
    Class superclass;
    struct kc_cache_t cache;  // formerly cache pointer and vtable
    struct kc_class_data_bits_t bits;
};

int main(int argc, const char * argv[]) {
    @autoreleasepool {
        LGPerson *p = [LGPerson alloc];
        Class pClass = p.class;  // objc_class
        [p say1];
        [p say2];
        [p say3];
        [p say4];
        [p say1];
        [p say2];
        // [p say3];
        [pClass sayHappy];

        struct kc_objc_class *kc_class = (__bridge struct kc_objc_class *)(pClass);
        NSLog(@"%hu - %u", kc_class->cache._occupied, kc_class->cache._maybeMask);
        // 0 - 8136976 before adding isa; 1 - 3 after
        // three ways to explore: 1: source code (cannot be debugged directly)
        //                        2: LLDB  3: small sample project
        // observed results: a: 1-3 -> 1-7   b: (null) - 0x0
        //                   c: 2-7 + say4 - 0xB850 + no class method   d: NSObject

        for (mask_t i = 0; i < kc_class->cache._maybeMask; i++) {
            struct kc_bucket_t bucket = kc_class->cache._bukets[i];
            NSLog(@"%@ - %pf", NSStringFromSelector(bucket._sel), bucket._imp);
        }

        NSLog(@"Hello, World!");
    }
    return 0;
}

The isa member of objc_class is inherited from objc_object, but when we copied the definition out of the source we dropped that inheritance relationship, so we must declare isa explicitly; otherwise the printed result is wrong, as shown in the figure below.

After adding the isa member and making the two method calls, the correct print looks like this.

Adding two more method calls (uncommenting say3 and say4) prints the following.

In view of the above printed results, there are several questions

  1. What is _mask?
  2. What is _occupied?
  3. Why do the printed occupied and mask values change as the number of method calls increases?
  4. Why is some bucket data missing? For example, in the 2-7 case only say3 and say4 have function pointers
  5. Why, in the 2-7 case, is say4 printed before say3, out of invocation order?
  6. Why does the printed _occupied in cache_t start at 2?

With these questions in mind, let’s explore the underlying principles of cache

cache_t underlying mechanism analysis

  • We start with the _occupied member of cache_t and look for the function that changes it. We find incrementOccupied()

The concrete implementation of this function is

void cache_t::incrementOccupied() 
{
    _occupied++;
}

  • A global search of the source for the incrementOccupied() function reveals that it is called only from the insert method of cache_t

  • This tells us that insert is the method through which cache_t writes sel-imp pairs into the cache

  • A global search for cache_t::insert shows that before the write there is one more step, a cache read, which looks up sel-imp, as shown below

Insert method analysis

The source implementation of the insert method is as follows

void cache_t::insert(SEL sel, IMP imp, id receiver)
{
    runtimeLock.assertLocked();

    // Never cache before +initialize is done
    if (slowpath(!cls()->isInitialized())) {
        return;
    }

    if (isConstantOptimizedCache()) {
        _objc_fatal("cache_t::insert() called with a preoptimized cache for %s",
                    cls()->nameForLogging());
    }

#if DEBUG_TASK_THREADS
    return _collecting_in_critical();
#else
#if CONFIG_USE_CACHE_LOCK
    mutex_locker_t lock(cacheUpdateLock);
#endif

    ASSERT(sel != 0 && cls()->isInitialized());

    // Use the cache as-is if until we exceed our expected fill ratio.
    mask_t newOccupied = occupied() + 1; // 1+1
    unsigned oldCapacity = capacity(), capacity = oldCapacity;
    if (slowpath(isConstantEmptyCache())) {
        // Cache is read-only. Replace it.
        if (!capacity) capacity = INIT_CACHE_SIZE; // 4
        reallocate(oldCapacity, capacity, /* freeOld */false);
    }
    else if (fastpath(newOccupied + CACHE_END_MARKER <= cache_fill_ratio(capacity))) {
        // Cache is less than 3/4 or 7/8 full. Use it as-is.
    }
#if CACHE_ALLOW_FULL_UTILIZATION
    else if (capacity <= FULL_UTILIZATION_CACHE_SIZE && newOccupied + CACHE_END_MARKER <= capacity) {
        // Allow 100% cache utilization for small buckets. Use it as-is.
    }
#endif
    else {
        // 4*2 = 8
        capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
        if (capacity > MAX_CACHE_SIZE) {
            capacity = MAX_CACHE_SIZE;
        }
        reallocate(oldCapacity, capacity, true);
    }

    bucket_t *b = buckets();
    mask_t m = capacity - 1; // 4-1=3
    mask_t begin = cache_hash(sel, m);
    mask_t i = begin;

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot.
    do {
        if (fastpath(b[i].sel() == 0)) {
            incrementOccupied();
            b[i].set<Atomic, Encoded>(b, sel, imp, cls());
            return;
        }
        if (b[i].sel() == sel) {
            // The entry was added to the cache by some other thread
            // before we grabbed the cacheUpdateLock.
            return;
        }
    } while (fastpath((i = cache_next(i, m)) != begin));

    bad_cache(receiver, (SEL)sel);
#endif // !DEBUG_TASK_THREADS
}

It is mainly divided into the following parts

  • [Step 1] Calculate the current cache usage
  • [Step 2] Determine the operation to be performed based on the cache usage
  • [Step 3] Perform internal IMP and SEL assignments for the buckets to be stored
[Step 1] Calculate the current cache usage

The current cache usage is computed as mask_t newOccupied = occupied() + 1; that is, the current occupied count plus the entry about to be inserted.

Regarding the calculation of cache usage, there are the following notes:

  • Calling init again also increments occupied by 1
  • Property assignment implicitly calls set methods, which also increments occupied; each assignment adds 1 on top of the previous count
  • Calling a method also increments occupied; each call of an uncached method adds 1 on top of the previous count
[Step 2] Determine the operation to be performed based on the cache usage
  • If this is the first creation, 4 buckets are allocated by default (INIT_CACHE_SIZE)
  • If the cache usage is at most 3/4 (counting the end marker), nothing is done
  • If the cache usage would exceed 3/4, the capacity is doubled and the table is reallocated; note that reallocate frees the old buckets instead of rehashing their contents, so previously cached sel-imp pairs are dropped (a sketch of this decision follows this list)
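
As a rough sketch of this decision, assuming the macOS x86_64 configuration seen in the source (INIT_CACHE_SIZE = 4, MAX_CACHE_SIZE = 1 << 16, one end-marker slot, a 3/4 fill ratio; the function and constant names here are illustrative, not the runtime's):

#include <cstdint>

using mask_t = uint32_t;

constexpr mask_t kInitCacheSize = 1 << 2;  // 4
constexpr mask_t kMaxCacheSize  = 1 << 16;

mask_t capacityAfterInsert(mask_t occupied, mask_t capacity, bool isEmptyCache) {
    mask_t newOccupied = occupied + 1;                 // [Step 1] usage after this insert
    if (isEmptyCache) {
        return capacity ? capacity : kInitCacheSize;   // first creation: 4 buckets
    }
    if (newOccupied + 1 <= capacity * 3 / 4) {         // "+ 1" is the end-marker slot
        return capacity;                               // at most 3/4 full: reuse as-is
    }
    mask_t doubled = capacity ? capacity * 2 : kInitCacheSize;
    return doubled > kMaxCacheSize ? kMaxCacheSize : doubled;  // over 3/4: double
}

// e.g. capacityAfterInsert(2, 4, false) == 8: the third insert into a
// 4-bucket cache triggers expansion, and reallocate() drops the old entries.
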
The reallocate method: allocating space

This method is used both for the first creation and for the doubling expansion; it proceeds in the following steps

  • The allocateBuckets method requests memory from the system for the new buckets; at this point the new table is held only in a temporary variable
  • The setBucketsAndMask method stores the temporary buckets into the cache, in one of two ways depending on the architecture (a packing sketch follows this list):
    • If the mask is packed into the pointer word (e.g. on arm64 real devices), buckets and mask are shifted into their positions in _bucketsAndMaybeMask, and _occupied is set to 0
    • Otherwise, buckets and mask are stored into their separate fields, and _occupied is set to 0
  • If there are old buckets, the previous cache must be cleared by calling the collect_free method:
    • The _garbage_make_room method creates garbage-collection space
      • On the first call, the reclamation space is allocated
      • On later calls, the space is enlarged, i.e. the original memory * 2
    • Record the buckets discarded this time
    • The cache_collect method garbage-collects and cleans up the old buckets
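
As an illustration of the packing idea behind setBucketsAndMask, here is a sketch under the CACHE_MASK_STORAGE_HIGH_16_BIG_ADDRS layout shown earlier: the buckets pointer occupies the low 48 bits of one word and the mask the top 16 bits. The helper names are made up, and the real method additionally zeroes _occupied and uses atomic stores:

#include <cstdint>

constexpr uintptr_t maskShift   = 48;
constexpr uintptr_t bucketsMask = ((uintptr_t)1 << maskShift) - 1;

uintptr_t packBucketsAndMask(void *newBuckets, uint32_t newMask) {
    // buckets in the low 48 bits, mask in the top 16
    return ((uintptr_t)newBuckets & bucketsMask) | ((uintptr_t)newMask << maskShift);
}

void *unpackBuckets(uintptr_t word) {
    return (void *)(word & bucketsMask);
}

uint32_t unpackMask(uintptr_t word) {
    return (uint32_t)(word >> maskShift);
}
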
[Step 3] Perform internal IMP and SEL assignments for the buckets to be stored

In this step, the cache_hash method (the hash algorithm) computes the index at which the sel-imp pair is stored; there are three cases:

  • If the slot at the computed index is empty (sel == 0), the sel-imp pair is stored there and occupied is incremented by 1

  • If the sel already stored at the current index equals the sel being inserted, return directly: another thread cached it first

  • If the sel stored at the current index differs from the sel being inserted, the cache_next (hash collision) algorithm computes a new index, and the comparison and storage attempt repeat

The source code of the two hash algorithms involved is as follows

  • cache_hash: the hash algorithm
static inline mask_t cache_hash(SEL sel, mask_t mask)
{
    return (mask_t)(uintptr_t)sel & mask; // sel & mask (mask = capacity - 1)
}
  • cache_next: the hash collision algorithm
#if __arm__  ||  __x86_64__  ||  __i386__

// objc_msgSend has few registers available.
// Cache scan increments and wraps at special end-marking bucket.
#define CACHE_END_MARKER 1
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return (i+1) & mask; // (current hash index + 1) & mask: re-hash forward
}

#elif __arm64__

// objc_msgSend has lots of registers available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return i ? i-1 : mask; // if i is 0, wrap to mask (= capacity - 1); otherwise step down to i-1
}

#endif
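
These two functions also answer question 5 above: a bucket's position is determined by sel & mask plus probing, not by invocation order. Below is a toy model of the x86_64 flavor (forward scan), with plain integers standing in for SEL and IMP; it is illustrative only, not the runtime's code:

#include <cstdint>
#include <cstdio>

using mask_t = uint32_t;

struct Slot { uintptr_t sel; uintptr_t imp; };

// Mimics the do/while scan in cache_t::insert: hash, then probe
// forward until an empty slot or a matching sel is found.
bool toyInsert(Slot *buckets, mask_t mask, uintptr_t sel, uintptr_t imp) {
    mask_t begin = (mask_t)sel & mask;   // cache_hash: sel & (capacity - 1)
    mask_t i = begin;
    do {
        if (buckets[i].sel == 0) {       // empty slot: store the pair here
            buckets[i] = { sel, imp };
            return true;
        }
        if (buckets[i].sel == sel) {     // already cached (e.g. by another thread)
            return true;
        }
        i = (i + 1) & mask;              // cache_next: probe the next slot
    } while (i != begin);
    return false;                        // no slot left: the runtime calls bad_cache()
}

int main() {
    Slot buckets[8] = {};                   // capacity 8, mask = 7
    toyInsert(buckets, 7, 0x1009, 0xAAAA);  // 0x1009 & 7 == 1: lands in slot 1
    toyInsert(buckets, 7, 0x2009, 0xBBBB);  // also hashes to 1: probed into slot 2
    for (int i = 0; i < 8; i++)
        printf("slot %d: sel = %#lx\n", i, (unsigned long)buckets[i].sel);
    return 0;
}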

Questions about cache_t

Why expand at 3/4?

In a hash-based data structure, the load factor describes how full the table is: the larger the load factor, the fewer idle slots, the more collisions, and the worse the hash table performs. At a load factor of 3/4, space utilization is still high while serious hash collisions are avoided, which keeps lookups efficient.
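
For intuition, here is a small sketch evaluating Knuth's classic estimates for linear probing (the probing scheme cache_next implements) at a few load factors; note how sharply the expected number of probes grows past 3/4:

#include <cstdio>

// Expected probes per lookup under linear probing at load factor a:
//   successful:   (1 + 1/(1-a))   / 2
//   unsuccessful: (1 + 1/(1-a)^2) / 2
int main() {
    const double loads[] = { 0.50, 0.75, 0.90 };
    for (double a : loads) {
        double hit  = 0.5 * (1.0 + 1.0 / (1.0 - a));
        double miss = 0.5 * (1.0 + 1.0 / ((1.0 - a) * (1.0 - a)));
        printf("load %.2f: ~%.1f probes (hit), ~%.1f probes (miss)\n", a, hit, miss);
    }
    // load 0.50: ~1.5 probes (hit), ~2.5 probes (miss)
    // load 0.75: ~2.5 probes (hit), ~8.5 probes (miss)
    // load 0.90: ~5.5 probes (hit), ~50.5 probes (miss)
    return 0;
}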