When is an object added to the autoreleasepool?

Where is an object's reference count stored?

autoreleasepool

At runtime, each RunLoop creates an autoreleasepool when it starts. Just before the RunLoop goes to sleep (and again when it exits), the pool is drained: every object pushed after the sentinel object (a nil boundary marker) is sent a release message.
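
You can also drain objects earlier with an explicit @autoreleasepool block. A minimal sketch (not from the runtime source; assumes ARC) that uses a __weak probe to observe the drain:

#import <Foundation/Foundation.h>

int main(void) {
    __weak NSMutableArray *probe = nil;    // weak, so it does not keep the object alive
    @autoreleasepool {
        // +array returns an autoreleased object, pushed onto the innermost pool.
        NSMutableArray *a = [NSMutableArray array];
        probe = a;
        NSLog(@"inside pool: %@", probe);  // still alive here
    }                                      // pool drains: release is sent, object deallocates
    NSLog(@"after drain: %@", probe);      // prints (null) - the weak probe was zeroed
    return 0;
}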

Where is the object's reference count stored?

On 64-bit platforms, the reference count can be stored directly in the optimized isa pointer:


union isa_t {   // a union: cls and bits overlay the same 8 bytes
    isa_t() { }
    isa_t(uintptr_t value) : bits(value) { }

    Class cls;
    uintptr_t bits;
    struct {    // x86_64 layout (the fields come from the ISA_BITFIELD macro)
        uintptr_t nonpointer        : 1;  // is this an optimized (non-pointer) isa?
        uintptr_t has_assoc         : 1;  // has associated objects
        uintptr_t has_cxx_dtor      : 1;  // has a C++ destructor
        uintptr_t shiftcls          : 44; // class pointer; MACH_VM_MAX_ADDRESS 0x7fffffe00000
        uintptr_t magic             : 6;  // marks the object as initialized
        uintptr_t weakly_referenced : 1;  // object is (or was) pointed to by a __weak variable
        uintptr_t deallocating      : 1;  // object is being deallocated
        uintptr_t has_sidetable_rc  : 1;  // part of the count lives in a side table
        uintptr_t extra_rc          : 8;  // the inline reference count
    };
};
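
To make the packing concrete, here is an illustrative sketch (it assumes the x86_64 layout above; the mask value matches objc4's ISA_MASK, but none of this is a stable ABI) of pulling the class pointer and the inline count back out of the raw bits:

#include <stdint.h>

#define ISA_MASK     0x00007ffffffffff8ULL  // covers the shiftcls bits (the class pointer)
#define EXTRA_RC_POS 56                     // extra_rc occupies the top 8 bits

static uintptr_t isa_class_bits(uintptr_t bits) { return bits & ISA_MASK; }       // -> Class
static uintptr_t isa_inline_rc(uintptr_t bits)  { return bits >> EXTRA_RC_POS; }  // -> extra_rc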


With that layout in mind, let's look at the implementation of retainCount:


inline uintptr_t
objc_object::rootRetainCount()
{
    if (isTaggedPointer()) return (uintptr_t)this; // tagged pointers carry no refcount

    sidetable_lock();                       // lock the side table
    isa_t bits = LoadExclusive(&isa.bits);  // read the isa bits
    ClearExclusive(&isa.bits);
    if (bits.nonpointer) {                  // is this an optimized (non-pointer) isa?
        uintptr_t rc = 1 + bits.extra_rc;   // inline count, +1 for the implicit retain
        if (bits.has_sidetable_rc) {        // part of the count spilled into the side table
            rc += sidetable_getExtraRC_nolock(); // add the count saved in the sideTable
        }
        sidetable_unlock();                 // unlock
        return rc;                          // return the count
    }

    sidetable_unlock();                     // unlock
    // Not a non-pointer isa: the whole count lives in the side table
    return sidetable_retainCount();
}

size_t 
objc_object::sidetable_getExtraRC_nolock()
{
    ASSERT(isa.nonpointer); // only called for an optimized isa
    SideTable& table = SideTables()[this]; // the SideTable that covers this object
    RefcountMap::iterator it = table.refcnts.find(this); // find this object's entry in the RefcountMap
    if (it == table.refcnts.end()) return 0;
    else return it->second >> SIDE_TABLE_RC_SHIFT; // strip the flag bits to get the count
}

// RefcountMap disguises its pointers because we 
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,RefcountMapValuePurgeable> RefcountMap;

uintptr_t
objc_object::sidetable_retainCount()
{
    SideTable& table = SideTables()[this]; // the SideTable in SideTables keyed by this object's address

    size_t refcnt_result = 1;   // start from the implicit retain

    table.lock();   // lock the table
    RefcountMap::iterator it = table.refcnts.find(this); // find this object's entry in refcnts
    if (it != table.refcnts.end()) { // an entry exists
        // this is valid for SIDE_TABLE_RC_PINNED too
        refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT; // SIDE_TABLE_RC_SHIFT == 2
    }
    table.unlock(); // unlock
    return refcnt_result;
}
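
Why shift right by SIDE_TABLE_RC_SHIFT? The value stored in refcnts is not a bare count: its two low bits are flags. The relevant macros from objc4's NSObject.mm:

#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)   // bit 0: object has weak references
#define SIDE_TABLE_DEALLOCATING      (1UL<<1)   // bit 1: object is deallocating
#define SIDE_TABLE_RC_ONE            (1UL<<2)   // bit 2 and up: the actual count
#define SIDE_TABLE_RC_PINNED         (1UL<<(WORD_BITS-1)) // top bit: count overflowed, pinned

#define SIDE_TABLE_RC_SHIFT 2                   // so ">> 2" strips the two flag bits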
      

rootRetainCount reads the bits of the object's isa pointer and returns the reference count (the count the runtime stores, plus one for the implicit retain from alloc):

  1. If bits is nonpointer, the isa pointer is optimized: it packs in not only the class address but also class metadata, the object's reference count, and so on. extra_rc holds the inline count (hence rc = 1 + extra_rc); if has_sidetable_rc is set, the count has overflowed into a SideTable, and the count found there is added on top.

  2. If bits is not nonpointer, the isa is a plain class pointer and carries no count: the SideTable containing this object is looked up in the SideTables array, and the count stored there is returned. A quick experiment follows below.
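
Here is a sketch of such an experiment (compile with -fno-objc-arc, since ARC forbids calling retain and retainCount directly; retainCount is a debugging curiosity, not something to rely on in real code):

#import <Foundation/Foundation.h>

int main(void) {
    NSObject *obj = [[NSObject alloc] init];                       // extra_rc = 0 -> count 1
    NSLog(@"after init:   %lu", (unsigned long)[obj retainCount]); // 1
    [obj retain];                                                  // extra_rc = 1 -> count 2
    NSLog(@"after retain: %lu", (unsigned long)[obj retainCount]); // 2
    [obj release];
    [obj release];                                                 // deallocates
    return 0;
}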

What is SideTable?

We saw above that part of an object's reference count can spill over into a SideTable. So what exactly is a SideTable?

When the program starts, the runtime creates 64 SideTables and stores them in the global SideTables array; an object is mapped to one of them by hashing its address.


struct SideTable {
    spinlock_t slock;       // spin lock guarding this table
    RefcountMap refcnts;    // map from object address to reference count
    weak_table_t weak_table;    // the weak reference table

    SideTable() {   // constructor
        memset(&weak_table, 0, sizeof(weak_table));
    }

    ~SideTable() {  // destructor: side tables are never torn down
        _objc_fatal("Do not delete SideTable.");
    }

    void lock() { slock.lock(); }       // lock
    void unlock() { slock.unlock(); }   // unlock
    void forceReset() { slock.forceReset(); }

    // Address-ordered lock discipline for a pair of side tables.

    template<HaveOld, HaveNew>
    static void lockTwo(SideTable *lock1, SideTable *lock2);
    template<HaveOld, HaveNew>
    static void unlockTwo(SideTable *lock1, SideTable *lock2);
};


Note that besides the reference-count map, a SideTable also holds the weak reference table (weak_table); we will come back to it below.
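
How does SideTables()[this] pick a table? The array is a striped map keyed by object address. A minimal sketch of the idea (the constant and the hash mirror objc4's StripedMap::indexForPointer, but the function name here is illustrative):

#include <stdint.h>
#include <stddef.h>

enum { StripeCount = 64 };  // 8 on iOS in the real runtime

// Hash an object's address onto one of the tables; the shifts mix the bits so
// that neighboring objects spread across different stripes (and their locks).
static size_t side_table_index(const void *p) {
    uintptr_t addr = (uintptr_t)p;
    return ((addr >> 4) ^ (addr >> 9)) % StripeCount;
}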

What happens when the reference count goes up by 1?


ALWAYS_INLINE id 
objc_object::rootRetain(bool tryRetain, bool handleOverflow)
{
    if (isTaggedPointer()) return (id)this; // tagged pointers are not reference counted

    bool sideTableLocked = false;
    bool transcribeToSideTable = false;

    isa_t oldisa;
    isa_t newisa;

    do {
        transcribeToSideTable = false;
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            ClearExclusive(&isa.bits);
            if (rawISA()->isMetaClass()) return (id)this;
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            if (tryRetain) return sidetable_tryRetain() ? (id)this : nil;
            else return sidetable_retain();
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        if (slowpath(tryRetain && newisa.deallocating)) {
            ClearExclusive(&isa.bits);
            if (!tryRetain && sideTableLocked) sidetable_unlock();
            return nil;
        }
        uintptr_t carry;
        newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc++

        if (slowpath(carry)) {
            // newisa.extra_rc++ overflowed
            if (!handleOverflow) {
                ClearExclusive(&isa.bits);
                return rootRetain_overflow(tryRetain);
            }
            // Leave half of the retain counts inline and 
            // prepare to copy the other half to the side table.
            if (!tryRetain && !sideTableLocked) sidetable_lock();
            sideTableLocked = true;
            transcribeToSideTable = true;
            newisa.extra_rc = RC_HALF;
            newisa.has_sidetable_rc = true;
        }
    } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)));

    if (slowpath(transcribeToSideTable)) {
        // Copy the other half of the retain counts to the side table.
        sidetable_addExtraRC_nolock(RC_HALF);
    }

    if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock();
    return (id)this;
}


The key step is the addc call, which adds RC_ONE to the isa bits, i.e. extra_rc++. If the addition carries out of the 8-bit extra_rc field, the inline count has overflowed: half of it (RC_HALF) stays in the isa, the other half is transcribed into the side table via sidetable_addExtraRC_nolock, and has_sidetable_rc is set so later reads know to look there.
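
A toy illustration of that carry (the constants follow the x86_64 layout shown earlier; this is illustrative, not the runtime's exact code):

#include <stdint.h>
#include <stdio.h>

#define RC_ONE  (1ULL << 56)  // one inline retain: extra_rc sits in the top 8 bits
#define RC_HALF (1ULL << 7)   // half of extra_rc's 8-bit range (128)

int main(void) {
    uint64_t bits = 0xFFULL << 56;  // pretend extra_rc is already 255, its maximum
    uint64_t sum  = bits + RC_ONE;  // one more retain
    int carry = (sum < bits);       // extra_rc is topmost, so field overflow == word overflow
    printf("carry = %d\n", carry);  // 1: time to move RC_HALF into the side table
    return 0;
}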

What happens when an object is referenced by a __weak variable?

objc_initWeak is called when a __weak variable is initialized:


id
objc_initWeak(id *location, id newObj)
{
    if (!newObj) {
        *location = nil;
        return nil;
    }

    return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
        (location, (objc_object*)newObj);
}

template <HaveOld haveOld, HaveNew haveNew,
          CrashIfDeallocating crashIfDeallocating>
static id 
storeWeak(id *location, objc_object *newObj)
{
    ASSERT(haveOld  ||  haveNew);
    if (!haveNew) ASSERT(newObj == nil);  // with no new value, newObj must be nil

    Class previouslyInitializedClass = nil;
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems. 
    // Retry if the old value changes underneath us.
 retry:
    if (haveOld) {  // read the old value and its table
        oldObj = *location;
        oldTable = &SideTables()[oldObj];
    } else {
        oldTable = nil;
    }
    if (haveNew) {  // look up the table for the new value
        newTable = &SideTables()[newObj];
    } else {
        newTable = nil;
    }

    SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);  // lock both tables in address order

    if (haveOld  &&  *location != oldObj) {
        SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
        goto retry;
    }

    if (haveNew  &&  newObj) {
        Class cls = newObj->getIsa();   // the new value's class object
        if (cls != previouslyInitializedClass  &&  
            !((objc_class *)cls)->isInitialized())  // cls has not been initialized yet
        {
            SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
            class_initialize(cls, (id)newObj);  // obj is initialized

            previouslyInitializedClass = cls;
            goto retry;
        }
    }

    // Clean up old value, if any.
    if (haveOld) {
        weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    }

    // Assign new value, if any.
    if (haveNew) {
        newObj = (objc_object *)
            weak_register_no_lock(&newTable->weak_table, (id)newObj, location, 
                                  crashIfDeallocating);
        // weak_register_no_lock returns nil if weak store should be rejected

        // Set is-weakly-referenced bit in refcount table.
        if (newObj  &&  !newObj->isTaggedPointer()) {
            newObj->setWeaklyReferenced_nolock();
        }

        // Do not set *location anywhere else. That would introduce a race.
        *location = (id)newObj;
    }
    else {
        // No new value. The storage is not changed.
    }
    
    SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);

    return (id)newObj;
}

inline void
objc_object::setWeaklyReferenced_nolock()
{
 retry:
    isa_t oldisa = LoadExclusive(&isa.bits);
    isa_t newisa = oldisa;
    if (slowpath(!newisa.nonpointer)) {
        // Raw isa: the weakly-referenced flag lives in the side table instead
        ClearExclusive(&isa.bits);
        sidetable_setWeaklyReferenced_nolock();
        return;
    }
    if (newisa.weakly_referenced) {
        // Bit already set; nothing to do
        ClearExclusive(&isa.bits);
        return;
    }
    newisa.weakly_referenced = true;
    // Retry if another thread changed the isa bits underneath us
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
}


The object is registered in the new table's weak_table, and setWeaklyReferenced_nolock flips its weakly_referenced bit from 0 to 1, so that when the object deallocates the runtime knows to clear the weak references pointing at it.
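
To see the net effect, here is a minimal sketch (assuming ARC; the compiler lowers the __weak initialization to objc_initWeak and later assignments to objc_storeWeak): the weak pointer is zeroed automatically when the object deallocates, which is exactly what this weak_table bookkeeping enables.

#import <Foundation/Foundation.h>

int main(void) {
    NSObject * __weak weakRef = nil;
    {
        NSObject *obj = [[NSObject alloc] init];
        weakRef = obj;                   // objc_storeWeak: registered in obj's weak_table
        NSLog(@"alive: %@", weakRef);    // prints the object
    }                                    // obj deallocates; the runtime zeroes weakRef
    NSLog(@"after: %@", weakRef);        // prints (null)
    return 0;
}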