This article is based on the objc4 source code, version 779.1. It reflects my own understanding from studying the source; if you spot any mistakes, corrections are welcome.

release

Method call stack

    • - (oneway void)release
      • _objc_rootRelease(id obj)
        • objc_object::rootRelease()
          • objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
            • sidetable_release
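
To see this stack in action, here is a minimal MRC example (hypothetical MyObject class; compile with -fno-objc-arc). Each explicit release below enters the chain above, and the last one ends in dealloc:

#import <Foundation/Foundation.h>

@interface MyObject : NSObject
@end

@implementation MyObject
- (void)dealloc {
    NSLog(@"dealloc reached");  // sent from rootRelease() on the final release
    [super dealloc];
}
@end

int main(void) {
    MyObject *obj = [[MyObject alloc] init];  // retain count starts at 1
    [obj retain];                             // inline extra_rc incremented
    [obj release];                            // decrement only, object survives
    [obj release];                            // final release, dealloc runs
    return 0;
}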

Source code walkthrough

  1. NSObject.mm – line:2296
- (oneway void)release {
    _objc_rootRelease(self);
}
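
For context: ARC-compiled code does not go through this message send. The compiler instead emits calls to the C entry point objc_release (also in NSObject.mm in this version), which filters nil and tagged pointers and then, when the class has no custom retain/release override, reaches the same rootRelease() path without any objc_msgSend:

__attribute__((aligned(16), flatten, noinline))
void
objc_release(id obj)
{
    if (!obj) return;
    if (obj->isTaggedPointer()) return;
    return obj->release();
}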
  1. NSObject.mm – line:1687
NEVER_INLINE void
_objc_rootRelease(id obj)
{
    ASSERT(obj);

    obj->rootRelease();
}
  1. objc-object.h – line:571
ALWAYS_INLINE bool 
objc_object::rootRelease()
{
    return rootRelease(true, false);
}
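
Note the two arguments: performDealloc = true means dealloc will be sent once the retain count actually reaches zero, while handleUnderflow = false means this invocation is not yet allowed to borrow counts from the side table. When the fast path underflows, it re-enters through rootRelease_underflow, which simply flips that second flag (same file):

NEVER_INLINE uintptr_t
objc_object::rootRelease_underflow(bool performDealloc)
{
    return rootRelease(performDealloc, true);
}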
  1. objc-object.h – line:583
ALWAYS_INLINE bool
objc_object::rootRelease(bool performDealloc, bool handleUnderflow)
{
    if (isTaggedPointer()) return false;

    bool sideTableLocked = false;

    isa_t oldisa;
    isa_t newisa;

 retry:
    do {
        oldisa = LoadExclusive(&isa.bits);
        newisa = oldisa;
        if (slowpath(!newisa.nonpointer)) {
            // Raw-pointer isa: the count lives entirely in the side table.
            ClearExclusive(&isa.bits);
            if (rawISA()->isMetaClass()) return false;
            if (sideTableLocked) sidetable_unlock();
            return sidetable_release(performDealloc);
        }
        // don't check newisa.fast_rr; we already called any RR overrides
        uintptr_t carry;
        newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry);  // extra_rc--
        if (slowpath(carry)) {
            // don't ClearExclusive()
            goto underflow;
        }
    } while (slowpath(!StoreReleaseExclusive(&isa.bits,
                                             oldisa.bits, newisa.bits)));

    if (slowpath(sideTableLocked)) sidetable_unlock();
    return false;

 underflow:
    // newisa.extra_rc-- underflowed: borrow from side table or deallocate

    // abandon newisa to undo the decrement
    newisa = oldisa;

    if (slowpath(newisa.has_sidetable_rc)) {
        if (!handleUnderflow) {
            ClearExclusive(&isa.bits);
            return rootRelease_underflow(performDealloc);
        }

        // Transfer retain count from side table to inline storage.

        if (!sideTableLocked) {
            ClearExclusive(&isa.bits);
            sidetable_lock();
            sideTableLocked = true;
            // Need to start over to avoid a race against
            // the nonpointer -> raw pointer transition.
            goto retry;
        }

        // Try to remove some retain counts from the side table.
        size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);

        // To avoid races, has_sidetable_rc must remain set
        // even if the side table count is now zero.

        if (borrowed > 0) {
            // Side table retain count decreased.
            // Try to add them to the inline count.
            newisa.extra_rc = borrowed - 1;  // redo the original decrement too
            bool stored = StoreReleaseExclusive(&isa.bits,
                                                oldisa.bits, newisa.bits);
            if (!stored) {
                // Inline update failed.
                // Try it again right now. This prevents livelock on LL/SC
                // architectures where the side table access itself may have
                // dropped the reservation.
                isa_t oldisa2 = LoadExclusive(&isa.bits);
                isa_t newisa2 = oldisa2;
                if (newisa2.nonpointer) {
                    uintptr_t overflow;
                    newisa2.bits =
                        addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
                    if (!overflow) {
                        stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits,
                                                       newisa2.bits);
                    }
                }
            }

            if (!stored) {
                // Inline update failed.
                // Put the retains back in the side table.
                sidetable_addExtraRC_nolock(borrowed);
                goto retry;
            }

            // Decrement successful after borrowing from side table.
            // This decrement cannot be the deallocating decrement - the side
            // table lock and has_sidetable_rc bit ensure that if everyone
            // else tried to -release while we worked, the last one would block.
            sidetable_unlock();
            return false;
        }
        else {
            // Side table is empty after all. Fall-through to the dealloc path.
        }
    }

    // Really deallocate.

    if (slowpath(newisa.deallocating)) {
        ClearExclusive(&isa.bits);
        if (sideTableLocked) sidetable_unlock();
        return overrelease_error();
        // does not actually return
    }
    newisa.deallocating = true;
    if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;

    if (slowpath(sideTableLocked)) sidetable_unlock();

    __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);

    if (performDealloc) {
        // Send -dealloc to the object.
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
    }
    return true;
}
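
The heart of the underflow path is plain bookkeeping: up to RC_HALF retain counts are pulled out of the side table, and borrowed - 1 of them go back into the inline extra_rc field (the missing one is the release currently being performed). Here is a standalone sketch of just that arithmetic with hypothetical starting values; RC_HALF is shown for the 8-bit x86_64 extra_rc field (on arm64 the field is wider and RC_HALF is 1ULL << 18):

#include <stdio.h>
#include <stdint.h>

#define RC_HALF (1ULL << 7)  // half the range of x86_64's 8-bit extra_rc

int main(void) {
    uint64_t side_table_rc = 300;  // hypothetical counts spilled earlier
    uint64_t extra_rc = 0;         // inline count has just underflowed

    // Mirrors sidetable_subExtraRC_nolock(RC_HALF):
    // remove at most RC_HALF counts from the side table.
    uint64_t borrowed = side_table_rc < RC_HALF ? side_table_rc : RC_HALF;
    side_table_rc -= borrowed;

    if (borrowed > 0) {
        // Store the borrowed counts inline, redoing the pending decrement.
        extra_rc = borrowed - 1;
        printf("inline extra_rc = %llu, side table keeps %llu\n",
               (unsigned long long)extra_rc,
               (unsigned long long)side_table_rc);
    } else {
        // Side table was empty after all: fall through to deallocation.
        printf("no counts left anywhere: object deallocates\n");
    }
    return 0;
}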
  1. NSObject.mm – line:1523
uintptr_t
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
    ASSERT(!isa.nonpointer);
#endif
    SideTable& table = SideTables()[this];

    bool do_dealloc = false;

    table.lock();
    auto it = table.refcnts.try_emplace(this, SIDE_TABLE_DEALLOCATING);
    auto &refcnt = it.first->second;
    if (it.second) {
        // No entry existed: the count was never incremented,
        // so this release is the deallocating one.
        do_dealloc = true;
    } else if (refcnt < SIDE_TABLE_DEALLOCATING) {
        // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
        // The stored count is already zero: mark the object as
        // deallocating instead of letting the count go negative.
        do_dealloc = true;
        refcnt |= SIDE_TABLE_DEALLOCATING;
    } else if (! (refcnt & SIDE_TABLE_RC_PINNED)) {
        // Decrement the stored reference count unless it is pinned.
        refcnt -= SIDE_TABLE_RC_ONE;
    }
    table.unlock();
    if (do_dealloc  &&  performDealloc) {
        // Call the object's dealloc method.
        ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc));
    }
    return do_dealloc;
}
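
The flag arithmetic above depends on how the side-table word is laid out: the two low bits are flags and the actual count is stored above them. These constants come from NSObject.mm:

#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)   // object has weak references
#define SIDE_TABLE_DEALLOCATING      (1UL<<1)   // object is deallocating
#define SIDE_TABLE_RC_ONE            (1UL<<2)   // one refcount unit
#define SIDE_TABLE_RC_PINNED         (1UL<<(WORD_BITS-1))  // count pinned at max

So a stored value below SIDE_TABLE_DEALLOCATING means the count is already zero (at most the weak bit is set), which is why that branch marks the object as deallocating instead of letting the count go negative, and why the decrement subtracts SIDE_TABLE_RC_ONE (numerically 4) rather than 1.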