Class loading
The entry point for class loading is the _objc_init() function
void _objc_init(void)
{
    static bool initialized = false;
    if (initialized) return;
    initialized = true;

    environ_init();     // load runtime environment variables
    tls_init();         // bind thread-local storage keys
    static_init();      // run libobjc's own C++ static constructors
    runtime_init();     // initialize the runtime's class and category tables
    exception_init();   // initialize libobjc's exception handling
    cache_init();       // register the restartable (PC-resettable) address ranges used by method caches
    _imp_implementationWithBlock_init(); // eagerly initialize libobjc-trampolines.dylib so it need not be loaded the first time imp_implementationWithBlock is called

    // dyld calls map_images when it maps images into memory,
    // load_images when it initializes an image,
    // and unmap_image when it unloads one
    _dyld_objc_notify_register(&map_images, load_images, unmap_image);
}
The _dyld_objc_notify_register() call registers these three function pointers with dyld; dyld then invokes each of them at the corresponding stage of image loading so the runtime can do its work.
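For a sense of how such callbacks behave, the sketch below uses the public dyld API _dyld_register_func_for_add_image from <mach-o/dyld.h> (not the private _dyld_objc_notify_register, which only libobjc may use): the registered function is invoked once for every image already loaded and again for each image loaded afterwards.

#include <mach-o/dyld.h>
#include <stdio.h>

// Called for every image that is already mapped, then for each new one.
static void on_image_added(const struct mach_header *mh, intptr_t slide) {
    printf("image mapped at %p, slide %ld\n", (const void *)mh, (long)slide);
}

int main(void) {
    _dyld_register_func_for_add_image(on_image_added);
    return 0;
}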
map_images
/* paths holds the file paths of all the Objective-C images being mapped,
   mhdrs holds their mach_header pointers */
void
map_images(unsigned count, const char * const paths[],
           const struct mach_header * const mhdrs[])
{
    mutex_locker_t lock(runtimeLock);
    return map_images_nolock(count, paths, mhdrs);
}
The map_images() function only takes the runtime lock; everything else is left to the map_images_nolock() function below.
map_images_nolock
void
map_images_nolock(unsigned mhCount, const char * const mhPaths[],
                  const struct mach_header * const mhdrs[])
{
    static bool firstTime = YES;
    header_info *hList[mhCount];
    uint32_t hCount;
    size_t selrefCount = 0;

    // Perform first-time initialization if necessary.
    if (firstTime) {
        preopt_init();
    }

    // Find all images with Objective-C metadata.
    hCount = 0;

    // Count how many Objective-C classes the images contain.
    int totalClasses = 0;
    int unoptimizedTotalClasses = 0;
    {
        uint32_t i = mhCount;
        while (i--) {
            const headerType *mhdr = (const headerType *)mhdrs[i];

            // Wrap mhdr in a header_info struct (hi), append it to the header list,
            // and accumulate the class counts into totalClasses / unoptimizedTotalClasses
            auto hi = addHeader(mhdr, mhPaths[i], totalClasses, unoptimizedTotalClasses);
            if (!hi) {
                // no objc data in this entry
                continue;
            }
            if (mhdr->filetype == MH_EXECUTE) {
                // Size some data structures based on the main executable's selector refs
#if __OBJC2__
                size_t count;
                _getObjc2SelectorRefs(hi, &count);
                selrefCount += count;
                _getObjc2MessageRefs(hi, &count);
                selrefCount += count;
#else
                _getObjcSelectorRefs(hi, &selrefCount);
#endif
            }

            hList[hCount++] = hi;
        }
    }

    if (firstTime) {
        // Initialize the selector table and register internally used selectors
        sel_init(selrefCount);
        // Initialize the autorelease pool, the side tables that hold
        // reference counts, and the associated-object manager
        arr_init();
    }

    if (hCount > 0) {
        _read_images(hList, hCount, totalClasses, unoptimizedTotalClasses);
    }

    firstTime = NO;

    // Call the registered load-image callbacks for each image once everything is set up
    for (auto func : loadImageFuncs) {
        for (uint32_t i = 0; i < mhCount; i++) {
            func(mhdrs[i]);
        }
    }
}
The map_images_nolock() function collects header information for every image, initializes the selector table and the other containers the runtime needs on first use, and afterwards notifies the registered load-image callbacks. The core work, however, is delegated to _read_images.
_read_images
void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int unoptimizedTotalClasses)
{
header_info *hi;
uint32_t hIndex;
size_t count;
size_t i;
Class *resolvedFutureClasses = nil;
size_t resolvedFutureClassCount = 0;
static bool doneOnce;
bool launchTime = NO;
TimeLogger ts(PrintImageTimes);
runtimeLock.assertLocked();
#define EACH_HEADER \
hIndex = 0; \
hIndex < hCount && (hi = hList[hIndex]); \
hIndex++
if (!doneOnce) {
doneOnce = YES;
launchTime = YES;
/*
This part decides whether to disable non-pointer isa:
if the image contains Swift code older than Swift 3, disable non-pointer isa;
if the app was linked before OS X 10.11, disable non-pointer isa;
if hi has a __DATA,__objc_rawisa section, disable non-pointer isa
*/
#if SUPPORT_NONPOINTER_ISA
// Disable non-pointer isa under some conditions.
# if SUPPORT_INDEXED_ISA
// Disable nonpointer isa if any image contains old Swift code
for (EACH_HEADER) {
if (hi->info()->containsSwift() &&
hi->info()->swiftUnstableVersion() < objc_image_info::SwiftVersion3)
{
DisableNonpointerIsa = true;
break;
}
}
# endif
# if TARGET_OS_OSX
// Disable non-pointer isa if the app is too old
// (linked before OS X 10.11)
if (dyld_get_program_sdk_version() < DYLD_MACOSX_VERSION_10_11) {
DisableNonpointerIsa = true;
}
// Disable non-pointer isa if the app has a __DATA,__objc_rawisa section
// New apps that load old extensions may need this.
for (EACH_HEADER) {
if (hi->mhdr()->filetype != MH_EXECUTE) continue;
unsigned long size;
if (getsectiondata(hi->mhdr(), "__DATA", "__objc_rawisa", &size)) {
DisableNonpointerIsa = true;
}
break; // assume only one MH_EXECUTE image
}
# endif
#endif
// If tagged pointers are disabled, clear the related masks
if (DisableTaggedPointers) {
disableTaggedPointers();
}
// Initialize objc_debug_taggedpointer_obfuscator with a random value to obfuscate tagged pointers, so their payload (and which classes use them) cannot be read directly
initializeTaggedPointerObfuscator();
/*
This part creates the table that stores the named classes,
sized from the class count using NXMapTable's 4/3 load factor
*/
// namedClasses
// Preoptimized classes don't go in this table.
// 4/3 is NXMapTable's load factor
int namedClassesSize =
(isPreoptimized() ? unoptimizedTotalClasses : totalClasses) * 4 / 3;
gdb_objc_realized_classes =
NXCreateMapTable(NXStrValueMapPrototype, namedClassesSize);
}
/*
This part registers all SELs and fixes up @selector references:
_getObjc2SelectorRefs returns every SEL referenced by the image,
sel_registerNameNoLock registers each of them,
and any reference whose selector has the same name but a different
address is rewritten to point at the registered SEL
*/
// Fix up @selector references
static size_t UnfixedSelectors;
{
mutex_locker_t lock(selLock);
for (EACH_HEADER) {
if (hi->hasPreoptimizedSelectors()) continue;
bool isBundle = hi->isBundle();
SEL *sels = _getObjc2SelectorRefs(hi, &count);
UnfixedSelectors += count;
for (i = 0; i < count; i++) {
const char *name = sel_cname(sels[i]);
SEL sel = sel_registerNameNoLock(name, isBundle);
if (sels[i] != sel) {
sels[i] = sel;
}
}
}
}
/*
This part discovers and saves all the classes
*/
// Discover classes. Fix up unresolved future classes. Mark bundle classes.
bool hasDyldRoots = dyld_shared_cache_some_image_overridden();
for (EACH_HEADER) {
// If this hi has already been preoptimized, there is no need to call readClass() to load its class data
if (! mustReadClasses(hi, hasDyldRoots)) {
// Image is sufficiently optimized that we need not call readClass()
continue;
}
// Get all the classes in the image
classref_t const *classlist = _getObjc2ClassList(hi, &count);
bool headerIsBundle = hi->isBundle();
bool headerIsPreoptimized = hi->hasPreoptimizedClasses();
for (i = 0; i < count; i++) {
Class cls = (Class)classlist[i];
// Read the class data and add the class to the gdb_objc_realized_classes table created above and to the allocatedClasses table
Class newCls = readClass(cls, headerIsBundle, headerIsPreoptimized);
// If the class was moved (it resolved a future class), grow the resolvedFutureClasses array and save it there so it can be realized non-lazily below
if (newCls != cls && newCls) {
// Class was moved but not deleted. Currently this occurs
// only when the new class resolved a future class.
// Non-lazily realize the class below.
resolvedFutureClasses = (Class *)
realloc(resolvedFutureClasses,
(resolvedFutureClassCount+1) * sizeof(Class));
resolvedFutureClasses[resolvedFutureClassCount++] = newCls;
}
}
}
/*
This part fixes up the classes that need to be remapped
*/
// Fix up remapped classes
// Class list and nonlazy class list remain unremapped.
// Class refs and super refs are remapped for message dispatching.
// If any classes need to be remapped
if (!noClassesRemapped()) {
for (EACH_HEADER) {
// Get all the class references in hi
Class *classrefs = _getObjc2ClassRefs(hi, &count);
// For each class reference, if the class was reallocated or is an ignored weak-linked class, rewrite the reference to the new class taken from the remapping table
for (i = 0; i < count; i++) {
remapClassRef(&classrefs[i]);
}
// fixme why doesn't test future1 catch the absence of this?
// Get the superclass references in hi
classrefs = _getObjc2SuperRefs(hi, &count);
// Same as above: fix up the superclass references
for (i = 0; i < count; i++) {
remapClassRef(&classrefs[i]);
}
}
}
/*
This part fixes up the function pointers that old messages' IMPs point to
*/
#if SUPPORT_FIXUP
// Fix up old objc_msgSend_fixup call sites
for (EACH_HEADER) {
// Get the message refs in hi
message_ref_t *refs = _getObjc2MessageRefs(hi, &count);
if (count == 0) continue;
// For each message ref, register the SEL and repoint some old IMPs, e.g. the IMP for alloc is redirected to objc_alloc
for (i = 0; i < count; i++) {
fixupMessageRef(refs+i);
}
}
#endif
/*
This part discovers and fixes up protocols
*/
bool cacheSupportsProtocolRoots = sharedCacheSupportsProtocolRoots();
// Discover protocols. Fix up protocol refs.
for (EACH_HEADER) {
extern objc_class OBJC_CLASS_$_Protocol;
Class cls = (Class)&OBJC_CLASS_$_Protocol;
ASSERT(cls);
// Get the hash table that stores protocols
NXMapTable *protocol_map = protocols();
bool isPreoptimized = hi->hasPreoptimizedProtocols();
// Skip reading protocols if this is an image from the shared cache
// and we support roots
// Note, after launch we do need to walk the protocol as the protocol
// in the shared cache is marked with isCanonical() and that may not
// be true if some non-shared cache binary was chosen as the canonical
// definition
// At first launch, skip loading protocols that are already preoptimized in a shared cache that supports roots
if (launchTime && isPreoptimized && cacheSupportsProtocolRoots) {
continue;
}
bool isBundle = hi->isBundle();
// Get every protocol in hi and save it into the protocol_map table obtained above
protocol_t * const *protolist = _getObjc2ProtocolList(hi, &count);
for (i = 0; i < count; i++) {
readProtocol(protolist[i], cls, protocol_map,
isPreoptimized, isBundle);
}
}
/*
This part fixes up protocol references
*/
// Fix up @protocol references
// Preoptimized images may have the right
// answer already but we don't know for sure.
for (EACH_HEADER) {
// At launch time, we know preoptimized image refs are pointing at the
// shared cache definition of a protocol. We can skip the check on
// launch, but have to visit @protocol refs for shared cache images
// loaded later.
// As above, at first launch skip any hi whose protocols were preoptimized in a shared cache that supports roots
if (launchTime && cacheSupportsProtocolRoots && hi->isPreoptimized())
continue;
// Get all the protocol references in hi
protocol_t **protolist = _getObjc2ProtocolRefs(hi, &count);
// For each protocol reference, fix its pointer so it points at the canonical protocol stored in the protocol table
for (i = 0; i < count; i++) {
remapProtocolRef(&protolist[i]);
}
}
/*
This part discovers categories
*/
// Discover categories.
for (EACH_HEADER) {
// Whether the image's categories carry class properties
bool hasClassProperties = hi->info()->hasCategoryClassProperties();
// Closure that processes a category list
auto processCatlist = [&](category_t * const *catlist) {
for (i = 0; i < count; i++) {
// Get the category
category_t *cat = catlist[i];
// Remap the class the category belongs to
Class cls = remapClass(cat->cls);
// Bind the category to the header_info it came from
locstamped_category_t lc{cat, hi};
if (!cls) {
continue;
}
// Process this category.
if (cls->isStubClass()) {
// Stub classes are never realized. Stub classes
// don't know their metaclass until they're
// initialized, so we have to add categories with
// class methods or properties to the stub itself.
// methodizeClass() will find them and add them to
// the metaclass as appropriate.
if (cat->instanceMethods ||
cat->protocols ||
cat->instanceProperties ||
cat->classMethods ||
cat->protocols ||
(hasClassProperties && cat->_classProperties))
{
// For a stub class, save the category contents in the unattached table
objc::unattachedCategories.addForClass(lc, cls);
}
} else {
// First, register the category with its target class.
// Then, rebuild the class's method lists (etc) if
// the class is realized.
if (cat->instanceMethods || cat->protocols
|| cat->instanceProperties)
{
// Instance-side handling: instance methods, protocols, instance properties.
// If the class is already realized, attach the category contents to it directly; otherwise save them in the unattached table
if (cls->isRealized()) {
attachCategories(cls, &lc, 1, ATTACH_EXISTING);
} else {
objc::unattachedCategories.addForClass(lc, cls);
}
}
if (cat->classMethods || cat->protocols
|| (hasClassProperties && cat->_classProperties))
{
// Metaclass-side handling: class methods, protocols, class properties.
// Handled the same way as the instance side
if (cls->ISA()->isRealized()) {
attachCategories(cls->ISA(), &lc, 1, ATTACH_EXISTING | ATTACH_METACLASS);
} else {
objc::unattachedCategories.addForClass(lc, cls->ISA());
}
}
}
}
};
// Fetch all of hi's category data and process it with processCatlist
processCatlist(_getObjc2CategoryList(hi, &count));
processCatlist(_getObjc2CategoryList2(hi, &count));
}
/*
This part realizes the non-lazy classes;
non-lazy classes are the classes that implement +load
*/
// Category discovery MUST BE Late to avoid potential races
// when other threads call the new category code before
// this thread finishes its fixups.
// +load handled by prepare_load_methods()
// Realize non-lazy classes (for +load methods and static instances)
for (EACH_HEADER) {
// Get the non-lazy classes in hi (classes that implement +load)
classref_t const *classlist =
_getObjc2NonlazyClassList(hi, &count);
for (i = 0; i < count; i++) {
// Remap the class to get the correct class pointer
Class cls = remapClass(classlist[i]);
if (!cls) continue;
// Add the class and its metaclass to allocatedClasses
addClassTableEntry(cls);
// Realize the class
realizeClassWithoutSwift(cls, nil);
}
}
/*
This part realizes the future classes resolved earlier
(the classes collected above in resolvedFutureClasses)
*/
// Realize newly-resolved future classes, in case CF manipulates them
if (resolvedFutureClasses) {
// Realize each future class and mark it and its subclasses as requiring raw isa pointers (i.e. isa points directly to the class)
for (i = 0; i < resolvedFutureClassCount; i++) {
Class cls = resolvedFutureClasses[i];
realizeClassWithoutSwift(cls, nil);
cls->setInstancesRequireRawIsaRecursively(false/*inherited*/);
}
// When done, free the array that held the resolved future classes
free(resolvedFutureClasses);
}
#undef EACH_HEADER
}
The _read_images function does a lot of things. To summarize, it:
- Decides whether non-pointer isa can be used to optimize isa
- Initializes the tagged-pointer obfuscator to protect tagged-pointer payloads
- Creates the table that holds named classes (gdb_objc_realized_classes)
- Registers all SELs and fixes up @selector references
- Reads and saves all classes into the table created above, collecting resolved future classes into a separate array
- Fixes up classes that need to be remapped
- Fixes up the IMPs of old message call sites
- Discovers and registers protocols
- Fixes up protocol references that need to be remapped
- Discovers and saves categories, attaching a category's contents to its class if that class is already realized
- Realizes the non-lazy classes
- Realizes the resolved future classes
In other words, this function pulls selectors, classes, protocols, and categories out of the image files and processes each of them. A small illustration of the selector-registration step follows below.
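As an illustration of what registering a SEL guarantees, the sketch below uses the public <objc/runtime.h> API (the selector name is arbitrary). Selectors registered under the same name from different places compare equal, which is exactly the invariant the fix-up loop above restores for each image's @selector references.

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

int main(void) {
    // Registering the same name twice yields the same uniqued SEL ...
    SEL a = sel_registerName("doSomething:");
    SEL b = sel_getUid("doSomething:");
    // ... and it matches the compiler-generated @selector reference.
    SEL c = @selector(doSomething:);
    NSLog(@"%d %d", a == b, b == c);   // prints "1 1"
    return 0;
}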
realizeClassWithoutSwift
static Class realizeClassWithoutSwift(Class cls, Class previously)
{
    runtimeLock.assertLocked();

    const class_ro_t *ro;
    class_rw_t *rw;
    Class supercls;
    Class metacls;
    bool isMeta;

    if (!cls) return nil;
    // If the class is already realized, return it directly
    if (cls->isRealized()) return cls;
    ASSERT(cls == remapClass(cls));

    // fixme verify class is not in an un-dlopened part of the shared cache?

    ro = (const class_ro_t *)cls->data();
    if (ro->flags & RO_FUTURE) {
        // This was a future class. rw data is already allocated.
        rw = cls->data();
        ro = cls->data()->ro;
        // Change the flags to mark the class as realized
        cls->changeInfo(RW_REALIZED|RW_REALIZING, RW_FUTURE);
    } else {
        // Normal class. Allocate writeable class data (rw),
        // store the compile-time data (ro) inside it,
        // mark the class as realized, and attach rw to the class.
        rw = (class_rw_t *)calloc(sizeof(class_rw_t), 1);
        rw->ro = ro;
        rw->flags = RW_REALIZED|RW_REALIZING;
        cls->setData(rw);
    }

    isMeta = ro->flags & RO_META;
#if FAST_CACHE_META
    if (isMeta) cls->cache.setBit(FAST_CACHE_META);
#endif
    rw->version = isMeta ? 7 : 0;  // old runtime went up to 6

    // Choose an index for this class.
    // Sets cls->instancesRequireRawIsa if no more indexes are available,
    // i.e. instances then fall back to a raw isa pointing directly at the class.
    cls->chooseClassArrayIndex();

    // Realize superclass and metaclass, if they aren't already.
    // This needs to be done after RW_REALIZED is set above, for root classes.
    // This needs to be done after class index is chosen, for root metaclasses.
    // This assumes that none of those classes have Swift contents,
    // or that Swift's initializers have already been called.
    // fixme that assumption will be wrong if we add support
    // for ObjC subclasses of Swift classes.
    supercls = realizeClassWithoutSwift(remapClass(cls->superclass), nil);
    metacls = realizeClassWithoutSwift(remapClass(cls->ISA()), nil);

#if SUPPORT_NONPOINTER_ISA
    if (isMeta) {
        // Metaclasses do not need any features from non pointer ISA.
        // This allows a fast path for classes in objc_retain/objc_release.
        cls->setInstancesRequireRawIsa();
    } else {
        // Disable non-pointer isa for some classes and/or platforms.
        // Set instancesRequireRawIsa.
        bool instancesRequireRawIsa = cls->instancesRequireRawIsa();
        bool rawIsaIsInherited = false;
        static bool hackedDispatch = false;

        if (DisableNonpointerIsa) {
            // Non-pointer isa disabled by environment or app SDK version
            instancesRequireRawIsa = true;
        }
        else if (!hackedDispatch  &&  0 == strcmp(ro->name, "OS_object")) {
            // hack for libdispatch et al - isa also acts as vtable pointer
            hackedDispatch = true;
            instancesRequireRawIsa = true;
        }
        else if (supercls  &&  supercls->superclass  &&
                 supercls->instancesRequireRawIsa())
        {
            // This is also propagated by addSubclass()
            // but nonpointer isa setup needs it earlier.
            // Special case: instancesRequireRawIsa does not propagate
            // from root class to root metaclass
            instancesRequireRawIsa = true;
            rawIsaIsInherited = true;
        }

        if (instancesRequireRawIsa) {
            // The class does not support non-pointer isa:
            // mark it and its subclasses as requiring raw isa
            cls->setInstancesRequireRawIsaRecursively(rawIsaIsInherited);
        }
    }
// SUPPORT_NONPOINTER_ISA
#endif

    // Update superclass and metaclass in case of remapping
    cls->superclass = supercls;
    cls->initClassIsa(metacls);

    // Reconcile instance variable offsets / layout.
    // This may reallocate class_ro_t, updating our ro variable.
    // For non-metaclasses with a superclass, adjust this class's
    // memory layout against its superclass.
    if (supercls  &&  !isMeta) reconcileInstanceVariables(cls, supercls, ro);

    // Set fastInstanceSize if it wasn't set already.
    cls->setInstanceSize(ro->instanceSize);

    // Copy some flags from ro to rw
    if (ro->flags & RO_HAS_CXX_STRUCTORS) {
        cls->setHasCxxDtor();
        if (! (ro->flags & RO_HAS_CXX_DTOR_ONLY)) {
            cls->setHasCxxCtor();
        }
    }

    // Propagate the associated objects forbidden flag from ro or from
    // the superclass: if compile-time data (ro) or the superclass forbids
    // associated objects, this class forbids them too.
    if ((ro->flags & RO_FORBIDS_ASSOCIATED_OBJECTS) ||
        (supercls && supercls->forbidsAssociatedObjects()))
    {
        rw->flags |= RW_FORBIDS_ASSOCIATED_OBJECTS;
    }

    // Connect this class to its superclass's subclass list,
    // or register it as a root class if it has no superclass
    if (supercls) {
        addSubclass(supercls, cls);
    } else {
        addRootClass(cls);
    }

    // Attach categories
    methodizeClass(cls, previously);

    return cls;
}
The realizeClassWithoutSwift function carries out the realization of a class:
- Creates a new rw structure, stores the original ro structure inside it, and installs rw as the class's data
- Realizes and attaches the remapped superclass and metaclass
- Decides whether instances of the class can use non-pointer isa
- Reconciles the memory layout of the class with its superclass
- Adds the properties, methods, and protocols from ro and from any pending categories to rw

Once realized, a class can be inspected through the public runtime API, as in the sketch below.
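For example, a realized class's superclass, metaclass, and instance size are all observable through the public <objc/runtime.h> API; a minimal sketch:

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

int main(void) {
    Class cls = [NSString class];
    Class meta = object_getClass(cls);   // a class object's isa is its metaclass
    NSLog(@"superclass:    %@", class_getSuperclass(cls));
    NSLog(@"is metaclass:  %d", class_isMetaClass(meta));
    NSLog(@"instance size: %zu", class_getInstanceSize(cls));
    return 0;
}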
load_images
void
load_images(const char *path __unused, const struct mach_header *mh)
{
    // Return without taking locks if there are no +load methods here.
    // "No +load methods" means the image contains neither non-lazy
    // classes nor non-lazy categories.
    if (!hasLoadMethods((const headerType *)mh)) return;

    recursive_mutex_locker_t lock(loadMethodLock);

    // Discover load methods
    {
        mutex_locker_t lock2(runtimeLock);
        // Collect all the +load methods that need to be called
        prepare_load_methods((const headerType *)mh);
    }

    // Call +load methods (without runtimeLock - re-entrant)
    call_load_methods();
}
The load_images function does only one thing: it calls the +load methods of classes and their categories. This happens in two steps: first prepare_load_methods collects the +load methods, then call_load_methods invokes them.
prepare_load_methods
void prepare_load_methods(const headerType *mhdr)
{
    size_t count, i;

    runtimeLock.assertLocked();

    // Get all the non-lazy classes in the image
    classref_t const *classlist =
        _getObjc2NonlazyClassList(mhdr, &count);
    // For each class, schedule its +load (and, first, its superclasses' +load)
    // into the loadable_classes array
    for (i = 0; i < count; i++) {
        schedule_class_load(remapClass(classlist[i]));
    }

    // Get all the non-lazy categories in the image
    category_t * const *categorylist = _getObjc2NonlazyCategoryList(mhdr, &count);
    for (i = 0; i < count; i++) {
        category_t *cat = categorylist[i];
        Class cls = remapClass(cat->cls);
        if (!cls) continue;  // category for ignored weak-linked class
        // Realize the class the category belongs to if it isn't realized yet
        realizeClassWithoutSwift(cls, nil);
        ASSERT(cls->ISA()->isRealized());
        // Add the category and its +load method to the loadable_categories array
        add_category_to_loadable_list(cat);
    }
}
The first step is to collect all the +load methods of the classes and categories; that is what prepare_load_methods does, with the help of the functions below.
static void schedule_class_load(Class cls)
{
    if (!cls) return;
    ASSERT(cls->isRealized());  // _read_images should realize

    // If this class's +load has already been scheduled, stop here
    if (cls->data()->flags & RW_LOADED) return;

    // Ensure superclass-first ordering: schedule the superclass's +load first
    schedule_class_load(cls->superclass);

    // Save the class and its +load method into the loadable_classes array
    add_class_to_loadable_list(cls);
    // Mark the class as scheduled
    cls->setInfo(RW_LOADED);
}
void add_class_to_loadable_list(Class cls)
{
    IMP method;

    loadMethodLock.assertLocked();

    // Get the class's +load implementation
    method = cls->getLoadMethod();
    // Don't bother if cls has no +load method
    if (!method) return;

    // If loadable_classes is full, grow it
    if (loadable_classes_used == loadable_classes_allocated) {
        loadable_classes_allocated = loadable_classes_allocated*2 + 16;
        loadable_classes = (struct loadable_class *)
            realloc(loadable_classes,
                    loadable_classes_allocated *
                    sizeof(struct loadable_class));
    }

    // Record the class and its +load IMP, then bump the used count
    loadable_classes[loadable_classes_used].cls = cls;
    loadable_classes[loadable_classes_used].method = method;
    loadable_classes_used++;
}
The two functions above handle classes: they walk up the inheritance chain, use the RW_LOADED flag to check whether a class's +load has already been scheduled, and if not, pair the class with its +load IMP in a loadable_class structure and append it to the loadable_classes array for later use.
One small detail: a class's superclasses are appended to the list before the class itself, which means a superclass's +load is called before its subclass's.
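A minimal illustration of that ordering, with two hypothetical classes assumed to be compiled into the same image:

#import <Foundation/Foundation.h>

@interface Animal : NSObject
@end
@implementation Animal
+ (void)load { NSLog(@"Animal +load"); }
@end

@interface Dog : Animal
@end
@implementation Dog
// Logged after "Animal +load": the superclass is scheduled first.
+ (void)load { NSLog(@"Dog +load"); }
@end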
void add_category_to_loadable_list(Category cat)
{
    IMP method;

    loadMethodLock.assertLocked();

    // Get the category's +load method
    method = _category_getLoadMethod(cat);

    // Don't bother if cat has no +load method
    if (!method) return;

    // If loadable_categories is full, grow it
    if (loadable_categories_used == loadable_categories_allocated) {
        loadable_categories_allocated = loadable_categories_allocated*2 + 16;
        loadable_categories = (struct loadable_category *)
            realloc(loadable_categories,
                    loadable_categories_allocated *
                    sizeof(struct loadable_category));
    }

    // Record the category and its +load IMP, then bump the used count
    loadable_categories[loadable_categories_used].cat = cat;
    loadable_categories[loadable_categories_used].method = method;
    loadable_categories_used++;
}
Categories are handled much like classes: add_category_to_loadable_list pairs each category with its +load method in a loadable_category structure and appends it to the loadable_categories array.
call_load_methods
void call_load_methods(void)
{
    static bool loading = NO;
    bool more_categories;

    loadMethodLock.assertLocked();

    // Re-entrant calls do nothing; the outermost call will finish the job.
    if (loading) return;
    loading = YES;

    // Push an autorelease pool around the +load calls
    void *pool = objc_autoreleasePoolPush();

    do {
        // 1. Repeatedly call class +loads until there aren't any more.
        //    loadable_classes_used is the number of pending class +loads.
        while (loadable_classes_used > 0) {
            call_class_loads();
        }

        // 2. Call category +loads ONCE.
        //    more_categories reports whether new categories became loadable.
        more_categories = call_category_loads();

        // 3. Run more +loads if there are classes OR more untried categories.
    } while (loadable_classes_used > 0  ||  more_categories);

    // Pop the autorelease pool
    objc_autoreleasePoolPop(pool);

    loading = NO;
}
In call_load_methods, a static flag first guards the function against re-entrant calls, then an autorelease pool is pushed to cover any objects autoreleased inside +load, and finally a loop works through the collected data. The loop also makes the ordering visible: class +load methods are always drained before category +load methods are tried.
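The class-before-category ordering can be seen with a hypothetical class and a category on it, again assumed to live in the same image:

#import <Foundation/Foundation.h>

@interface Widget : NSObject
@end
@implementation Widget
+ (void)load { NSLog(@"Widget +load"); }
@end

@interface Widget (Extras)
@end
@implementation Widget (Extras)
// Logged after "Widget +load": class +loads are drained before category +loads.
+ (void)load { NSLog(@"Widget (Extras) +load"); }
@end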
static void call_class_loads(void)
{
    int i;

    // Detach the current loadable list into temporary variables
    struct loadable_class *classes = loadable_classes;
    int used = loadable_classes_used;
    // Reset the global list so new +loads can be scheduled while we run
    loadable_classes = nil;
    loadable_classes_allocated = 0;
    loadable_classes_used = 0;

    // Call all +loads for the detached list.
    for (i = 0; i < used; i++) {
        Class cls = classes[i].cls;
        load_method_t load_method = (load_method_t)classes[i].method;
        if (!cls) continue;
        // Call the +load method directly through its IMP
        (*load_method)(cls, @selector(load));
    }

    // Destroy the detached list.
    if (classes) free(classes);
}
static bool call_category_loads(void)
{
    int i, shift;
    bool new_categories_added = NO;

    // Detach the current loadable list into temporary variables
    struct loadable_category *cats = loadable_categories;
    int used = loadable_categories_used;
    int allocated = loadable_categories_allocated;
    loadable_categories = nil;
    loadable_categories_allocated = 0;
    loadable_categories_used = 0;

    // Call all +loads for the detached list.
    for (i = 0; i < used; i++) {
        Category cat = cats[i].cat;
        load_method_t load_method = (load_method_t)cats[i].method;
        Class cls;
        if (!cat) continue;
        cls = _category_getClass(cat);
        // cls->isLoadable() is always true in __OBJC2__
        if (cls  &&  cls->isLoadable()) {
            // Call the category's +load method directly through its IMP,
            // then clear the slot
            (*load_method)(cls, @selector(load));
            cats[i].cat = nil;
        }
    }

    // Compact detached list (order-preserving):
    // drop the categories whose +load has been called and count what's left
    shift = 0;
    for (i = 0; i < used; i++) {
        if (cats[i].cat) {
            cats[i-shift] = cats[i];
        } else {
            shift++;
        }
    }
    used -= shift;

    // Copy any new +load candidates from the new list to the detached list.
    new_categories_added = (loadable_categories_used > 0);
    for (i = 0; i < loadable_categories_used; i++) {
        if (used == allocated) {
            allocated = allocated*2 + 16;
            cats = (struct loadable_category *)
                realloc(cats, allocated *
                        sizeof(struct loadable_category));
        }
        cats[used++] = loadable_categories[i];
    }

    // Destroy the new list.
    if (loadable_categories) free(loadable_categories);

    // Reattach the (now augmented) detached list.
    // But if there's nothing left to load, destroy the list.
    if (used) {
        // Some categories still have uncalled +loads:
        // point loadable_categories back at the detached array
        loadable_categories = cats;
        loadable_categories_used = used;
        loadable_categories_allocated = allocated;
    } else {
        if (cats) free(cats);
        loadable_categories = nil;
        loadable_categories_used = 0;
        loadable_categories_allocated = 0;
    }

    // Report whether new categories were scheduled while we were running
    return new_categories_added;
}
These two functions perform the actual class and category +load calls. The logic is similar in both: iterate over the detached array and invoke each stored +load IMP directly.
unmap_image
void
unmap_image(const char *path __unused, const struct mach_header *mh)
{
recursive_mutex_locker_t lock(loadMethodLock);
mutex_locker_t lock2(runtimeLock);
unmap_image_nolock(mh);
}
The unmap_image function is just an entry point, leaving the main work to unmap_image_nolock.
void
unmap_image_nolock(const struct mach_header *mh)
{
    header_info *hi;

    // Find the runtime's header_info struct for the image
    for (hi = FirstHeader; hi != NULL; hi = hi->getNext()) {
        if (hi->mhdr() == (const headerType *)mh) {
            break;
        }
    }

    // If it isn't found, there is nothing to do
    if (!hi) return;

    // Unload the image's data
    _unload_image(hi);

    // Remove header_info from the header list and free it
    removeHeader(hi);
    free(hi);
}
unmap_image_nolock does two things: it unloads the data recorded in hi, and it removes hi from the header list and frees it.
_unload_image
void _unload_image(header_info *hi)
{
    size_t count, i;

    loadMethodLock.assertLocked();
    runtimeLock.assertLocked();

    // Unload unattached categories and categories waiting for +load.

    // Ignore __objc_catlist2. We don't support unloading Swift
    // and we never will.

    // Get the image's categories
    category_t * const *catlist = _getObjc2CategoryList(hi, &count);
    for (i = 0; i < count; i++) {
        category_t *cat = catlist[i];
        Class cls = remapClass(cat->cls);
        if (!cls) continue;  // category for ignored weak-linked class
        // fixme for MH_DYLIB cat's class may have been unloaded already

        // unattached list:
        // remove the category from the class's unattached-category list
        objc::unattachedCategories.eraseCategoryForClass(cat, cls);

        // +load queue:
        // remove the category from the array of categories waiting for +load
        remove_category_from_loadable_list(cat);
    }

    // Unload classes.

    // Gather classes from both __DATA,__objc_classlist
    // and __DATA,__objc_nlclslist. arclite's hack puts a class in the latter
    // only, and we need to unload that class if we unload an arclite image.

    objc::DenseSet<Class> classes{};
    classref_t const *classlist;

    // Get the image's class list, remap each class, and collect it
    classlist = _getObjc2ClassList(hi, &count);
    for (i = 0; i < count; i++) {
        Class cls = remapClass(classlist[i]);
        if (cls) classes.insert(cls);
    }

    // Do the same with the non-lazy class list
    classlist = _getObjc2NonlazyClassList(hi, &count);
    for (i = 0; i < count; i++) {
        Class cls = remapClass(classlist[i]);
        if (cls) classes.insert(cls);
    }

    // First detach classes from each other. Then free each class.
    // This avoids bugs where this loop unloads a subclass before its superclass.
    for (Class cls : classes) {
        // Remove the class from the array of classes waiting for +load
        remove_class_from_loadable_list(cls);
        // Detach the metaclass, then the class itself
        detach_class(cls->ISA(), YES);
        detach_class(cls, NO);
    }
    for (Class cls : classes) {
        // Free the metaclass, then the class itself
        free_class(cls->ISA());
        free_class(cls);
    }

    // XXX FIXME -- Clean up protocols:
    // <rdar://problem/9033191> Support unloading protocols at dylib/image unload time

    // fixme DebugUnload
}
To unload hi's data, _unload_image detaches the image's categories, classes, and metaclasses from the structures that reference them and then frees them. The detaching is done by detach_class; free_class releases the memory.
detach_class
static void detach_class(Class cls, bool isMeta)
{
    runtimeLock.assertLocked();

    // categories not yet attached to this class:
    // remove the class from the unattached-categories table
    objc::unattachedCategories.eraseClass(cls);

    // superclass's subclass list
    if (cls->isRealized()) {
        Class supercls = cls->superclass;
        if (supercls) {
            // If the class has a superclass, remove it from
            // the superclass's subclass list
            removeSubclass(supercls, cls);
        } else {
            // Otherwise remove it from the root class list
            removeRootClass(cls);
        }
    }

    // class tables and +load queue
    if (!isMeta) {
        // For non-metaclasses, remove the class from
        // gdb_objc_realized_classes (the table of all named classes)
        // and from nonmeta_class_map
        removeNamedClass(cls, cls->mangledName());
    }
    // Remove the class from the allocatedClasses table
    objc::allocatedClasses.get().erase(cls);
}
The obvious thing in detach_class is to remove classes and metaclasses from the various containers that hold them
free_class
static void free_class(Class cls)
{
    runtimeLock.assertLocked();

    // Stop if the class was never realized
    if (!cls->isRealized()) return;

    auto rw = cls->data();
    auto ro = rw->ro;

    // Delete the class's method cache
    cache_delete(cls);

    // Free each method's type string, then the method list itself
    for (auto& meth : rw->methods) {
        try_free(meth.types);
    }
    rw->methods.tryFree();

    // Free each ivar's offset, name, and type strings,
    // then the ivar list itself
    const ivar_list_t *ivars = ro->ivars;
    if (ivars) {
        for (auto& ivar : *ivars) {
            try_free(ivar.offset);
            try_free(ivar.name);
            try_free(ivar.type);
        }
        try_free(ivars);
    }

    // Free each property's name and attribute strings,
    // then the property list itself
    for (auto& prop : rw->properties) {
        try_free(prop.name);
        try_free(prop.attributes);
    }
    rw->properties.tryFree();

    // Free the protocol list
    rw->protocols.tryFree();

    // Free the ivar layout, the weak-ivar layout, and the class name
    try_free(ro->ivarLayout);
    try_free(ro->weakIvarLayout);
    try_free(ro->name);

    // Free ro, then rw, then the class itself
    try_free(ro);
    try_free(rw);
    try_free(cls);
}
free_class frees the various data structures associated with the class and, finally, the class itself.
removeHeader
void removeHeader(header_info *hi)
{
    header_info *prev = NULL;
    header_info *current = NULL;

    // Walk the header list looking for hi
    for (current = FirstHeader; current != NULL; current = current->getNext()) {
        if (current == hi) {
            header_info *deadHead = current;

            // Remove from the linked list:
            // if there is a previous node, point it past the dead node;
            // otherwise advance the head pointer.
            if (prev)
                prev->setNext(current->getNext());
            else
                FirstHeader = current->getNext();  // no prev so removing head

            // Update LastHeader if necessary.
            if (LastHeader == deadHead) {
                LastHeader = NULL;  // will be recomputed next time it's used
            }

            break;
        }
        prev = current;
    }

    // If hi is not part of the dyld shared cache, remove its data segments
    // from the address ranges the runtime tracks
#if __OBJC2__
    if ((hi->mhdr()->flags & MH_DYLIB_IN_CACHE) == 0) {
        foreach_data_segment(hi->mhdr(), [](const segmentType *seg, intptr_t slide) {
            uintptr_t start = (uintptr_t)seg->vmaddr + slide;
            objc::dataSegmentsRanges.remove(start, start + seg->vmsize);
        });
    }
#endif
}
There is less logic in removeHeader than in _unload_image: it simply removes hi from the linked list and from the data-segment ranges that track it.
Object creation
There are two ways to get instance objects:
[NSObject new];
or
[[NSObject alloc] init];
The class method + (instancetype)new is implemented as:
// Calls [cls new]
id
objc_opt_new(Class cls)
{
#if __OBJC2__
    // If the class has no custom core methods
    // (new/self/class/respondsToSelector:/isKindOfClass:),
    // go straight to callAlloc followed by init
    if (fastpath(cls && !cls->ISA()->hasCustomCore())) {
        return [callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/) init];
    }
#endif
    // Otherwise send the +new message so the custom implementation runs
    return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(new));
}
The class method + (instancetype)alloc:
// Calls [cls alloc].
id
objc_alloc(Class cls)
{
return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/);
}
So, in the common case, new is simply alloc followed by init.
alloc
// Call [cls alloc] or [cls allocWithZone:nil], with appropriate
// shortcutting optimizations.
static ALWAYS_INLINE id
callAlloc(Class cls, bool checkNil, bool allocWithZone=false)
{
#if __OBJC2__
    // If nil-checking is requested and the class is nil, return nil
    if (slowpath(checkNil && !cls)) return nil;

    // If no class in the inheritance chain overrides alloc/allocWithZone:,
    // call _objc_rootAllocWithZone directly
    if (fastpath(!cls->ISA()->hasCustomAWZ())) {
        return _objc_rootAllocWithZone(cls, nil);
    }
#endif

    // No shortcuts available: send the message so the custom
    // alloc/allocWithZone: implementation runs
    if (allocWithZone) {
        return ((id(*)(id, SEL, struct _NSZone *))objc_msgSend)(cls, @selector(allocWithZone:), nil);
    }
    return ((id(*)(id, SEL))objc_msgSend)(cls, @selector(alloc));
}
The job of the callAlloc function is to decide whether to call the default _objc_rootAllocWithZone function directly or to send a message so that a custom alloc implementation runs.
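As an illustration, a hypothetical class that overrides +alloc forces the message-send path, while classes that don't override it take the _objc_rootAllocWithZone shortcut. A sketch:

#import <Foundation/Foundation.h>

@interface Tracker : NSObject
@end

@implementation Tracker
// Because +alloc is overridden somewhere in the chain, callAlloc cannot take
// the shortcut for this class and ends up in objc_msgSend(cls, @selector(alloc)).
+ (instancetype)alloc {
    NSLog(@"custom +alloc");
    return [super alloc];
}
@end

int main(void) {
    @autoreleasepool {
        (void)[[Tracker alloc] init];    // logs "custom +alloc"
        (void)[[NSObject alloc] init];   // no override: shortcut, nothing logged
    }
    return 0;
}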
id
_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone __unused)
{
// allocWithZone under __OBJC2__ ignores the zone parameter
return _class_createInstanceFromZone(cls, 0, nil,
OBJECT_CONSTRUCT_CALL_BADALLOC);
}
static ALWAYS_INLINE id
_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone,
                              int construct_flags = OBJECT_CONSTRUCT_NONE,
                              bool cxxConstruct = true,
                              size_t *outAllocatedSize = nil)
{
    ASSERT(cls->isRealized());

    // Read class's info bits all at once for performance:
    // does the class have a C++ constructor / destructor,
    // and can its instances use non-pointer isa?
    bool hasCxxCtor = cxxConstruct && cls->hasCxxCtor();
    bool hasCxxDtor = cls->hasCxxDtor();
    bool fast = cls->canAllocNonpointer();
    size_t size;

    // Compute the instance size (including any extra bytes requested)
    size = cls->instanceSize(extraBytes);
    if (outAllocatedSize) *outAllocatedSize = size;

    id obj;
    if (zone) {
        // Allocate zeroed memory from the given malloc zone
        obj = (id)malloc_zone_calloc((malloc_zone_t *)zone, 1, size);
    } else {
        // Allocate and zero-fill heap memory
        obj = (id)calloc(1, size);
    }
    // If allocation failed, call the bad-alloc handler or return nil
    if (slowpath(!obj)) {
        if (construct_flags & OBJECT_CONSTRUCT_CALL_BADALLOC) {
            return _objc_callBadAllocHandler(cls);
        }
        return nil;
    }

    if (!zone && fast) {
        // Initialize a non-pointer isa (isa_t)
        obj->initInstanceIsa(cls, hasCxxDtor);
    } else {
        // Use raw pointer isa on the assumption that they might be
        // doing something weird with the zone or RR.
        obj->initIsa(cls);
    }

    // If there is no C++ constructor, we're done
    if (fastpath(!hasCxxCtor)) {
        return obj;
    }

    // Otherwise run the C++ constructors
    construct_flags |= OBJECT_CONSTRUCT_FREE_ONFAILURE;
    return object_cxxConstructFromClass(obj, cls, construct_flags);
}
The alloc method does most of its work in the _class_createInstanceFromZone function: it allocates memory for the object and performs some initial setup.
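From the outside, the size that instanceSize computes is visible through class_getInstanceSize, and malloc_size reports what the allocator actually handed out (it may round up). A minimal sketch, assuming a plain NSObject instance:

#import <Foundation/Foundation.h>
#import <objc/runtime.h>
#import <malloc/malloc.h>

int main(void) {
    @autoreleasepool {
        NSObject *obj = [[NSObject alloc] init];
        // Size requested by the class vs. size of the heap block it received
        NSLog(@"instance size:  %zu", class_getInstanceSize([NSObject class]));
        NSLog(@"allocated size: %zu", malloc_size((__bridge const void *)obj));
    }
    return 0;
}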
inline void
objc_object::initInstanceIsa(Class cls, bool hasCxxDtor)
{
    // The caller guarantees this class supports non-pointer isa
    ASSERT(!cls->instancesRequireRawIsa());
    ASSERT(hasCxxDtor == cls->hasCxxDtor());

    initIsa(cls, true, hasCxxDtor);
}
inline void
objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor)
{
    ASSERT(!isTaggedPointer());

    if (!nonpointer) {
        // Raw isa: isa points directly to cls
        isa = isa_t((uintptr_t)cls);
    } else {
        ASSERT(!DisableNonpointerIsa);
        ASSERT(!cls->instancesRequireRawIsa());

        // Build the isa in a temporary isa_t
        isa_t newisa(0);

        // ISA_MAGIC_VALUE sets isa.magic and isa.nonpointer;
        // nonpointer == 1 records that this object's isa is a packed isa_t
        newisa.bits = ISA_MAGIC_VALUE;
        // has_cxx_dtor records whether the object has a C++ destructor
        newisa.has_cxx_dtor = hasCxxDtor;
        // shiftcls stores the class pointer. Class pointers are at least
        // 8-byte aligned, so their low 3 bits are always zero and can be
        // shifted away to make room for the other isa fields.
        newisa.shiftcls = (uintptr_t)cls >> 3;

        // This write must be performed in a single store in some cases
        // (for example when realizing a class because other threads
        // may simultaneously try to use the class).
        // fixme use atomics here to guarantee single-store and to
        // guarantee memory order w.r.t. the class index table
        // ...but not too atomic because we don't want to hurt instantiation

        // Publish the temporary value into the real isa
        isa = newisa;
    }
}
The initialization records which kind of isa the object uses, notes whether the object has a C++ destructor, and, most importantly, stores a pointer to the object's class (in shiftcls for non-pointer isa).
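A standalone demonstration of why the 3-bit shift is lossless (this only illustrates pointer alignment; the real isa bit layout varies by platform and is not reproduced here):

#import <Foundation/Foundation.h>
#import <objc/runtime.h>
#import <assert.h>

int main(void) {
    Class cls = [NSObject class];
    uintptr_t raw = (uintptr_t)(__bridge void *)cls;
    // Class structures are at least 8-byte aligned, so the low 3 bits are 0 ...
    assert((raw & 0x7) == 0);
    // ... which is why shiftcls can drop them and restore the pointer exactly.
    uintptr_t shifted = raw >> 3;
    assert((shifted << 3) == raw);
    NSLog(@"class %p, shiftcls payload 0x%lx", cls, (unsigned long)shifted);
    return 0;
}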
init
- (id)init {
return _objc_rootInit(self);
}
id
_objc_rootInit(id obj)
{
// In practice, it will be hard to rely on this function.
// Many classes do not properly chain -init calls.
return obj;
}
init doesn't actually do anything except return the object that alloc created; it exists to be overridden by subclasses.
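The conventional override pattern, shown here with a hypothetical Person class, chains to super (which ultimately reaches _objc_rootInit and just returns self) and then performs its own setup:

#import <Foundation/Foundation.h>

@interface Person : NSObject
@property (nonatomic, copy) NSString *name;
@end

@implementation Person
- (instancetype)init {
    self = [super init];       // NSObject's -init ends in _objc_rootInit
    if (self) {
        _name = @"unnamed";    // subclass-specific setup goes here
    }
    return self;
}
@end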
__builtin_expect
The callAlloc function uses two macros, slowpath and fastpath, which are defined as:
#define fastpath(x) (__builtin_expect(bool(x), 1))
#define slowpath(x) (__builtin_expect(bool(x), 0))
Both macros actually use the same function __builtin_expect, which was introduced in GCC v2.96 and is declared as follows:
long __builtin_expect(long exp, long c);
It tells the compiler that exp == c is the likely outcome, so fastpath(x) means x is expected to be true and slowpath(x) means x is expected to be false; the compiler can then lay out the generated code so the expected branch is the cheap, straight-line one.
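A small sketch of using the same hint in ordinary code; the macro names mirror the runtime's, but this is only an illustration (note the plain C cast instead of the C++-style bool(x) used by libobjc):

#include <stdbool.h>
#include <stdio.h>

// Same idea as libobjc's macros, spelled as plain C
#define fastpath(x) (__builtin_expect((bool)(x), 1))
#define slowpath(x) (__builtin_expect((bool)(x), 0))

static int first_digit(const char *s) {
    // Callers almost never pass NULL, so mark the check as the slow path ...
    if (slowpath(s == NULL)) return -1;
    // ... and the digit case as the fast path, so the compiler keeps the
    // expected branch on the straight-line path.
    if (fastpath(s[0] >= '0' && s[0] <= '9')) return s[0] - '0';
    return -1;
}

int main(void) {
    printf("%d %d\n", first_digit("7a"), first_digit(NULL));  // prints "7 -1"
    return 0;
}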