Runtime 之 消息发送流程解析这一节已经介绍了消息发送的整个流程,过程中涉及到的方法缓存的插入,将在本节进行解析。
-
缓存结构信息
cache_t
/// Per-class method cache: an open-addressing hash table mapping SEL -> IMP.
/// The storage layout of the buckets pointer and the mask depends on the
/// CACHE_MASK_STORAGE configuration for the target platform.
struct cache_t {
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
    /// Hash table of cached method entries (array of bucket_t).
    explicit_atomic<struct bucket_t *> _buckets;
    /// capacity - 1; doubles as the hash mask applied to bucket indices.
    explicit_atomic<mask_t> _mask;
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    /// Mask (high 16 bits) and buckets pointer (low bits) packed in one word.
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;

    // How much the mask is shifted by.
    static constexpr uintptr_t maskShift = 48;

    // Additional bits after the mask which must be zero. msgSend
    // takes advantage of these additional bits to construct the value
    // `mask << 4` from `_maskAndBuckets` in a single instruction.
    static constexpr uintptr_t maskZeroBits = 4;

    // The largest mask value we can store.
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;

    // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;

    // Ensure we have enough bits for the buckets pointer.
    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    // _maskAndBuckets stores the mask shift in the low 4 bits, and
    // the buckets pointer in the remainder of the value. The mask
    // shift is the value where (0xffff >> shift) produces the correct
    // mask. This is equal to 16 - log2(cache_size).
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;

    static constexpr uintptr_t maskBits = 4;
    static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
    static constexpr uintptr_t bucketsMask = ~maskMask;
#else
#error Unknown cache mask storage type.
#endif

#if __LP64__
    uint16_t _flags;
#endif
    /// Number of buckets currently occupied (i.e. cached methods).
    uint16_t _occupied;

public:
    static bucket_t *emptyBuckets();

    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    unsigned capacity();
    bool isConstantEmptyCache();
    bool canBeFreed();

#if __LP64__
    bool getBit(uint16_t flags) const {
        return _flags & flags;
    }
    void setBit(uint16_t set) {
        __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
    }
    void clearBit(uint16_t clear) {
        __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
    }
#endif

#if FAST_CACHE_ALLOC_MASK
    bool hasFastInstanceSize(size_t extra) const
    {
        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        }
        return _flags & FAST_CACHE_ALLOC_MASK;
    }

    size_t fastInstanceSize(size_t extra) const
    {
        ASSERT(hasFastInstanceSize(extra));

        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        } else {
            size_t size = _flags & FAST_CACHE_ALLOC_MASK;
            // remove the FAST_CACHE_ALLOC_DELTA16 that was added
            // by setFastInstanceSize
            return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
        }
    }

    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
        uint16_t sizeBits;

        // Adding FAST_CACHE_ALLOC_DELTA16 allows for FAST_CACHE_ALLOC_MASK16
        // to yield the proper 16byte aligned allocation size with a single mask
        sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
        sizeBits &= FAST_CACHE_ALLOC_MASK;
        if (newSize <= sizeBits) {
            newBits |= sizeBits;
        }
        _flags = newBits;
    }
#else
    bool hasFastInstanceSize(size_t extra) const {
        return false;
    }
    size_t fastInstanceSize(size_t extra) const {
        abort();
    }
    void setFastInstanceSize(size_t extra) {
        // nothing
    }
#endif

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
    void insert(Class cls, SEL sel, IMP imp, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold));
};
/// One cache slot: a single cached (SEL, IMP) pair.
struct bucket_t {
private:
    /// The cached method implementation (stored as an encoded uintptr_t).
    explicit_atomic<uintptr_t> _imp;
    /// The selector, acting as the hash key (0 means the slot is empty).
    explicit_atomic<SEL> _sel;
};
通过
cache_t
结构,可以看出是一个典型的散列表结构
-
_buckets
用来缓存方法的散列/哈希表 -
_mask
容量的临界值(散列表长度 - 1) -
_occupied
表示已经缓存的方法的数量
-
消息发送过程中在
lookUpImpOrForward
中获取到IMP后直接跳转执行log_and_fill_cache
/// After lookUpImpOrForward resolves an IMP, optionally log the message send
/// and then fill the receiver class's method cache with the result.
static void
log_and_fill_cache(Class cls, IMP imp, SEL sel, id receiver, Class implementer)
{
#if SUPPORT_MESSAGE_LOGGING
    // Message logging is rarely enabled; the logger may veto caching.
    if (slowpath(objcMsgLogEnabled && implementer)) {
        bool shouldCache = logMessageSend(implementer->isMetaClass(),
                                          cls->nameForLogging(),
                                          implementer->nameForLogging(),
                                          sel);
        if (!shouldCache) {
            return;
        }
    }
#endif
    // Insert the resolved sel -> imp mapping into cls's cache.
    cache_fill(cls, sel, imp, receiver);
}
-
cache_t::insert
缓存的插入和扩容等相关核心代码
/// Insert the sel -> imp mapping into this class's method cache.
/// Grows (and discards) the bucket array once occupancy would exceed
/// 3/4 of capacity. The caller must already hold the cache/runtime
/// lock -- asserted below, not acquired here.
void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    ASSERT(sel != 0 && cls->isInitialized());

    // Use the cache as-is if it is less than 3/4 full
    // Occupancy after this insertion succeeds.
    mask_t newOccupied = occupied() + 1;
    // Current capacity of the bucket array.
    unsigned oldCapacity = capacity(), capacity = oldCapacity;
    // Nothing cached yet: the shared read-only empty cache is in place.
    if (slowpath(isConstantEmptyCache())) {
        // Cache is read-only. Replace it.
        // Allocate a real table, starting at INIT_CACHE_SIZE (4) slots.
        if (!capacity) capacity = INIT_CACHE_SIZE;
        reallocate(oldCapacity, capacity, /* freeOld */false);
    }
    else if (fastpath(newOccupied <= capacity / 4 * 3)) {
        // Still at or below 3/4 occupancy: keep the existing buckets.
    }
    else {
        // Over the 3/4 threshold: grow the cache.
        // Double the capacity (or start at INIT_CACHE_SIZE if it was 0)...
        capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
        // ...clamped at MAX_CACHE_SIZE.
        if (capacity > MAX_CACHE_SIZE) {
            capacity = MAX_CACHE_SIZE;
        }
        // Allocate a fresh (empty) bucket array and free the old one;
        // previously cached entries are intentionally not carried over.
        reallocate(oldCapacity, capacity, true);
    }

    // Probe the bucket array starting at the sel's hash slot.
    bucket_t *b = buckets();
    mask_t m = capacity - 1;
    mask_t begin = cache_hash(sel, m);
    mask_t i = begin;

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    do {
        // Empty slot (sel == 0): claim it and store the entry.
        if (fastpath(b[i].sel() == 0)) {
            incrementOccupied();
            b[i].set<Atomic, Encoded>(sel, imp, cls);
            return;
        }
        if (b[i].sel() == sel) {
            // The entry was added to the cache by some other thread
            // before we grabbed the cacheUpdateLock.
            return;
        }
        // Collision: linear-probe to the next slot, wrapping via the mask.
    } while (fastpath((i = cache_next(i, m)) != begin));

    // Probed every slot without finding room -- should be impossible
    // given the resize policy above; report the corrupt cache and abort.
    cache_t::bad_cache(receiver, (SEL)sel, cls);
}
-
mask_t cache_next
通过开放定址法线性探测解决哈希碰撞
/// Linear probing step: advance to the adjacent slot, wrapping around at
/// the end of the table (mask == capacity - 1, capacity a power of two).
static inline mask_t cache_next(mask_t i, mask_t mask) {
    mask_t next = i + 1;
    return next & mask;
}
-
reallocate
,在首次初始化时和扩容时进行调用
/// Replace the bucket array with a freshly allocated one of newCapacity
/// slots, updating the buckets pointer and mask. Called on first real
/// allocation and on every expansion. Old entries are NOT copied over.
void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld)
{
    // Grab the current table, then allocate its replacement.
    bucket_t *previousBuckets = buckets();
    bucket_t *freshBuckets = allocateBuckets(newCapacity);

    // Cache's old contents are not propagated.
    // This is thought to save cache memory at the cost of extra cache fills.
    // fixme re-measure this
    ASSERT(newCapacity > 0);
    ASSERT((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);

    // Publish the new table; the mask is always capacity - 1.
    setBucketsAndMask(freshBuckets, newCapacity - 1);

    if (freeOld) {
        // Hand the old array to cache_collect_free for disposal
        // (deferred reclamation rather than an immediate free).
        cache_collect_free(previousBuckets, oldCapacity);
    }
}
-
_mask的使用
_mask从侧面反映了cache_t中哈希桶的数量(哈希桶的数量 = _mask + 1),保
证了查找哈希桶时不会出现越界的情况
从上面的源码分析,我们知道cache_t在任何一次缓存方法的时候,哈希桶的数量一定是 >=4 且为 2 的整数次幂(4、8、16、32……,因为容量从 4 开始每次翻倍),_mask则等于哈希桶的数量-1,也就是说,缓存方法的时候,_mask的二进制位上全都是1(这正是"容量为 2 的幂"才能保证的性质)。当循环查询哈希桶的时候,索引值是由xx & _mask运算得出的,因此索引值是小于哈希桶的数量的(index <= _mask,故index < capacity),也就不会出现越界的情况
-
为什么扩容临界点是3/4?
一般设定临界点就不得不权衡 空间利用率 和 时间利用率 。在 3/4 这个临界点的
时候,空间利用率比较高,同时又避免了相当多的哈希冲突,时间利用率也比较高。
假设两种极端情况:
当临界点是1的时候,也就是说当全部的哈希桶都缓存有方法时,才会扩容。这虽然让开辟出来的内存空间的利用率达到100%,但是会造成大量的哈希冲突,加剧了查找索引的时间成本,导致时间利用率低下,这与高速缓存的目的相悖;
当临界点是0.5的时候,意味着哈希桶的占用量达到总数一半的时候,就会扩容。这虽然极大避免了哈希冲突,时间利用率非常高,却浪费了一半的空间,使得空间利用率低下。这种以空间换取时间的做法同样不可取;
两相权衡下,当扩容临界点是3/4的时候,空间利用率 和 时间利用率 都相对比较高。
-
总结
通过以上例子的验证、源码的分析以及问题的讨论,现在总结一下cache_t的几个结论:
cache_t能缓存调用过的方法。
cache_t的三个成员变量中,
_buckets的类型是struct bucket_t *,也就是指针数组,它表示一系列的哈希桶(已调用的方法的SEL和IMP就缓存在哈希桶中),一个桶可以缓存一个方法。
_mask的类型是mask_t(mask_t在64位架构下就是uint32_t,长度为4个字节),它的值等于哈希桶的总数-1(capacity - 1),侧面反映了哈希桶的总数。
_occupied的类型也是mask_t,它代表的是当前_buckets已缓存的方法数。
当缓存的方法数到达临界点(桶总数的3/4)时,下次再缓存新的方法时,首先会丢弃旧的桶,同时开辟新的内存,也就是扩容(扩容后都是全新的桶,以后每个方法都要重新缓存的),然后再把新的方法缓存下来,此时_occupied为1。
当多个线程同时调用一个方法时,可分以下几种情况:
多线程读缓存:读缓存由汇编实现,无锁且高效,由于并没有改变_buckets和_mask,所以并无安全隐患。
多线程写缓存:OC用了一个全局的互斥锁(旧版本为cacheUpdateLock,新版本为runtimeLock;insert开头的assertLocked()只是断言调用方已持有该锁,加锁发生在更外层)来保证不会出现并发写缓存导致重复插入的情况。
多线程读写缓存:OC使用了ldp汇编指令、编译内存屏障技术、内存垃圾回收技术等多种手段来解决多线程读写的无锁处理方案,既保证了安全,又提升了系统的性能。