static void zend_hash_persist(HashTable *ht, zend_persist_func_t pPersistElement)
{
	uint32_t idx, nIndex;
	Bucket *p;

	if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) {
		HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
		return;
	}
	if (ht->nNumUsed == 0) {
		/* empty table: release the bucket array and reset to the sentinel */
		efree(HT_GET_DATA_ADDR(ht));
		ht->nTableMask = HT_MIN_MASK;
		HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
		ht->u.flags &= ~HASH_FLAG_INITIALIZED;
		return;
	}
	if (ht->u.flags & HASH_FLAG_PACKED) {
		void *data = HT_GET_DATA_ADDR(ht);

		zend_accel_store(data, HT_USED_SIZE(ht));
		HT_SET_DATA_ADDR(ht, data);
	} else if (ht->nNumUsed < (uint32_t)(-(int32_t)ht->nTableMask) / 2) {
		/* compact table */
		void *old_data = HT_GET_DATA_ADDR(ht);
		Bucket *old_buckets = ht->arData;
		uint32_t hash_size;

		if (ht->nNumUsed <= HT_MIN_SIZE) {
			hash_size = HT_MIN_SIZE;
		} else {
			hash_size = (uint32_t)(-(int32_t)ht->nTableMask);
			/* halve while the hash is still more than 2x the used buckets */
			while (hash_size >> 1 > ht->nNumUsed) {
				hash_size >>= 1;
			}
		}
		ht->nTableMask = (uint32_t)(-(int32_t)hash_size);
		ZEND_ASSERT(((zend_uintptr_t)ZCG(mem) & 0x7) == 0); /* should be 8 byte aligned */
		HT_SET_DATA_ADDR(ht, ZCG(mem));
		ZCG(mem) = (void*)((char*)ZCG(mem) + ZEND_ALIGNED_SIZE((hash_size * sizeof(uint32_t)) + (ht->nNumUsed * sizeof(Bucket))));
		HT_HASH_RESET(ht);
		memcpy(ht->arData, old_buckets, ht->nNumUsed * sizeof(Bucket));
		efree(old_data);

		for (idx = 0; idx < ht->nNumUsed; idx++) {
			p = ht->arData + idx;
			if (Z_TYPE(p->val) == IS_UNDEF) continue;

			/* persist bucket and key */
			if (p->key) {
				zend_accel_store_interned_string(p->key);
			}

			/* persist the data itself */
			pPersistElement(&p->val);

			/* rebuild the hash chain for the compacted table */
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
		}
		return;
	} else {
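The compaction branch above depends on PHP 7's layout trick: nTableMask is stored as the negated (two's-complement) hash size, so `p->h | ht->nTableMask` yields a negative 32-bit offset that addresses the uint32_t hash slots placed immediately before arData. A minimal standalone sketch of that index arithmetic, with hypothetical hash values rather than engine code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hash_size = 8; /* must be a power of two */
	/* stored negated, exactly as ht->nTableMask above */
	uint32_t nTableMask = (uint32_t)(-(int32_t)hash_size);
	uint32_t hashes[] = { 5, 13, 260 }; /* hypothetical bucket hash values */

	for (int i = 0; i < 3; i++) {
		/* h | mask keeps the low log2(hash_size) bits of h and forces the
		 * high bits to 1, producing an offset in [-hash_size, -1] relative
		 * to arData; 5 and 13 collide in the same slot here */
		uint32_t nIndex = hashes[i] | nTableMask;
		printf("h=%" PRIu32 " -> slot offset %" PRId32 "\n",
		       hashes[i], (int32_t)nIndex);
	}
	return 0;
}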
static APC_HOTSPOT HashTable* my_copy_hashtable(HashTable *source, apc_context_t *ctxt)
{
	uint32_t idx;
	HashTable *target;
	apc_pool *pool = ctxt->pool;

	if (ctxt->copy == APC_COPY_IN) {
		target = (HashTable*) pool->palloc(pool, sizeof(HashTable));
	} else {
		ALLOC_HASHTABLE(target);
	}

	GC_REFCOUNT(target) = 1;
	GC_TYPE_INFO(target) = IS_ARRAY;

	/* register the copy so self-references resolve to the same target */
	zend_hash_index_update_ptr(&ctxt->copied, (uintptr_t) source, target);

	target->nTableSize = source->nTableSize;
	target->pDestructor = source->pDestructor;

	if (source->nNumUsed == 0) {
		/* empty table: point at the uninitialized sentinel, copy nothing */
		target->u.flags = (source->u.flags & ~(HASH_FLAG_INITIALIZED|HASH_FLAG_PACKED|HASH_FLAG_PERSISTENT)) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
		target->nTableMask = HT_MIN_MASK;
		target->nNumUsed = 0;
		target->nNumOfElements = 0;
		target->nNextFreeElement = 0;
		target->nInternalPointer = HT_INVALID_IDX;
		HT_SET_DATA_ADDR(target, &uninitialized_bucket);
	} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
		/* immutable array: safe to duplicate the whole data block at once */
		target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
		target->nTableMask = source->nTableMask;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		if (ctxt->copy == APC_COPY_IN) {
			HT_SET_DATA_ADDR(target, pool->palloc(pool, HT_SIZE(target)));
		} else {
			HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		}
		target->nInternalPointer = source->nInternalPointer;
		memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
		if (target->nNumOfElements > 0 && target->nInternalPointer == HT_INVALID_IDX) {
			/* skip leading holes to find the first live element */
			idx = 0;
			while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
				idx++;
			}
			target->nInternalPointer = idx;
		}
	} else if (source->u.flags & HASH_FLAG_PACKED) {
		/* packed array: duplicate elements, distinguishing tables with holes */
		target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
		target->nTableMask = source->nTableMask;
		target->nNumUsed = source->nNumUsed;
		target->nNumOfElements = source->nNumOfElements;
		target->nNextFreeElement = source->nNextFreeElement;
		if (ctxt->copy == APC_COPY_IN) {
			HT_SET_DATA_ADDR(target, pool->palloc(pool, HT_SIZE(target)));
		} else {
			HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		}
		target->nInternalPointer = source->nInternalPointer;
		HT_HASH_RESET_PACKED(target);
		if (target->nNumUsed == target->nNumOfElements) {
			apc_array_dup_packed_elements(ctxt, source, target, 0);
		} else {
			apc_array_dup_packed_elements(ctxt, source, target, 1);
		}
		if (target->nNumOfElements > 0 && target->nInternalPointer == HT_INVALID_IDX) {
			/* skip leading holes to find the first live element */
			idx = 0;
			while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
				idx++;
			}
			target->nInternalPointer = idx;
		}
	} else {
		/* regular hash: duplicate element by element and rebuild the chains */
		target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
		target->nTableMask = source->nTableMask;
		target->nNextFreeElement = source->nNextFreeElement;
		target->nInternalPointer = HT_INVALID_IDX;
		if (ctxt->copy == APC_COPY_IN) {
			HT_SET_DATA_ADDR(target, pool->palloc(pool, HT_SIZE(target)));
		} else {
			HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
		}
		HT_HASH_RESET(target);
		if (target->u.flags & HASH_FLAG_STATIC_KEYS) {
			if (source->nNumUsed == source->nNumOfElements) {
				idx = apc_array_dup_elements(ctxt, source, target, 1, 0);
			} else {
				idx = apc_array_dup_elements(ctxt, source, target, 1, 1);
			}
		} else {
			if (source->nNumUsed == source->nNumOfElements) {
				idx = apc_array_dup_elements(ctxt, source, target, 0, 0);
			} else {
				idx = apc_array_dup_elements(ctxt, source, target, 0, 1);
			}
		}
		target->nNumUsed = idx;
		target->nNumOfElements = idx;
		if (idx > 0 && target->nInternalPointer == HT_INVALID_IDX) {
			target->nInternalPointer = 0;
		}
	}
	return target;
}
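Before descending into the elements, my_copy_hashtable records the new table in ctxt->copied keyed by the source address, which is what lets a self-referencing array be copied exactly once. A caller-side sketch of how that registry would be consulted (my_copy_hashtable_or_reuse is a hypothetical helper, not APCu API; it relies on the same PHP 7-era lvalue GC_REFCOUNT macro used above):

/* Hypothetical wrapper: reuse a table this context has already copied,
 * bumping its refcount, instead of copying the same source twice. */
static HashTable* my_copy_hashtable_or_reuse(HashTable *source, apc_context_t *ctxt)
{
	HashTable *copied = zend_hash_index_find_ptr(&ctxt->copied, (uintptr_t) source);

	if (copied) {
		GC_REFCOUNT(copied)++;
		return copied;
	}
	return my_copy_hashtable(source, ctxt);
}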
static void zend_hash_persist(HashTable *ht, zend_persist_func_t pPersistElement)
{
	uint32_t idx, nIndex;
	Bucket *p;

	HT_FLAGS(ht) |= HASH_FLAG_STATIC_KEYS;
	ht->pDestructor = NULL;
	ht->nInternalPointer = 0;

	if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
		if (EXPECTED(!ZCG(current_persistent_script)->corrupted)) {
			HT_SET_DATA_ADDR(ht, &ZCSG(uninitialized_bucket));
		} else {
			HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
		}
		return;
	}
	if (ht->nNumUsed == 0) {
		/* empty table: release the buckets and point at the shared sentinel */
		efree(HT_GET_DATA_ADDR(ht));
		ht->nTableMask = HT_MIN_MASK;
		if (EXPECTED(!ZCG(current_persistent_script)->corrupted)) {
			HT_SET_DATA_ADDR(ht, &ZCSG(uninitialized_bucket));
		} else {
			HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
		}
		HT_FLAGS(ht) |= HASH_FLAG_UNINITIALIZED;
		return;
	}
	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		void *data = HT_GET_DATA_ADDR(ht);

		data = zend_shared_memdup_free(data, HT_USED_SIZE(ht));
		HT_SET_DATA_ADDR(ht, data);
	} else if (ht->nNumUsed < (uint32_t)(-(int32_t)ht->nTableMask) / 4) {
		/* compact table */
		void *old_data = HT_GET_DATA_ADDR(ht);
		Bucket *old_buckets = ht->arData;
		uint32_t hash_size;

		if (ht->nNumUsed <= HT_MIN_SIZE) {
			hash_size = HT_MIN_SIZE * 2;
		} else {
			hash_size = (uint32_t)(-(int32_t)ht->nTableMask);
			/* halve while the hash is still more than 4x the used buckets */
			while (hash_size >> 2 > ht->nNumUsed) {
				hash_size >>= 1;
			}
		}
		ht->nTableMask = (uint32_t)(-(int32_t)hash_size);
		ZEND_ASSERT(((zend_uintptr_t)ZCG(mem) & 0x7) == 0); /* should be 8 byte aligned */
		HT_SET_DATA_ADDR(ht, ZCG(mem));
		ZCG(mem) = (void*)((char*)ZCG(mem) + ZEND_ALIGNED_SIZE((hash_size * sizeof(uint32_t)) + (ht->nNumUsed * sizeof(Bucket))));
		HT_HASH_RESET(ht);
		memcpy(ht->arData, old_buckets, ht->nNumUsed * sizeof(Bucket));
		efree(old_data);

		for (idx = 0; idx < ht->nNumUsed; idx++) {
			p = ht->arData + idx;
			if (Z_TYPE(p->val) == IS_UNDEF) continue;

			/* persist bucket and key */
			if (p->key) {
				zend_accel_store_interned_string(p->key);
			}

			/* persist the data itself */
			pPersistElement(&p->val);

			/* rebuild the hash chain for the compacted table */
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
		}
		return;
	} else {
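Compared with the earlier version, this one compacts more aggressively: the entry threshold drops from half to a quarter of the hash size, the minimum compacted size doubles to HT_MIN_SIZE * 2, and the shrink loop stops once the hash size is within 4x of the used buckets rather than 2x. A standalone model of the sizing logic (compact_hash_size and the test values are hypothetical, for illustration only):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HT_MIN_SIZE 8 /* Zend's minimum table size */

static uint32_t compact_hash_size(uint32_t nNumUsed, uint32_t old_hash_size)
{
	uint32_t hash_size;

	if (nNumUsed <= HT_MIN_SIZE) {
		hash_size = HT_MIN_SIZE * 2;
	} else {
		hash_size = old_hash_size;
		/* halve while the hash is still more than 4x the used buckets */
		while (hash_size >> 2 > nNumUsed) {
			hash_size >>= 1;
		}
	}
	return hash_size;
}

int main(void)
{
	printf("%" PRIu32 "\n", compact_hash_size(3, 64));   /* 16 */
	printf("%" PRIu32 "\n", compact_hash_size(20, 256)); /* 64 */
	return 0;
}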