static void zend_hash_persist(HashTable *ht, zend_persist_func_t pPersistElement)
{
	uint32_t idx, nIndex;
	Bucket *p;

	if (!(ht->u.flags & HASH_FLAG_INITIALIZED)) {
		HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
		return;
	}
	if (ht->nNumUsed == 0) {
		efree(HT_GET_DATA_ADDR(ht));
		ht->nTableMask = HT_MIN_MASK;
		HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
		ht->u.flags &= ~HASH_FLAG_INITIALIZED;
		return;
	}
	if (ht->u.flags & HASH_FLAG_PACKED) {
		void *data = HT_GET_DATA_ADDR(ht);
		zend_accel_store(data, HT_USED_SIZE(ht));
		HT_SET_DATA_ADDR(ht, data);
	} else if (ht->nNumUsed < (uint32_t)(-(int32_t)ht->nTableMask) / 2) {
		/* compact table */
		void *old_data = HT_GET_DATA_ADDR(ht);
		Bucket *old_buckets = ht->arData;
		uint32_t hash_size;

		if (ht->nNumUsed <= HT_MIN_SIZE) {
			hash_size = HT_MIN_SIZE;
		} else {
			hash_size = (uint32_t)(-(int32_t)ht->nTableMask);
			while (hash_size >> 1 > ht->nNumUsed) {
				hash_size >>= 1;
			}
		}
		ht->nTableMask = (uint32_t)(-(int32_t)hash_size);
		ZEND_ASSERT(((zend_uintptr_t)ZCG(mem) & 0x7) == 0); /* should be 8 byte aligned */
		HT_SET_DATA_ADDR(ht, ZCG(mem));
		ZCG(mem) = (void*)((char*)ZCG(mem) + ZEND_ALIGNED_SIZE((hash_size * sizeof(uint32_t)) + (ht->nNumUsed * sizeof(Bucket))));
		HT_HASH_RESET(ht);
		memcpy(ht->arData, old_buckets, ht->nNumUsed * sizeof(Bucket));
		efree(old_data);

		for (idx = 0; idx < ht->nNumUsed; idx++) {
			p = ht->arData + idx;
			if (Z_TYPE(p->val) == IS_UNDEF) continue;

			/* persist bucket and key */
			if (p->key) {
				zend_accel_store_interned_string(p->key);
			}

			/* persist the data itself */
			pPersistElement(&p->val);

			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
		}
		return;
	} else {
void *zend_shared_alloc(size_t size)
{
	int i;
	unsigned int block_size = ZEND_ALIGNED_SIZE(size);

#if 1
	if (!ZCG(locked)) {
		zend_accel_error(ACCEL_LOG_ERROR, "Shared memory lock not obtained");
	}
#endif
	if (block_size > ZSMMG(shared_free)) { /* No hope to find a big-enough block */
		SHARED_ALLOC_FAILED();
		return NULL;
	}
	for (i = 0; i < ZSMMG(shared_segments_count); i++) {
		if (ZSMMG(shared_segments)[i]->size - ZSMMG(shared_segments)[i]->pos >= block_size) { /* found a valid block */
			void *retval = (void *) (((char *) ZSMMG(shared_segments)[i]->p) + ZSMMG(shared_segments)[i]->pos);

			ZSMMG(shared_segments)[i]->pos += block_size;
			ZSMMG(shared_free) -= block_size;
			memset(retval, 0, block_size);
			ZEND_ASSERT(((zend_uintptr_t)retval & 0x7) == 0); /* should be 8 byte aligned */
			return retval;
		}
	}
	SHARED_ALLOC_FAILED();
	return NULL;
}
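zend_shared_alloc is a first-fit bump allocator: each segment keeps only a forward-moving position, individual blocks are never freed, and the whole pool is discarded at once. A minimal sketch of the same scheme, with hypothetical segment_t and pool_alloc names, assuming each segment base is itself 8-byte aligned:

#include <stddef.h>
#include <string.h>

#define ALIGN8(sz) (((sz) + 7) & ~(size_t)7)	/* round up to 8 bytes */

typedef struct {
	char   *base;	/* start of the mapped segment (assumed 8-byte aligned) */
	size_t  size;	/* total bytes in the segment */
	size_t  pos;	/* bump pointer: offset of the next free byte */
} segment_t;

/* First fit over the segment list; returns zeroed, 8-byte-aligned memory,
 * or NULL when no segment has room. Blocks are never freed individually. */
static void *pool_alloc(segment_t *segs, int count, size_t size)
{
	size_t block = ALIGN8(size);
	int i;

	for (i = 0; i < count; i++) {
		if (segs[i].size - segs[i].pos >= block) {
			void *p = segs[i].base + segs[i].pos;
			segs[i].pos += block;
			memset(p, 0, block);
			return p;
		}
	}
	return NULL;
}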
static void zend_persist_op_array(zval *zv)
{
	memcpy(ZCG(arena_mem), Z_PTR_P(zv), sizeof(zend_op_array));
	zend_shared_alloc_register_xlat_entry(Z_PTR_P(zv), ZCG(arena_mem));
	Z_PTR_P(zv) = ZCG(arena_mem);
	ZCG(arena_mem) = (void*)((char*)ZCG(arena_mem) + ZEND_ALIGNED_SIZE(sizeof(zend_op_array)));
	zend_persist_op_array_ex(Z_PTR_P(zv), NULL);
}
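zend_persist_op_array shows the arena pattern reused by zend_persist_property_info and zend_persist_class_entry below: memcpy the object to the arena cursor, register the old-to-new address pair in the translation table, and bump the cursor by the aligned size. A stripped-down sketch, with a stub register_xlat standing in for zend_shared_alloc_register_xlat_entry:

#include <stddef.h>
#include <string.h>

#define ALIGN8(sz) (((sz) + 7) & ~(size_t)7)

/* Stand-in for zend_shared_alloc_register_xlat_entry(): records the
 * old -> new address pair so later references can be translated. */
static void register_xlat(const void *old_addr, void *new_addr)
{
	(void)old_addr; (void)new_addr;	/* bookkeeping elided in this sketch */
}

/* Copy an object to the arena cursor, register the relocation, and
 * advance the cursor by the aligned size. Returns the new address. */
static void *arena_move(char **cursor, void *obj, size_t size)
{
	void *dst = *cursor;

	memcpy(dst, obj, size);
	register_xlat(obj, dst);
	*cursor += ALIGN8(size);	/* keeps the arena 8-byte aligned */
	return dst;
}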
int zend_shared_memdup_size(void *source, size_t size)
{
	void *old_p;

	if ((old_p = zend_hash_index_find_ptr(&ZCG(xlat_table), (zend_ulong)source)) != NULL) {
		/* we already duplicated this pointer */
		return 0;
	}
	zend_shared_alloc_register_xlat_entry(source, source);
	return ZEND_ALIGNED_SIZE(size);
}
int zend_shared_memdup_size(void *source, size_t size)
{
	void **old_p;

	if (zend_hash_index_find(&xlat_table, (ulong)source, (void **)&old_p) == SUCCESS) {
		/* we already duplicated this pointer */
		return 0;
	}
	zend_shared_alloc_register_xlat_entry(source, source);
	return ZEND_ALIGNED_SIZE(size);
}
void *_zend_shared_memdup(void *source, size_t size, zend_bool free_source)
{
	void *old_p, *retval;

	if ((old_p = zend_hash_index_find_ptr(&ZCG(xlat_table), (zend_ulong)source)) != NULL) {
		/* we already duplicated this pointer */
		return old_p;
	}
	retval = ZCG(mem);
	ZCG(mem) = (void*)(((char*)ZCG(mem)) + ZEND_ALIGNED_SIZE(size));
	memcpy(retval, source, size);
	zend_shared_alloc_register_xlat_entry(source, retval);
	if (free_source) {
		efree(source);
	}
	return retval;
}
void *_zend_shared_memdup(void *source, size_t size, zend_bool free_source TSRMLS_DC)
{
	void **old_p, *retval;

	if (zend_hash_index_find(&xlat_table, (ulong)source, (void **)&old_p) == SUCCESS) {
		/* we already duplicated this pointer */
		return *old_p;
	}
	retval = ZCG(mem);
	ZCG(mem) = (void*)(((char*)ZCG(mem)) + ZEND_ALIGNED_SIZE(size));
	memcpy(retval, source, size);
	if (free_source) {
		interned_efree((char*)source);
	}
	zend_shared_alloc_register_xlat_entry(source, retval);
	return retval;
}
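Both memdup variants implement the same deduplication protocol: look the source pointer up in the xlat (translation) table first, and only copy and register it on a miss, so a pointer shared by several structures is duplicated exactly once. A self-contained sketch of that protocol, using a hypothetical linear-scan map in place of the Zend hash table:

#include <stddef.h>
#include <string.h>

/* Hypothetical pointer map standing in for ZCG(xlat_table);
 * capacity checks are omitted for brevity. */
typedef struct { const void *key; void *val; } xlat_entry;
typedef struct { xlat_entry e[256]; int n; } xlat_map;

static void *xlat_get(xlat_map *t, const void *key)
{
	int i;

	for (i = 0; i < t->n; i++) {
		if (t->e[i].key == key) return t->e[i].val;
	}
	return NULL;
}

static void xlat_put(xlat_map *t, const void *key, void *val)
{
	t->e[t->n].key = key;
	t->e[t->n].val = val;
	t->n++;
}

/* memdup with dedup: a second call with the same source returns the
 * first copy instead of duplicating it again. */
static void *shared_memdup(xlat_map *t, char **mem, const void *source, size_t size)
{
	void *retval, *old_p = xlat_get(t, source);

	if (old_p != NULL) {
		return old_p;	/* we already duplicated this pointer */
	}
	retval = *mem;
	*mem += (size + 7) & ~(size_t)7;	/* bump, keeping 8-byte alignment */
	memcpy(retval, source, size);
	xlat_put(t, source, retval);
	return retval;
}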
static void zend_persist_property_info(zval *zv)
{
	zend_property_info *prop;

	memcpy(ZCG(arena_mem), Z_PTR_P(zv), sizeof(zend_property_info));
	zend_shared_alloc_register_xlat_entry(Z_PTR_P(zv), ZCG(arena_mem));
	prop = Z_PTR_P(zv) = ZCG(arena_mem);
	ZCG(arena_mem) = (void*)((char*)ZCG(arena_mem) + ZEND_ALIGNED_SIZE(sizeof(zend_property_info)));
	zend_accel_store_interned_string(prop->name);
	if (prop->doc_comment) {
		if (ZCG(accel_directives).save_comments) {
			zend_accel_store_string(prop->doc_comment);
		} else {
			if (!zend_shared_alloc_get_xlat_entry(prop->doc_comment)) {
				zend_shared_alloc_register_xlat_entry(prop->doc_comment, prop->doc_comment);
			}
			zend_string_release(prop->doc_comment);
			prop->doc_comment = NULL;
		}
	}
}
static int zend_shared_alloc_reattach(size_t requested_size, char **error_in)
{
	int err;
	void *wanted_mapping_base;
	char *mmap_base_file = get_mmap_base_file();
	FILE *fp = fopen(mmap_base_file, "r");
	MEMORY_BASIC_INFORMATION info;

	err = GetLastError();
	if (!fp) {
		zend_win_error_message(ACCEL_LOG_WARNING, mmap_base_file, err);
		zend_win_error_message(ACCEL_LOG_FATAL, "Unable to open base address file", err);
		*error_in = "fopen";
		return ALLOC_FAILURE;
	}
	if (!fscanf(fp, "%p", &wanted_mapping_base)) {
		err = GetLastError();
		zend_win_error_message(ACCEL_LOG_FATAL, "Unable to read base address", err);
		*error_in = "read mapping base";
		fclose(fp);
		return ALLOC_FAILURE;
	}
	fclose(fp);

	/* Check if the requested address space is free */
	if (VirtualQuery(wanted_mapping_base, &info, sizeof(info)) == 0 ||
	    info.State != MEM_FREE ||
	    info.RegionSize < requested_size) {
#if ENABLE_FILE_CACHE_FALLBACK
		if (ZCG(accel_directives).file_cache && ZCG(accel_directives).file_cache_fallback) {
			size_t pre_size, wanted_mb_save;

			wanted_mb_save = (size_t)wanted_mapping_base;

			err = ERROR_INVALID_ADDRESS;
			zend_win_error_message(ACCEL_LOG_WARNING, "Base address marks unusable memory region (fall-back to file cache)", err);

			pre_size = ZEND_ALIGNED_SIZE(sizeof(zend_smm_shared_globals)) + ZEND_ALIGNED_SIZE(sizeof(zend_shared_segment)) + ZEND_ALIGNED_SIZE(sizeof(void *)) + ZEND_ALIGNED_SIZE(sizeof(int));
			/* Map only part of the SHM to get access to the opcache shared globals */
			mapping_base = MapViewOfFileEx(memfile, FILE_MAP_ALL_ACCESS, 0, 0, pre_size + ZEND_ALIGNED_SIZE(sizeof(zend_accel_shared_globals)), NULL);
			if (mapping_base == NULL) {
				err = GetLastError();
				zend_win_error_message(ACCEL_LOG_FATAL, "Unable to reattach to opcache shared globals", err);
				return ALLOC_FAILURE;
			}
			accel_shared_globals = (zend_accel_shared_globals *)((char *)((zend_smm_shared_globals *)mapping_base)->app_shared_globals + ((char *)mapping_base - (char *)wanted_mb_save));

			/* Make this process use the file cache only */
			ZCG(accel_directives).file_cache_only = 1;

			return ALLOC_FALLBACK;
		}
#endif
		err = ERROR_INVALID_ADDRESS;
		zend_win_error_message(ACCEL_LOG_FATAL, "Base address marks unusable memory region. Please setup opcache.file_cache and opcache.file_cache_fallback directives for more convenient Opcache usage", err);
		return ALLOC_FAILURE;
	}

	mapping_base = MapViewOfFileEx(memfile, FILE_MAP_ALL_ACCESS, 0, 0, 0, wanted_mapping_base);
	err = GetLastError();

	if (mapping_base == NULL) {
		if (err == ERROR_INVALID_ADDRESS) {
			zend_win_error_message(ACCEL_LOG_FATAL, "Unable to reattach to base address", err);
			return ALLOC_FAILURE;
		}
		return ALLOC_FAIL_MAPPING;
	}
	smm_shared_globals = (zend_smm_shared_globals *) mapping_base;

	return SUCCESSFULLY_REATTACHED;
}
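The core of the reattach logic is the VirtualQuery probe: the address recorded by the first process is only reusable if it still refers to a MEM_FREE region large enough for the whole mapping. That check in isolation (a sketch, assuming <windows.h>):

#include <windows.h>
#include <stdbool.h>

/* True if [base, base + size) is entirely unreserved in this process,
 * i.e. MapViewOfFileEx(..., base) has a chance of succeeding there. */
static bool region_is_free(void *base, size_t size)
{
	MEMORY_BASIC_INFORMATION info;

	if (VirtualQuery(base, &info, sizeof(info)) == 0) {
		return false;	/* query failed: treat the region as unusable */
	}
	return info.State == MEM_FREE && info.RegionSize >= size;
}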
static void zend_persist_class_entry(zval *zv)
{
	zend_class_entry *ce = Z_PTR_P(zv);

	if (ce->type == ZEND_USER_CLASS) {
		memcpy(ZCG(arena_mem), Z_PTR_P(zv), sizeof(zend_class_entry));
		zend_shared_alloc_register_xlat_entry(Z_PTR_P(zv), ZCG(arena_mem));
		ce = Z_PTR_P(zv) = ZCG(arena_mem);
		ZCG(arena_mem) = (void*)((char*)ZCG(arena_mem) + ZEND_ALIGNED_SIZE(sizeof(zend_class_entry)));
		zend_accel_store_interned_string(ce->name);
		zend_hash_persist(&ce->function_table, zend_persist_op_array);
		if (ce->default_properties_table) {
			int i;

			zend_accel_store(ce->default_properties_table, sizeof(zval) * ce->default_properties_count);
			for (i = 0; i < ce->default_properties_count; i++) {
				zend_persist_zval(&ce->default_properties_table[i]);
			}
		}
		if (ce->default_static_members_table) {
			int i;

			zend_accel_store(ce->default_static_members_table, sizeof(zval) * ce->default_static_members_count);
			for (i = 0; i < ce->default_static_members_count; i++) {
				zend_persist_zval(&ce->default_static_members_table[i]);
			}
		}
		ce->static_members_table = NULL;
		zend_hash_persist(&ce->constants_table, zend_persist_zval);

		if (ZEND_CE_FILENAME(ce)) {
			/* do not free! PHP has centralized filename storage, compiler will free it */
			zend_accel_memdup_string(ZEND_CE_FILENAME(ce));
		}
		if (ZEND_CE_DOC_COMMENT(ce)) {
			if (ZCG(accel_directives).save_comments) {
				zend_accel_store_string(ZEND_CE_DOC_COMMENT(ce));
			} else {
				if (!zend_shared_alloc_get_xlat_entry(ZEND_CE_DOC_COMMENT(ce))) {
					zend_shared_alloc_register_xlat_entry(ZEND_CE_DOC_COMMENT(ce), ZEND_CE_DOC_COMMENT(ce));
					zend_string_release(ZEND_CE_DOC_COMMENT(ce));
				}
				ZEND_CE_DOC_COMMENT(ce) = NULL;
			}
		}
		zend_hash_persist(&ce->properties_info, zend_persist_property_info);
		if (ce->num_interfaces && ce->interfaces) {
			efree(ce->interfaces);
		}
		ce->interfaces = NULL; /* will be filled in on fetch */

		if (ce->num_traits && ce->traits) {
			efree(ce->traits);
		}
		ce->traits = NULL;

		if (ce->trait_aliases) {
			int i = 0;

			while (ce->trait_aliases[i]) {
				if (ce->trait_aliases[i]->trait_method) {
					if (ce->trait_aliases[i]->trait_method->method_name) {
						zend_accel_store_interned_string(ce->trait_aliases[i]->trait_method->method_name);
					}
					if (ce->trait_aliases[i]->trait_method->class_name) {
						zend_accel_store_interned_string(ce->trait_aliases[i]->trait_method->class_name);
					}
					ce->trait_aliases[i]->trait_method->ce = NULL;
					zend_accel_store(ce->trait_aliases[i]->trait_method, sizeof(zend_trait_method_reference));
				}
				if (ce->trait_aliases[i]->alias) {
					zend_accel_store_interned_string(ce->trait_aliases[i]->alias);
				}
				zend_accel_store(ce->trait_aliases[i], sizeof(zend_trait_alias));
				i++;
			}
			zend_accel_store(ce->trait_aliases, sizeof(zend_trait_alias*) * (i + 1));
		}

		if (ce->trait_precedences) {
			int i = 0;

			while (ce->trait_precedences[i]) {
				zend_accel_store_interned_string(ce->trait_precedences[i]->trait_method->method_name);
				zend_accel_store_interned_string(ce->trait_precedences[i]->trait_method->class_name);
				ce->trait_precedences[i]->trait_method->ce = NULL;
				zend_accel_store(ce->trait_precedences[i]->trait_method, sizeof(zend_trait_method_reference));

				if (ce->trait_precedences[i]->exclude_from_classes) {
					int j = 0;

					while (ce->trait_precedences[i]->exclude_from_classes[j].class_name) {
						zend_accel_store_interned_string(ce->trait_precedences[i]->exclude_from_classes[j].class_name);
						j++;
					}
					zend_accel_store(ce->trait_precedences[i]->exclude_from_classes, sizeof(zend_class_entry*) * (j + 1));
				}
				zend_accel_store(ce->trait_precedences[i], sizeof(zend_trait_precedence));
				i++;
			}
			zend_accel_store(ce->trait_precedences, sizeof(zend_trait_precedence*) * (i + 1));
		}
	}
}
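The trait_aliases and trait_precedences loops rely on the arrays being NULL-terminated: each element is persisted in place, and the final zend_accel_store copies i + 1 slots so the terminator travels into shared memory as well. The same idiom in isolation, with hypothetical persist_one and store callbacks:

#include <stddef.h>

/* Persist a NULL-terminated pointer array: relocate each element first,
 * then copy count + 1 slots so the NULL terminator is preserved. */
static void **persist_ptr_array(void **arr,
                                void *(*persist_one)(void *),
                                void *(*store)(void *, size_t))
{
	int i = 0;

	while (arr[i]) {
		arr[i] = persist_one(arr[i]);	/* move element into shared memory */
		i++;
	}
	return (void **)store(arr, sizeof(void *) * (i + 1));	/* include NULL */
}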
static void zend_hash_persist(HashTable *ht, zend_persist_func_t pPersistElement)
{
	uint32_t idx, nIndex;
	Bucket *p;

	HT_FLAGS(ht) |= HASH_FLAG_STATIC_KEYS;
	ht->pDestructor = NULL;
	ht->nInternalPointer = 0;

	if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
		if (EXPECTED(!ZCG(current_persistent_script)->corrupted)) {
			HT_SET_DATA_ADDR(ht, &ZCSG(uninitialized_bucket));
		} else {
			HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
		}
		return;
	}
	if (ht->nNumUsed == 0) {
		efree(HT_GET_DATA_ADDR(ht));
		ht->nTableMask = HT_MIN_MASK;
		if (EXPECTED(!ZCG(current_persistent_script)->corrupted)) {
			HT_SET_DATA_ADDR(ht, &ZCSG(uninitialized_bucket));
		} else {
			HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
		}
		HT_FLAGS(ht) |= HASH_FLAG_UNINITIALIZED;
		return;
	}
	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		void *data = HT_GET_DATA_ADDR(ht);
		data = zend_shared_memdup_free(data, HT_USED_SIZE(ht));
		HT_SET_DATA_ADDR(ht, data);
	} else if (ht->nNumUsed < (uint32_t)(-(int32_t)ht->nTableMask) / 4) {
		/* compact table */
		void *old_data = HT_GET_DATA_ADDR(ht);
		Bucket *old_buckets = ht->arData;
		uint32_t hash_size;

		if (ht->nNumUsed <= HT_MIN_SIZE) {
			hash_size = HT_MIN_SIZE * 2;
		} else {
			hash_size = (uint32_t)(-(int32_t)ht->nTableMask);
			while (hash_size >> 2 > ht->nNumUsed) {
				hash_size >>= 1;
			}
		}
		ht->nTableMask = (uint32_t)(-(int32_t)hash_size);
		ZEND_ASSERT(((zend_uintptr_t)ZCG(mem) & 0x7) == 0); /* should be 8 byte aligned */
		HT_SET_DATA_ADDR(ht, ZCG(mem));
		ZCG(mem) = (void*)((char*)ZCG(mem) + ZEND_ALIGNED_SIZE((hash_size * sizeof(uint32_t)) + (ht->nNumUsed * sizeof(Bucket))));
		HT_HASH_RESET(ht);
		memcpy(ht->arData, old_buckets, ht->nNumUsed * sizeof(Bucket));
		efree(old_data);

		for (idx = 0; idx < ht->nNumUsed; idx++) {
			p = ht->arData + idx;
			if (Z_TYPE(p->val) == IS_UNDEF) continue;

			/* persist bucket and key */
			if (p->key) {
				zend_accel_store_interned_string(p->key);
			}

			/* persist the data itself */
			pPersistElement(&p->val);

			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
		}
		return;
	} else {