void SharedVariant::getStats(SharedVariantStats *stats) const {
  stats->initStats();
  stats->variantCount = 1;
  switch (m_type) {
  case KindOfUninit:
  case KindOfNull:
  case KindOfBoolean:
  case KindOfInt64:
  case KindOfDouble:
  case KindOfStaticString:
    stats->dataSize = sizeof(m_data.dbl);
    stats->dataTotalSize = sizeof(SharedVariant);
    break;
  case KindOfObject:
    if (getIsObj()) {
      SharedVariantStats childStats;
      m_data.obj->getSizeStats(&childStats);
      stats->addChildStats(&childStats);
      break;
    }
    // fall through
  case KindOfString:
    stats->dataSize = m_data.str->size();
    stats->dataTotalSize = sizeof(SharedVariant) + sizeof(StringData) +
                           stats->dataSize;
    break;
  default:
    assert(is(KindOfArray));
    if (getSerializedArray()) {
      stats->dataSize = m_data.str->size();
      stats->dataTotalSize = sizeof(SharedVariant) + sizeof(StringData) +
                             stats->dataSize;
      break;
    }
    if (getIsVector()) {
      stats->dataTotalSize = sizeof(SharedVariant) + sizeof(VectorData);
      stats->dataTotalSize += sizeof(SharedVariant*) * m_data.vec->m_size;
      for (size_t i = 0; i < m_data.vec->m_size; i++) {
        SharedVariant *v = m_data.vec->vals()[i];
        SharedVariantStats childStats;
        v->getStats(&childStats);
        stats->addChildStats(&childStats);
      }
    } else {
      ImmutableMap *map = m_data.map;
      stats->dataTotalSize = sizeof(SharedVariant) + map->getStructSize();
      for (int i = 0; i < map->size(); i++) {
        SharedVariantStats childStats;
        map->getKeyIndex(i)->getStats(&childStats);
        stats->addChildStats(&childStats);
        map->getValIndex(i)->getStats(&childStats);
        stats->addChildStats(&childStats);
      }
    }
    break;
  }
}
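// For reference, a minimal sketch of the SharedVariantStats aggregate that
// getStats() fills in. Its real definition is not part of this listing, so
// the field types and layout below are assumptions; the point is only that
// initStats() zeroes the counters and addChildStats() folds a child's totals
// into the parent's.
struct SharedVariantStats {
  int32 dataSize;       // bytes of payload data (string bytes, etc.)
  int32 dataTotalSize;  // payload plus per-variant structural overhead
  int32 variantCount;   // number of SharedVariant nodes counted

  void initStats() {
    variantCount = 0;
    dataSize = 0;
    dataTotalSize = 0;
  }

  void addChildStats(const SharedVariantStats *childStats) {
    dataSize += childStats->dataSize;
    dataTotalSize += childStats->dataTotalSize;
    variantCount += childStats->variantCount;
  }
};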
bool ConcurrentTableSharedStore::get(CStrRef key, Variant &value) {
  const StoreValue *sval;
  SharedVariant *svar = NULL;
  ConditionalReadLock l(m_lock, !RuntimeOption::ApcConcurrentTableLockFree ||
                                m_lockingFlag);
  bool expired = false;
  bool promoteObj = false;
  {
    Map::const_accessor acc;
    if (!m_vars.find(acc, key.data())) {
      log_apc(std_apc_miss);
      return false;
    } else {
      sval = &acc->second;
      if (sval->expired()) {
        // Because it only has a read lock on the data, deletion from
        // expiration has to happen after the lock is released
        expired = true;
      } else {
        if (!sval->inMem()) {
          std::lock_guard<SmallLock> sval_lock(sval->lock);
          if (!sval->inMem()) {
            svar = unserialize(key, sval);
            if (!svar) return false;
          } else {
            svar = sval->var;
          }
        } else {
          svar = sval->var;
        }
        if (RuntimeOption::ApcAllowObj && svar->is(KindOfObject)) {
          // Hold ref here for later promoting the object
          svar->incRef();
          promoteObj = true;
        }
        value = svar->toLocal();
        stats_on_get(key.get(), svar);
      }
    }
  }
  if (expired) {
    log_apc(std_apc_miss);
    eraseImpl(key, true);
    return false;
  }
  log_apc(std_apc_hit);
  if (promoteObj) {
    handlePromoteObj(key, svar, value);
    // release the extra ref
    svar->decRef();
  }
  return true;
}
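// ConditionalReadLock is used above but not defined in this listing. A
// minimal sketch consistent with that usage (take the store's read lock only
// when the table is not running in lock-free mode) might look like the
// following; the class name exists in the code above, but the member names
// and exact mutex API here are assumptions, not the real definition.
class ConditionalReadLock {
public:
  ConditionalReadLock(ReadWriteMutex &mutex, bool condition)
    : m_mutex(condition ? &mutex : NULL) {
    if (m_mutex) m_mutex->acquireRead();
  }
  ~ConditionalReadLock() {
    if (m_mutex) m_mutex->release();
  }
private:
  ReadWriteMutex *m_mutex;  // NULL means "run without taking the read lock"
};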
ThreadSharedVariant *ThreadSharedVariant::createAnother(
  CVarRef source, bool serialized, bool inner /* = false */) {
  SharedVariant *wrapped = source.getSharedVariant();
  if (wrapped) {
    wrapped->incRef();
    // static cast should be enough
    return (ThreadSharedVariant *)wrapped;
  }
  return new ThreadSharedVariant(source, serialized, inner);
}
SharedVariant *SharedVariant::Create(
  CVarRef source, bool serialized, bool inner /* = false */,
  bool unserializeObj /* = false */) {
  SharedVariant *wrapped = source.getSharedVariant();
  if (wrapped && !unserializeObj) {
    wrapped->incRef();
    // static cast should be enough
    return (SharedVariant *)wrapped;
  }
  return new SharedVariant(source, serialized, inner, unserializeObj);
}
int SharedVariant::countReachable() const {
  int count = 1;
  if (getType() == KindOfArray) {
    int size = arrSize();
    if (!isPacked()) {
      count += size; // for keys
    }
    for (int i = 0; i < size; i++) {
      SharedVariant* p = getValue(i);
      count += p->countReachable(); // for values
    }
  }
  return count;
}
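// Worked example (hypothetical data, just to illustrate the recursion): for
// a shared value built from array('a' => 1, 'b' => array(2, 3)), the count is
//   1   the root array node
// + 2   keys 'a' and 'b' (the root is not packed, so keys are counted)
// + 1   the scalar value 1
// + 3   the inner packed array: 1 node plus its 2 scalar values (keys of a
//       packed array are implicit and not counted)
// = 7 reachable SharedVariant nodes.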
SharedVariant* SharedVariant::convertObj(CVarRef var) {
  if (!var.is(KindOfObject) || getObjAttempted()) {
    return NULL;
  }
  setObjAttempted();
  PointerSet seen;
  ObjectData *obj = var.getObjectData();
  CArrRef arr = obj->o_toArray();
  if (arr->hasInternalReference(seen, true)) {
    return NULL;
  }
  SharedVariant *tmp = new SharedVariant(var, false, true, true);
  tmp->setObjAttempted();
  return tmp;
}
HOT_FUNC
CVarRef SharedMap::getValueRef(ssize_t pos) const {
  SharedVariant *sv = getValueImpl(pos);
  DataType t = sv->getType();
  if (!IS_REFCOUNTED_TYPE(t)) return sv->asCVarRef();
  if (LIKELY(m_localCache != nullptr)) {
    assert(unsigned(pos) < size());
    TypedValue* tv = &m_localCache[pos];
    if (tv->m_type != KindOfUninit) return tvAsCVarRef(tv);
  } else {
    static_assert(KindOfUninit == 0, "must be 0 since we use smart_calloc");
    m_localCache = (TypedValue*) smart_calloc(size(), sizeof(TypedValue));
  }
  TypedValue* tv = &m_localCache[pos];
  tvAsVariant(tv) = sv->toLocal();
  assert(tv->m_type != KindOfUninit);
  return tvAsCVarRef(tv);
}
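// The lazily allocated m_localCache above has to be torn down when the
// SharedMap is released. The destructor is not part of this listing; a
// sketch of the expected cleanup, assuming size() gives the slot count and
// that unmaterialized slots are still KindOfUninit from smart_calloc, would
// be roughly:
SharedMap::~SharedMap() {
  if (m_localCache) {
    for (TypedValue *tv = m_localCache, *end = tv + size(); tv < end; ++tv) {
      tvRefcountedDecRef(tv);  // no-op for KindOfUninit slots
    }
    smart_free(m_localCache);
  }
}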
SharedVariant* SharedVariant::convertObj(CVarRef var) {
  if (!var.is(KindOfObject) || getObjAttempted()) {
    return nullptr;
  }
  setObjAttempted();
  ObjectData *obj = var.getObjectData();
  if (obj->instanceof(SystemLib::s_SerializableClass)) {
    // should also check the object itself
    return nullptr;
  }
  PointerSet seen;
  if (obj->hasInternalReference(seen, true)) {
    return nullptr;
  }
  SharedVariant *tmp = new SharedVariant(var, false, true, true);
  tmp->setObjAttempted();
  return tmp;
}
SharedVariant* SharedVariant::convertObj(CVarRef var) {
  if (!var.is(KindOfObject) || getObjAttempted()) {
    return NULL;
  }
  setObjAttempted();
  PointerSet seen;
  ObjectData *obj = var.getObjectData();
  if (obj->o_instanceof("Serializable")) {
    // should also check the object itself
    return NULL;
  }
  CArrRef arr = obj->o_toArray();
  if (arr->hasInternalReference(seen, true)) {
    return NULL;
  }
  SharedVariant *tmp = new SharedVariant(var, false, true, true);
  tmp->setObjAttempted();
  return tmp;
}
bool ConcurrentTableSharedStore::handlePromoteObj(CStrRef key,
                                                  SharedVariant* svar,
                                                  CVarRef value) {
  SharedVariant *converted = svar->convertObj(value);
  if (converted) {
    Map::accessor acc;
    if (!m_vars.find(acc, key.data())) {
      // There is a chance another thread deletes the key when this thread is
      // converting the object. In that case, we just bail
      converted->decRef();
      return false;
    }
    // A write lock was acquired during find
    StoreValue *sval = &acc->second;
    SharedVariant *sv = sval->var;
    // sv may not be same as svar here because some other thread may have
    // updated it already, check before updating
    if (sv == svar && !sv->isUnserializedObj()) {
      int64 ttl = sval->expiry ? sval->expiry - time(NULL) : 0;
      stats_on_update(key.get(), sval, converted, ttl);
      sval->var = converted;
      sv->decRef();
      return true;
    }
    converted->decRef();
  }
  return false;
}
HOT_FUNC
CVarRef SharedMap::getValueRef(ssize_t pos) const {
  SharedVariant *sv = m_arr->getValue(pos);
  DataType t = sv->getType();
  if (!IS_REFCOUNTED_TYPE(t)) return sv->asCVarRef();
  if (LIKELY(m_localCache != NULL)) {
    Variant *pv;
    ArrayData *escalated DEBUG_ONLY =
      m_localCache->ZendArray::lvalPtr((int64)pos, pv, false, false);
    assert(!escalated);
    if (pv) return *pv;
  } else {
    m_localCache = NEW(ZendArray)();
    m_localCache->incRefCount();
  }
  Variant v = sv->toLocal();
  Variant *r;
  ArrayData *escalated DEBUG_ONLY =
    m_localCache->ZendArray::addLval((int64)pos, r, false);
  assert(!escalated);
  *r = v;
  return *r;
}
bool ConcurrentTableSharedStore::store(CStrRef key, CVarRef val, int64 ttl,
                                       bool overwrite /* = true */) {
  bool stats = RuntimeOption::EnableStats && RuntimeOption::EnableAPCStats;
  bool statsDetail = RuntimeOption::EnableAPCSizeStats &&
                     RuntimeOption::EnableAPCSizeGroup;
  StoreValue *sval;
  SharedVariant* var = construct(key, val);
  ReadLock l(m_lock);
  const char *kcp = strdup(key.data());
  bool present;
  time_t expiry;
  {
    Map::accessor acc;
    present = !m_vars.insert(acc, kcp);
    sval = &acc->second;
    if (present) {
      free((void *)kcp);
      if (overwrite || sval->expired()) {
        if (statsDetail) {
          SharedStoreStats::onDelete(key.get(), sval->var, true);
        }
        sval->var->decRef();
        if (RuntimeOption::EnableAPCSizeStats && !check_skip(key.data())) {
          int32 size = var->getSpaceUsage();
          SharedStoreStats::updateDirect(sval->size, size);
          sval->size = size;
        }
      } else {
        var->decRef();
        return false;
      }
    } else {
      if (RuntimeOption::EnableAPCSizeStats) {
        int32 size = var->getSpaceUsage();
        SharedStoreStats::addDirect(key.size(), size);
        sval->size = size;
      }
    }
    sval->set(var, ttl);
    expiry = sval->expiry;
    if (statsDetail) {
      SharedStoreStats::onStore(key.get(), var, ttl, false);
    }
  }
  if (RuntimeOption::ApcExpireOnSets) {
    if (ttl) {
      addToExpirationQueue(key.data(), expiry);
    }
    purgeExpired();
  }
  if (stats) {
    if (present) {
      ServerStats::Log("apc.update", 1);
    } else {
      ServerStats::Log("apc.new", 1);
      if (RuntimeOption::EnableStats && RuntimeOption::EnableAPCKeyStats) {
        string prefix = "apc.new.";
        prefix += GetSkeleton(key);
        ServerStats::Log(prefix, 1);
      }
    }
  }
  return true;
}
bool ConcurrentTableSharedStore::get(CStrRef key, Variant &value) {
  bool stats = RuntimeOption::EnableStats && RuntimeOption::EnableAPCStats;
  bool statsFetch = RuntimeOption::EnableAPCSizeStats &&
                    RuntimeOption::EnableAPCFetchStats;
  const StoreValue *val;
  SharedVariant *svar = NULL;
  ReadLock l(m_lock);
  bool expired = false;
  {
    Map::const_accessor acc;
    if (!m_vars.find(acc, key.data())) {
      if (stats) ServerStats::Log("apc.miss", 1);
      return false;
    } else {
      val = &acc->second;
      if (val->expired()) {
        // Because it only has a read lock on the data, deletion from
        // expiration has to happen after the lock is released
        expired = true;
      } else {
        svar = val->var;
        if (RuntimeOption::ApcAllowObj) {
          // Hold ref here
          svar->incRef();
        }
        value = svar->toLocal();
        if (statsFetch) {
          SharedStoreStats::onGet(key.get(), svar);
        }
      }
    }
  }
  if (expired) {
    if (stats) {
      ServerStats::Log("apc.miss", 1);
    }
    eraseImpl(key, true);
    return false;
  }
  if (stats) {
    ServerStats::Log("apc.hit", 1);
  }
  if (RuntimeOption::ApcAllowObj) {
    bool statsDetail = RuntimeOption::EnableAPCSizeStats &&
                       RuntimeOption::EnableAPCSizeGroup;
    SharedVariant *converted = svar->convertObj(value);
    if (converted) {
      Map::accessor acc;
      m_vars.find(acc, key.data()); // start a write lock
      StoreValue *sval = &acc->second;
      SharedVariant *sv = sval->var;
      // sv may not be same as svar here because some other thread may have
      // updated it already, check before updating
      if (!sv->isUnserializedObj()) {
        if (statsDetail) {
          SharedStoreStats::onDelete(key.get(), sv, true);
        }
        sval->var = converted;
        sv->decRef();
        if (RuntimeOption::EnableAPCSizeStats) {
          int32 newSize = converted->getSpaceUsage();
          SharedStoreStats::updateDirect(sval->size, newSize);
          sval->size = newSize;
        }
        if (statsDetail) {
          int64 ttl = sval->expiry ? sval->expiry - time(NULL) : 0;
          SharedStoreStats::onStore(key.get(), converted, ttl, false);
        }
      } else {
        converted->decRef();
      }
    }
    // release the extra ref
    svar->decRef();
  }
  return true;
}
bool ConcurrentTableSharedStore::store(CStrRef key, CVarRef value, int64 ttl,
                                       bool overwrite /* = true */) {
  StoreValue *sval;
  SharedVariant* svar = construct(value);
  ConditionalReadLock l(m_lock, !RuntimeOption::ApcConcurrentTableLockFree ||
                                m_lockingFlag);
  const char *kcp = strdup(key.data());
  bool present;
  time_t expiry = 0;
  bool overwritePrime = false;
  {
    Map::accessor acc;
    present = !m_vars.insert(acc, kcp);
    sval = &acc->second;
    bool update = false;
    if (present) {
      free((void *)kcp);
      if (overwrite || sval->expired()) {
        // if ApcTTLLimit is set, then only primed keys can have expiry == 0
        overwritePrime = (sval->expiry == 0);
        if (sval->inMem()) {
          stats_on_update(key.get(), sval, svar,
                          adjust_ttl(ttl, overwritePrime));
          sval->var->decRef();
          update = true;
        } else {
          // mark the inFile copy invalid since we are updating the key
          sval->sAddr = NULL;
          sval->sSize = 0;
        }
      } else {
        svar->decRef();
        return false;
      }
    }
    int64 adjustedTtl = adjust_ttl(ttl, overwritePrime);
    if (check_noTTL(key.data())) {
      adjustedTtl = 0;
    }
    sval->set(svar, adjustedTtl);
    expiry = sval->expiry;
    if (!update) {
      stats_on_add(key.get(), sval, adjustedTtl, false, false);
    }
  }
  if (expiry) {
    addToExpirationQueue(key.data(), expiry);
  }
  if (RuntimeOption::ApcExpireOnSets) {
    purgeExpired();
  }
  if (present) {
    log_apc(std_apc_update);
  } else {
    log_apc(std_apc_new);
    if (RuntimeOption::EnableStats && RuntimeOption::EnableAPCKeyStats) {
      string prefix = "apc.new." + GetSkeleton(key);
      ServerStats::Log(prefix, 1);
    }
  }
  return true;
}
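// adjust_ttl() is called above but not included in this listing. A plausible
// sketch, assuming RuntimeOption::ApcTTLLimit caps the TTL of entries that
// are not primed (overwritePrime == false); the exact definition may differ:
static int64 adjust_ttl(int64 ttl, bool overwritePrime) {
  if (RuntimeOption::ApcTTLLimit > 0 && !overwritePrime) {
    if (ttl == 0 || ttl > RuntimeOption::ApcTTLLimit) {
      return RuntimeOption::ApcTTLLimit;
    }
  }
  return ttl;
}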