// Allocate an empty, flat, request-local string with capacity for at least
// `reserveLen' characters.  The character buffer lives immediately after the
// StringData header; length and hash both start at 0.
StringData* StringData::Make(size_t reserveLen) {
  auto const allocRet = allocFlatForLen(reserveLen);
  auto const sd = allocRet.first;
  auto const cc = allocRet.second;            // encoded capacity for the header
  auto const data = reinterpret_cast<char*>(sd + 1); // chars follow the header
  data[0] = 0;                                // empty string: just the NUL
  sd->m_data = data;
  sd->m_hdr.init(cc, HeaderKind::String, 1);  // refcount starts at 1
  sd->m_lenAndHash = 0; // len=hash=0
  assert(sd->hasExactlyOneRef());
  assert(sd->isFlat());
  assert(sd->checkSane());
  return sd;
}
// Concatenate `s1' and `s2' into a new flat, request-local string.
// The cached hash of the result starts at 0 (not yet computed).
StringData* StringData::Make(const StringData* s1, const StringData* s2) {
  auto const len = s1->m_len + s2->m_len;
  // `memcpy8()' could overrun the buffer by at most 7 bytes, so we allocate 6
  // more bytes here, which (together with the trailing 0) makes it safe.
  auto const sd = allocFlatForLenSmall(len + 6);
  sd->m_lenAndHash = len; // hash=0
  auto const data = reinterpret_cast<char*>(sd + 1);
  // memcpy8 returns a pointer just past the bytes it copied, so the second
  // copy starts where the first ended, and the terminator goes at the end.
  auto const next = memcpy8(data, s1->data(), s1->m_len);
  *memcpy8(next, s2->data(), s2->m_len) = 0;
  assert(sd->hasExactlyOneRef());
  assert(sd->isFlat());
  assert(sd->checkSane());
  return sd;
}
// Create a suspended async generator for the (not-yet-resumed) frame `fp'.
// A single allocation via Resumable::Create holds the Resumable bookkeeping
// plus room for the AsyncGenerator state and the c_AsyncGenerator object,
// which are placement-new'd back to back; the ObjectData* returned is the
// c_AsyncGenerator placed directly after the AsyncGenerator state.
ObjectData* AsyncGenerator::Create(const ActRec* fp, size_t numSlots,
                                   jit::TCA resumeAddr, Offset resumeOffset) {
  assert(fp);
  assert(!fp->resumed());
  assert(fp->func()->isAsyncGenerator());
  void* genDataPtr = Resumable::Create<false,
    sizeof(AsyncGenerator) + sizeof(c_AsyncGenerator)>(
      fp, numSlots, resumeAddr, resumeOffset);
  AsyncGenerator* genData = new (genDataPtr) AsyncGenerator();
  auto const gen = new (genData + 1) c_AsyncGenerator();
  assert(gen->hasExactlyOneRef());
  assert(gen->noDestruct());
  genData->setState(State::Created);
  genData->m_waitHandle = nullptr; // no wait handle until first await
  return static_cast<ObjectData*>(gen);
}
// Allocate and initialize the header of a MixedArray big enough to hold
// `neededSize' elements.  Size, pos, and used all start at 0; the caller is
// responsible for populating elements (and the hash table) afterwards.
MixedArray* StructArray::ToMixedHeader(size_t neededSize) {
  auto const scale = computeScaleFromSize(neededSize);
  auto const ad = reqAllocArray(scale);
  ad->m_sizeAndPos = 0; // We'll set size and pos later.
  ad->m_hdr.init(HeaderKind::Mixed, 1); // refcount starts at 1
  ad->m_scale_used = scale; // used=0
  ad->m_nextKI = 0; // There were never any numeric indices.
  assert(ad->kind() == ArrayData::kMixedKind);
  assert(ad->m_size == 0);
  assert(ad->m_pos == 0);
  assert(ad->hasExactlyOneRef());
  assert(ad->m_used == 0);
  assert(ad->m_scale == scale);
  return ad;
}
// Concatenate three string pieces into a new flat, request-local string.
// Hash starts at 0 (not yet computed).
StringData* StringData::Make(folly::StringPiece r1, folly::StringPiece r2,
                             folly::StringPiece r3) {
  auto const len = r1.size() + r2.size() + r3.size();
  auto const sd = allocFlatForLenSmall(len);
  sd->m_lenAndHash = len; // hash=0
  auto p = reinterpret_cast<char*>(sd + 1);
  // memcpy returns its destination, so after each call `p' points at the
  // start of the chunk just written; the next copy offsets by that chunk's
  // size to land immediately after it.
  p = static_cast<char*>(memcpy(p, r1.data(), r1.size()));
  p = static_cast<char*>(memcpy(p + r1.size(), r2.data(), r2.size()));
  p = static_cast<char*>(memcpy(p + r2.size(), r3.data(), r3.size()));
  p[r3.size()] = 0; // NUL terminator just past the last chunk
  assert(sd->hasExactlyOneRef());
  assert(sd->isFlat());
  assert(sd->checkSane());
  return sd;
}
// Concatenate three slices into a new flat, request-local string.
// Hash starts at 0 (not yet computed).
StringData* StringData::Make(StringSlice r1, StringSlice r2, StringSlice r3) {
  auto const len = r1.len + r2.len + r3.len;
  auto const sd = allocFlatForLenSmall(len);
  sd->m_lenAndHash = len; // hash=0
  char* p = reinterpret_cast<char*>(sd + 1);
  // memcpy returns its destination, so after each call `p' points at the
  // start of the chunk just written; each subsequent copy offsets by the
  // previous chunk's length to land immediately after it.
  p = static_cast<char*>(memcpy(p, r1.ptr, r1.len));
  p = static_cast<char*>(memcpy(p + r1.len, r2.ptr, r2.len));
  p = static_cast<char*>(memcpy(p + r2.len, r3.ptr, r3.len));
  p[r3.len] = 0; // NUL terminator just past the last chunk
  assert(sd->hasExactlyOneRef());
  assert(sd->isFlat());
  assert(sd->checkSane());
  return sd;
}
// Materialize a request-local StringData from an APC string.  Short strings
// are copied into a flat small-size-class allocation (fast path below);
// longer strings take MakeAPCSlowPath, which shares the APC buffer instead.
StringData* StringData::Make(const APCString* shared) {
  // No need to check if len > MaxSize, because if it were we'd never
  // have made the StringData in the APCVariant without throwing.
  assert(size_t(shared->getStringData()->size()) <= size_t(MaxSize));
  auto const data = shared->getStringData();
  auto const len = data->size();
  if (UNLIKELY(len > SmallStringReserve)) {
    return MakeAPCSlowPath(shared);
  }

  // small-string path: copy the characters and the already-computed hash.
  auto const psrc = data->data();
  auto const hash = data->m_hash & STRHASH_MASK;
  assert(hash != 0); // APC strings have their hash precomputed
  static_assert(SmallStringReserve + sizeof(StringData) + 1 <
                CapCode::Threshold, "");
  auto const need = sizeof(StringData) + len + 1;
  auto const cap = MemoryManager::smallSizeClass(need);
  auto const sd = static_cast<StringData*>(MM().mallocSmallSize(cap));
  auto const pdst = reinterpret_cast<char*>(sd + 1);
  auto const cc = CapCode::ceil(cap - kCapOverhead);
  assert(cc.code == cap - kCapOverhead); // small caps encode exactly
  sd->m_data = pdst;
  sd->m_hdr.init(cc, HeaderKind::String, 1);
  sd->m_lenAndHash = len | int64_t{hash} << 32;
  // pdst[len] = 0;
  // Copying len+1 bytes brings the source's NUL terminator along.
  auto const mcret = memcpy(pdst, psrc, len + 1);
  auto const ret = reinterpret_cast<StringData*>(mcret) - 1;
  // Recalculating ret from mcret avoids a spill.
  // Note: this return value thing is doing a dead lea into %rsi in
  // the caller for some reason.
  assert(ret == sd);
  assert(ret->m_len == len);
  assert(ret->hasExactlyOneRef());
  assert(ret->m_hash == hash);
  assert(ret->isFlat());
  assert(ret->checkSane());
  return ret;
}
// Make a static copy of the struct array `ad', duplicating (and flattening)
// every element value.  The iteration position is carried over.
ArrayData* StructArray::CopyStatic(const ArrayData* ad) {
  auto const src = asStructArray(ad);
  auto const count = src->size();
  auto result = StructArray::createStatic(src->shape(), count);
  result->m_pos = src->m_pos;
  auto const from = src->data();
  auto to = result->data();
  for (size_t i = 0; i < count; ++i) {
    tvDupFlattenVars(&from[i], &to[i], src);
  }
  assert(result->hasExactlyOneRef());
  return result;
}
// State transition from Mode::Shared to Mode::Flat. StringData* StringData::escalate(size_t cap) { assert(isShared() && !isStatic() && cap >= m_len); auto const allocRet = allocFlatForLen(cap); auto const sd = allocRet.first; auto const cc = allocRet.second; auto const data = reinterpret_cast<char*>(sd + 1); sd->m_data = data; sd->m_hdr.init(cc, HeaderKind::String, 1); sd->m_lenAndHash = m_lenAndHash; *memcpy8(data, m_data, m_len) = 0; assert(sd->hasExactlyOneRef()); assert(sd->isFlat()); assert(sd->checkSane()); return sd; }
// Copy the characters of `sl' into a new small flat string.
// Hash starts at 0 (not yet computed).
StringData* StringData::Make(StringSlice sl, CopyStringMode) {
  auto const sd = allocFlatForLenSmall(sl.len);
  sd->m_lenAndHash = sl.len; // hash=0
  auto const data = reinterpret_cast<char*>(sd + 1);
  data[sl.len] = 0;          // NUL-terminate (written before the copy)
  auto const mcret = memcpy(data, sl.ptr, sl.len);
  auto const ret = reinterpret_cast<StringData*>(mcret) - 1;
  // Recalculating ret from mcret avoids a spill.
  assert(ret == sd);
  assert(ret->m_len == sl.len);
  assert(ret->hasExactlyOneRef());
  assert(ret->m_hash == 0);
  assert(ret->isFlat());
  assert(ret->checkSane());
  return ret;
}
// Build a new flat, request-local string holding `r1' followed by `r2'.
// The cached hash starts at 0 (not yet computed).
StringData* StringData::Make(StringSlice r1, StringSlice r2) {
  auto const totalLen = r1.len + r2.len;
  auto const allocRet = allocFlatForLen(totalLen);
  auto const sd = allocRet.first;
  auto const capCode = allocRet.second;
  auto const buf = reinterpret_cast<char*>(sd + 1);
  sd->m_data = buf;
  sd->m_hdr.init(capCode, HeaderKind::String, 1);
  sd->m_lenAndHash = totalLen; // hash=0
  // Copy the two pieces back to back, then NUL-terminate.
  memcpy(buf, r1.ptr, r1.len);
  memcpy(buf + r1.len, r2.ptr, r2.len);
  buf[totalLen] = 0;
  assert(sd->hasExactlyOneRef());
  assert(sd->isFlat());
  assert(sd->checkSane());
  return sd;
}
// Make a request-local copy of the struct array `ad', duplicating (and
// flattening) every element value.  The iteration position is carried over.
ArrayData* StructArray::Copy(const ArrayData* ad) {
  auto const src = asStructArray(ad);
  auto const shape = src->shape();
  auto dst = StructArray::createNoCopy(shape, shape->size());
  dst->m_pos = src->m_pos;
  assert(dst->m_size == dst->shape()->size());
  assert(dst->size() == src->size());
  auto const count = src->size();
  auto const from = src->data();
  auto to = dst->data();
  for (size_t i = 0; i < count; ++i) {
    tvDupFlattenVars(&from[i], &to[i], src);
  }
  assert(dst->m_size == dst->shape()->size());
  assert(dst->hasExactlyOneRef());
  return dst;
}
/*
 * Helper for empty array -> packed transitions. Creates an array
 * with one element. The element is transferred into the array (should
 * already be incref'd).
 */
ALWAYS_INLINE
ArrayLval EmptyArray::MakePackedInl(TypedValue tv) {
  auto const cap = kPackedSmallSize;
  // One allocation: the ArrayData header followed by `cap' TypedValue slots.
  auto const ad = static_cast<ArrayData*>(
    MM().objMalloc(sizeof(ArrayData) + cap * sizeof(TypedValue))
  );
  assert(cap == CapCode::ceil(cap).code); // small cap must encode exactly
  ad->m_sizeAndPos = 1; // size=1, pos=0
  ad->initHeader(CapCode::exact(cap), HeaderKind::Packed, 1);
  // The single element occupies the first slot after the header; the value
  // is moved in (ownership transferred, no incref here).
  auto const lval = reinterpret_cast<TypedValue*>(ad + 1);
  lval->m_data = tv.m_data;
  lval->m_type = tv.m_type;
  assert(ad->kind() == ArrayData::kPackedKind);
  assert(ad->m_size == 1);
  assert(ad->m_pos == 0);
  assert(ad->hasExactlyOneRef());
  assert(PackedArray::checkInvariants(ad));
  return { ad, &tvAsVariant(lval) };
}
// Convert `old' into a freshly allocated MixedArray with room for at least
// one more element than `old' holds.  String keys come from the shape;
// values are duplicated (and flattened).  The iteration position carries over.
MixedArray* StructArray::ToMixedCopy(const StructArray* old) {
  auto const oldSize = old->size();
  auto const ad = ToMixedHeader(oldSize + 1); // +1: caller will insert next
  auto const srcData = old->data();
  auto shape = old->shape();
  // Mark every hash-table slot Empty before inserting the keys.
  memset(ad->hashTab(), static_cast<uint8_t>(MixedArray::Empty),
         sizeof(int32_t) * ad->hashSize());
  // Fix: index with size_t — `auto i = 0' deduced int, giving a
  // signed/unsigned comparison against the unsigned oldSize.
  for (size_t i = 0; i < oldSize; ++i) {
    auto key = const_cast<StringData*>(shape->keyForOffset(i));
    auto& e = ad->addKeyAndGetElem(key);
    tvDupFlattenVars(&srcData[i], &e.data, old);
  }
  ad->m_pos = old->m_pos;
  assert(ad->checkInvariants());
  assert(!ad->isFull()); // the reserved extra slot is still free
  assert(ad->hasExactlyOneRef());
  return ad;
}
// Move `old' into a larger struct array laid out by `newShape'.  Element
// values are transferred (not duplicated), so `old' is emptied afterwards.
StructArray* StructArray::Grow(StructArray* old, Shape* newShape) {
  assert(old->shape()->transitionRequiresGrowth());
  auto grown = StructArray::create(newShape, old->data(),
                                   old->shape()->size());
  grown->m_size = newShape->size();
  // Any strong iterators pointing at `old' must follow the elements.
  if (UNLIKELY(strong_iterators_exist())) {
    move_strong_iterators(grown, old);
  }
  // `old' no longer owns its values.
  old->m_size = 0;
  if (debug) {
    // For debug builds, set m_pos to 0 as well to make the
    // asserts in checkInvariants() happy.
    old->m_pos = 0;
  }
  assert(grown->hasExactlyOneRef());
  return grown;
}
// Slow path for StringData::Make(const APCString*): instead of copying the
// characters, build a shared StringData whose m_data points directly at the
// APC string's buffer, and take a reference on the APC handle.
NEVER_INLINE
StringData* StringData::MakeAPCSlowPath(const APCString* shared) {
  auto const sd = static_cast<StringData*>(
    MM().mallocSmallSize(sizeof(StringData) + sizeof(SharedPayload))
  );
  auto const data = shared->getStringData();
  sd->m_data = const_cast<char*>(data->m_data); // alias the APC buffer
  sd->m_hdr.init(data->m_hdr, 1);               // copy header, refcount 1
  sd->m_lenAndHash = data->m_lenAndHash;        // len and precomputed hash
  sd->sharedPayload()->shared = shared;
  // NOTE(review): enlist() presumably links sd into a request-local list of
  // shared strings — confirm against the StringData definition.
  sd->enlist();
  shared->getHandle()->reference();             // keep the APC string alive
  assert(sd->m_len == data->size());
  assert(sd->m_hdr.aux == data->m_hdr.aux);
  assert(sd->m_hdr.kind == HeaderKind::String);
  assert(sd->hasExactlyOneRef());
  assert(sd->m_hash == data->m_hash);
  assert(sd->isShared());
  assert(sd->checkSane());
  return sd;
}
/* Classes with NativeData structs allocate extra memory prior
 * to the ObjectData.
 *
 * [NativeNode][padding][NativeData][ObjectData](prop0)...(propN)
 *                                  /\
 *                     ObjectData* points here
 *
 * padding is added by alignTypedValue(sizeof(NativeData)) to ensure
 * that ObjectData* falls on a 16-aligned boundary. NativeData is
 * sizeof(NativeData) (NativeDataInfo.sz) bytes for the custom struct.
 * NativeNode is a link in the NativeData sweep list for this ND block
 */
ObjectData* nativeDataInstanceCtor(Class* cls) {
  auto ndi = cls->getNativeDataInfo();
  size_t nativeDataSize = ndsize(ndi->sz); // NativeNode + padded NativeData
  size_t nProps = cls->numDeclProperties();
  size_t size = ObjectData::sizeForNProps(nProps) + nativeDataSize;
  auto node = reinterpret_cast<NativeNode*>(
    MM().objMalloc(size)
  );
  node->obj_offset = nativeDataSize;       // where ObjectData begins
  node->hdr.kind = HeaderKind::NativeData;
  // Placement-new the ObjectData after the native-data prefix (see diagram).
  auto obj = new (reinterpret_cast<char*>(node) + nativeDataSize)
             ObjectData(cls);
  assert(obj->hasExactlyOneRef());
  obj->setAttribute(static_cast<ObjectData::Attribute>(ndi->odattrs));
  if (ndi->init) {
    ndi->init(obj);                        // class-specific native-data init
  }
  if (ndi->sweep) {
    MM().addNativeObject(node);            // register for end-of-request sweep
  }
  return obj;
}
// Copy the characters of `sl' into a new flat, request-local string.
// Hash starts at 0 (not yet computed).
StringData* StringData::Make(StringSlice sl, CopyStringMode) {
  auto const allocRet = allocFlatForLen(sl.len);
  auto const sd = allocRet.first;
  auto const cc = allocRet.second;            // encoded capacity for the header
  auto const data = reinterpret_cast<char*>(sd + 1);
  sd->m_data = data;
  sd->m_hdr.init(cc, HeaderKind::String, 1);  // refcount starts at 1
  sd->m_lenAndHash = sl.len; // hash=0
  data[sl.len] = 0;          // NUL-terminate (written before the copy)
  auto const mcret = memcpy(data, sl.ptr, sl.len);
  auto const ret = reinterpret_cast<StringData*>(mcret) - 1;
  // Recalculating ret from mcret avoids a spill.
  assert(ret == sd);
  assert(ret->m_len == sl.len);
  assert(ret->hasExactlyOneRef());
  assert(ret->m_hash == 0);
  assert(ret->isFlat());
  assert(ret->checkSane());
  return ret;
}
// Ensure capacity of at least `cap' characters for this flat, uniquely
// referenced string.  Returns `this' when already big enough; otherwise
// returns a fresh, larger StringData with the contents copied over.
StringData* StringData::reserve(size_t cap) {
  assert(!isImmutable() && !hasMultipleRefs());
  assert(isFlat());
  if (cap <= capacity()) return this;

  // Over-allocate by 25% to amortize repeated growth, clamped to MaxSize.
  cap = std::min(cap + cap / 4, size_t(MaxSize));
  auto const sd = allocFlatForLenSmall(cap);

  // Request-allocated StringData are always aligned at 16 bytes, thus it is
  // safe to copy in 16-byte groups. This copies m_lenAndHash (8 bytes), the
  // characters (m_len bytes), add the trailing zero (1 byte).
  memcpy16_inline(&sd->m_lenAndHash, &m_lenAndHash,
                  (m_len + 8 + 1 + 15) & ~0xF); // byte count rounded up to 16
  assertx(reinterpret_cast<uintptr_t>(&m_lenAndHash) + 8 ==
          reinterpret_cast<uintptr_t>(m_data)); // chars follow m_lenAndHash
  assertx(reinterpret_cast<uintptr_t>(&m_lenAndHash) % 16 == 0);

  assert(sd->hasExactlyOneRef());
  assert(sd->isFlat());
  assert(sd->checkSane());
  return sd;
}
// Construct the $GLOBALS array backed by `tab', seeding the standard
// superglobal names: array-valued ones start as the static empty array,
// the rest as null.
GlobalsArray::GlobalsArray(NameValueTable* tab)
  : ArrayData(kGlobalsKind)
  , m_tab(tab)
{
  Variant arr(staticEmptyArray());
  // X(name, value) binds the literal name `name' in the table to `value'.
#define X(s,v) tab->set(makeStaticString(#s), v.asTypedValue());
  X(argc,                 init_null_variant);
  X(argv,                 init_null_variant);
  X(_SERVER,              arr);
  X(_GET,                 arr);
  X(_POST,                arr);
  X(_COOKIE,              arr);
  X(_FILES,               arr);
  X(_ENV,                 arr);
  X(_REQUEST,             arr);
  X(_SESSION,             arr);
  X(HTTP_RAW_POST_DATA,   init_null_variant);
#undef X
  g_variables.set(this); // publish this instance as the request's globals
  assertx(hasExactlyOneRef());
}
/*
 * Helper for creating a single-element mixed array with a string key.
 *
 * Note: the key is not already incref'd, but the value must be.
 */
NEVER_INLINE
ArrayLval EmptyArray::MakeMixed(StringData* key, TypedValue val) {
  auto const ad = reqAllocArray(MixedArray::SmallScale);
  MixedArray::InitSmall(ad, 1/*count*/, 1/*size*/, 0/*nextIntKey*/);
  auto const data = ad->data();
  // The hash table sits immediately after the SmallSize element slots.
  auto const hash = reinterpret_cast<int32_t*>(data + MixedArray::SmallSize);

  auto const khash = key->hash();
  auto const mask = MixedArray::SmallMask;
  hash[khash & mask] = 0;      // the key's slot points at element index 0
  data[0].setStrKey(key, khash);

  // Move the value in (it arrives already incref'd).
  auto& lval = data[0].data;
  lval.m_data = val.m_data;
  lval.m_type = val.m_type;

  assert(ad->m_size == 1);
  assert(ad->m_pos == 0);
  assert(ad->m_scale == MixedArray::SmallScale);
  assert(ad->kind() == ArrayData::kMixedKind);
  assert(ad->hasExactlyOneRef());
  assert(ad->m_used == 1);
  assert(ad->checkInvariants());
  return { ad, &tvAsVariant(&lval) };
}
/*
 * Creating a single-element mixed array with a integer key. The
 * value is already incref'd.
 */
ArrayLval EmptyArray::MakeMixed(int64_t key, TypedValue val) {
  auto const ad = reqAllocArray(MixedArray::SmallScale);
  // nextIntKey advances past `key' only when the key is non-negative.
  MixedArray::InitSmall(ad, 1/*count*/, 1/*size*/, (key >= 0) ? key + 1 : 0);
  auto const data = ad->data();
  // The hash table sits immediately after the SmallSize element slots.
  auto const hash = reinterpret_cast<int32_t*>(data + MixedArray::SmallSize);

  auto const mask = MixedArray::SmallMask;
  auto h = hash_int64(key);
  hash[h & mask] = 0;          // the key's slot points at element index 0
  data[0].setIntKey(key, h);

  // Move the value in (it arrives already incref'd).
  auto& lval = data[0].data;
  lval.m_data = val.m_data;
  lval.m_type = val.m_type;

  assert(ad->kind() == ArrayData::kMixedKind);
  assert(ad->m_size == 1);
  assert(ad->m_pos == 0);
  assert(ad->hasExactlyOneRef());
  assert(ad->m_scale == MixedArray::SmallScale);
  assert(ad->m_used == 1);
  assert(ad->checkInvariants());
  return { ad, &tvAsVariant(&lval) };
}