// Remove the element at `pos`, leaving a tombstone behind rather than
// compacting the element array. Callers are expected to compact later
// if tombstone density becomes too high (see isDensityTooLow()).
// Requires a mutable (non-shared) buffer, a valid live position, and a
// non-empty collection.
void HashCollection::eraseNoCompact(ssize_t pos) {
  assert(canMutateBuffer());
  assert(validPos(pos) && !isTombstone(pos));
  assert(m_size > 0);
  // The underlying array does the actual tombstoning; we just track size.
  arrayData()->eraseNoCompact(pos);
  --m_size;
}
// Insert or overwrite the entry for string key `key` with value `val`.
// `raw` is presumably a bool template parameter (declaration not visible
// in this chunk — TODO confirm): when false, we perform the copy-on-write
// mutate() and bump m_version for iterator invalidation; when true the
// caller has already arranged a mutable buffer and takes responsibility
// for versioning.
ALWAYS_INLINE
void BaseMap::setImpl(StringData* key, const TypedValue* val) {
  if (!raw) {
    mutate();
  }
  assert(val->m_type != KindOfRef);
  assert(canMutateBuffer());
retry:
  strhash_t h = key->hash();
  auto* p = findForInsert(key, h);
  assert(p);
  if (validPos(*p)) {
    // Key already present: overwrite in place. Dup the new value before
    // decref'ing the old one so a re-entrant destructor never observes a
    // half-updated element.
    auto& e = data()[*p];
    TypedValue old = e.data;
    cellDup(*val, e.data);
    tvRefcountedDecRef(old);
    return;
  }
  if (UNLIKELY(isFull())) {
    // No free slot: grow (or compact) and redo the lookup, since the
    // hash table has been rebuilt and `p` is stale.
    makeRoom();
    goto retry;
  }
  if (!raw) {
    ++m_version;
  }
  auto& e = allocElm(p);
  cellDup(*val, e.data);
  e.setStrKey(key, h);
  // Track keys that look like integers for dict/darray semantics.
  updateIntLikeStrKeys(key);
}
// Insert or overwrite the entry for integer key `h` with value `val`.
// Mirrors the string-key overload: `raw` is presumably a bool template
// parameter (declaration not visible in this chunk — TODO confirm) that
// skips the COW mutate() and version bump when true.
ALWAYS_INLINE
void BaseMap::setImpl(int64_t h, const TypedValue* val) {
  if (!raw) {
    mutate();
  }
  assert(val->m_type != KindOfRef);
  assert(canMutateBuffer());
retry:
  auto p = findForInsert(h);
  assert(p);
  if (validPos(*p)) {
    // Key already present: overwrite in place, decref'ing the old value
    // only after the new one is in position.
    auto& e = data()[*p];
    TypedValue old = e.data;
    cellDup(*val, e.data);
    tvRefcountedDecRef(old);
    return;
  }
  if (UNLIKELY(isFull())) {
    // Table is full: make room and redo the lookup since `p` is stale.
    makeRoom();
    goto retry;
  }
  if (!raw) {
    ++m_version;
  }
  auto& e = allocElm(p);
  cellDup(*val, e.data);
  e.setIntKey(h);
  // Keep the next-key-to-assign counter ahead of the largest int key.
  updateNextKI(h);
}
// Shrink the backing array when capacity is too high relative to size.
// If `oldCap` is non-zero, restore (at least) that capacity; otherwise
// pick the largest capacity keeping the load factor <= 0.5 to avoid
// shrink/grow thrashing (hysteresis).
void HashCollection::shrink(uint32_t oldCap /* = 0 */) {
  assert(isCapacityTooHigh() && (oldCap == 0 || oldCap < cap()));
  assert(m_size <= posLimit() && posLimit() <= cap());
  dropImmCopy();
  uint32_t newCap;
  if (oldCap != 0) {
    // If an old capacity was specified, use that
    newCap = oldCap;
    // .. unless the old capacity is too small, in which case we use the
    // smallest capacity that is large enough to hold the current number
    // of elements.
    for (; newCap < m_size; newCap <<= 1) {}
    assert(newCap == computeMaxElms(folly::nextPowTwo<uint64_t>(newCap) - 1));
  } else {
    // Empty collection with no assigned int keys: drop the buffer
    // entirely and fall back to the shared static empty array.
    if (m_size == 0 && nextKI() == 0) {
      decRefArr(m_arr);
      m_arr = staticEmptyDictArrayAsMixed();
      return;
    }
    // If no old capacity was provided, we compute the largest capacity
    // where m_size/cap() is less than or equal to 0.5 for good hysteresis
    size_t doubleSz = size_t(m_size) * 2;
    uint32_t capThreshold = (doubleSz < size_t(MaxSize)) ? doubleSz : MaxSize;
    for (newCap = SmallSize * 2; newCap < capThreshold; newCap <<= 1) {}
  }
  assert(SmallSize <= newCap && newCap <= MaxSize);
  assert(m_size <= newCap);
  auto* oldAd = arrayData();
  if (!oldAd->cowCheck()) {
    // If the buffer's refcount is 1, we can teleport the elements
    // to a new buffer
    auto oldBuf = data();
    auto oldUsed = posLimit();
    auto oldNextKI = nextKI();
    auto arr = MixedArray::asMixed(MixedArray::MakeReserveDict(newCap));
    auto data = mixedData(arr);
    m_arr = arr;
    // Hash table lives immediately after the element slots; note that
    // tableMask() here reads the NEW array (m_arr was just reassigned).
    auto table = (int32_t*)(data + size_t(newCap));
    auto table_mask = tableMask();
    arr->m_size = m_size;
    setPosLimit(m_size);
    setNextKI(oldNextKI);
    // Copy live elements densely (skipping tombstones) and rebuild the
    // hash table as we go.
    for (uint32_t frPos = 0, toPos = 0; toPos < m_size; ++toPos, ++frPos) {
      frPos = skipTombstonesNoBoundsCheck(frPos, oldUsed, oldBuf);
      copyElm(oldBuf[frPos], data[toPos]);
      *findForNewInsert(table, table_mask, data[toPos].probe()) = toPos;
    }
    // Elements were moved (not dup'd), so mark the old array as a zombie
    // before releasing it to prevent double-decref of its contents.
    oldAd->setZombie();
    decRefArr(oldAd);
  } else {
    // For cases where the buffer's refcount is greater than 1, call
    // resizeHelper()
    resizeHelper(newCap);
  }
  assert(canMutateBuffer());
  assert(m_immCopy.isNull());
  assert(!isCapacityTooHigh() || newCap == oldCap);
}
// Ensure room for at least `sz` elements, preserving a mutable buffer.
// Grows when the request exceeds current capacity; otherwise compacts
// or simply COW-mutates depending on tombstone density.
// Throws (via throwReserveTooLarge) when sz exceeds MaxReserveSize.
NEVER_INLINE
void HashCollection::reserve(int64_t sz) {
  assert(m_size <= posLimit() && posLimit() <= cap());
  auto cap = static_cast<int64_t>(this->cap());
  if (LIKELY(sz > cap)) {
    if (UNLIKELY(sz > int64_t(MaxReserveSize))) {
      throwReserveTooLarge();
    }
    // Fast path: The requested capacity is greater than the current capacity.
    // Grow to the smallest allowed capacity that is sufficient.
    grow(MixedArray::computeScaleFromSize(sz));
    assert(canMutateBuffer());
    return;
  }
  if (LIKELY(!hasTombstones())) {
    // Fast path: There are no tombstones and the requested capacity is less
    // than or equal to the current capacity.
    mutate();
    return;
  }
  if (sz + int64_t(posLimit() - m_size) <= cap || isDensityTooLow()) {
    // If we reach this case, then either (1) density is too low (this is
    // possible because of methods like retain()), in which case we compact
    // to make room and return, OR (2) density is not too low and either
    // sz < m_size or there's enough room to add sz-m_size elements, in
    // which case we do nothing and return.
    compactOrShrinkIfDensityTooLow();
    assert(sz + int64_t(posLimit() - m_size) <= cap);
    mutate();
    return;
  }
  // If we reach this case, then density is not too low and sz > m_size and
  // there is not enough room to add sz-m_size elements. While we could
  // compact to make room, it's better for hysteresis if we grow capacity
  // by 2x instead.
  assert(!isDensityTooLow());
  assert(sz + int64_t(posLimit() - m_size) > cap);
  assert(cap < MaxSize && tableMask() != 0);
  auto newScale = scale() * 2;
  assert(sz > 0 && MixedArray::Capacity(newScale) >= sz);
  grow(newScale);
  assert(canMutateBuffer());
}
// Slow path of mutate(): the buffer is shared (refcount > 1), so make a
// private copy before any in-place modification (copy-on-write).
void HashCollection::mutateImpl() {
  assert(arrayData()->hasMultipleRefs());
  // Drop any cached immutable snapshot; it may release a reference.
  dropImmCopy();
  if (canMutateBuffer()) {
    // dropImmCopy() released the only other reference; no copy needed.
    return;
  }
  auto* oldAd = arrayData();
  m_arr = MixedArray::asMixed(MixedArray::Copy(oldAd));
  // The old array is still referenced elsewhere, so a plain decref (not
  // release) is guaranteed safe here.
  assert(oldAd->hasMultipleRefs());
  oldAd->decRefCount();
}
// Replace the backing array with a fresh one of capacity `newCap`,
// duplicating all live elements (tombstones are not copied). Used when
// the buffer is shared or otherwise cannot be resized in place.
ALWAYS_INLINE
void HashCollection::resizeHelper(uint32_t newCap) {
  assert(newCap >= m_size);
  assert(m_immCopy.isNull());
  // Allocate a new ArrayData with the specified capacity and dup
  // all the elements (without copying over tombstones).
  auto ad = arrayData() == staticEmptyDictArrayAsMixed() ?
    MixedArray::asMixed(MixedArray::MakeReserveDict(newCap)) :
    MixedArray::CopyReserve(m_arr, newCap);
  decRefArr(m_arr);
  m_arr = ad;
  assert(canMutateBuffer());
}
// Squeeze out tombstones without changing capacity. Only called when
// density is too low (too many tombstones relative to live elements).
void HashCollection::compact() {
  assert(isDensityTooLow());
  dropImmCopy();
  if (!arrayData()->cowCheck()) {
    // MixedArray::compact can only handle cases where the buffer's
    // refcount is 1.
    arrayData()->compact(false);
  } else {
    // For cases where the buffer's refcount is greater than 1, call
    // resizeHelper(). Copying at the same capacity drops tombstones.
    resizeHelper(cap());
  }
  assert(canMutateBuffer());
  assert(m_immCopy.isNull());
  assert(!isDensityTooLow());
}
// Called when the element array is full and an insert is pending.
// Either grows capacity 2x, or — if there are many tombstones —
// compacts instead to reclaim slots without growing.
// Throws (via throwTooLarge) when already at MaxSize and growth is needed.
NEVER_INLINE
void HashCollection::makeRoom() {
  assert(isFull());
  assert(posLimit() == cap());
  if (LIKELY(!isDensityTooLow())) {
    if (UNLIKELY(cap() == MaxSize)) {
      throwTooLarge();
    }
    assertx(scale() > 0);
    grow(scale() * 2);
  } else {
    // Enough dead slots exist that compaction alone frees up room.
    compact();
  }
  assert(canMutateBuffer());
  assert(m_immCopy.isNull());
  assert(!isFull());
}
// Grow the backing array to the capacity implied by `newScale`.
// Uses the in-place MixedArray::Grow fast path when possible, otherwise
// falls back to a full copy via resizeHelper().
void HashCollection::grow(uint32_t newScale) {
  auto newCap = MixedArray::Capacity(newScale);
  assert(m_size <= posLimit() && posLimit() <= cap() && cap() <= newCap);
  assert(SmallSize <= newCap && newCap <= MaxSize);
  assert(m_size <= newCap);
  auto oldAd = arrayData();
  dropImmCopy();
  if (m_size > 0 && !oldAd->cowCheck()) {
    // MixedArray::Grow can only handle non-empty cases where the
    // buffer's refcount is 1.
    m_arr = MixedArray::Grow(oldAd, newScale);
    decRefArr(oldAd);
  } else {
    // For cases where m_size is zero or the buffer's refcount is
    // greater than 1, call resizeHelper().
    resizeHelper(newCap);
  }
  assert(canMutateBuffer());
  assert(m_immCopy.isNull());
}
// Implements Set::map()/ImmSet::map(): apply `callback` to each element
// and collect the results into a new TSet.
// NOTE(review): the template prolog (presumably
// `template<class TSet, bool useKey>`) is not visible in this chunk —
// confirm against the declaration. `useKey` selects whether the callback
// receives (key, value) or just (value).
// Throws an InvalidArgumentException for a non-callable, and
// throw_collection_modified() if the callback mutates this set.
typename std::enable_if<
  std::is_base_of<BaseSet, TSet>::value, Object>::type
BaseSet::php_map(const Variant& callback) const {
  VMRegGuard _;
  CallCtx ctx;
  vm_decode_function(callback, nullptr, false, ctx);
  if (!ctx.func) {
    SystemLib::throwInvalidArgumentExceptionObject(
      "Parameter must be a valid callback");
  }
  auto set = req::make<TSet>();
  if (!m_size) return Object{std::move(set)};
  assert(posLimit() != 0);
  assert(hashSize() > 0);
  // NOTE(review): staticEmptyMixedArray() here vs.
  // staticEmptyDictArrayAsMixed() used elsewhere in this file — verify
  // which static empty array a freshly-made TSet actually holds.
  assert(set->arrayData() == staticEmptyMixedArray());
  auto oldCap = set->cap();
  set->reserve(posLimit()); // presume minimum collisions ...
  assert(set->canMutateBuffer());
  constexpr int64_t argc = useKey ? 2 : 1;
  TypedValue argv[argc];
  for (ssize_t pos = iter_begin(); iter_valid(pos); pos = iter_next(pos)) {
    auto e = iter_elm(pos);
    TypedValue tvCbRet;
    // Snapshot the version so we can detect mutation by the callback.
    int32_t pVer = m_version;
    if (useKey) {
      argv[0] = e->data;
    }
    // For sets, key and value are the same element.
    argv[argc-1] = e->data;
    g_context->invokeFuncFew(&tvCbRet, ctx, argc, argv);
    // Now that tvCbRet is live, make sure to decref even if we throw.
    SCOPE_EXIT { tvRefcountedDecRef(&tvCbRet); };
    if (UNLIKELY(m_version != pVer)) throw_collection_modified();
    set->addRaw(&tvCbRet);
  }
  // ... and shrink back if that was incorrect
  set->shrinkIfCapacityTooHigh(oldCap);
  return Object{std::move(set)};
}
// Implements Set::concat(): return a new TVector containing this set's
// elements followed by the elements of `iterable`.
// NOTE(review): the template prolog (presumably `template<class TVector>`)
// is not visible in this chunk — confirm against the declaration.
typename std::enable_if<
  std::is_base_of<BaseVector, TVector>::value, Object>::type
BaseSet::php_concat(const Variant& iterable) {
  size_t itSize;
  ArrayIter iter = getArrayIterHelper(iterable, itSize);
  auto vec = req::make<TVector>();
  uint32_t sz = m_size;
  // Reserve for both halves up front to avoid reallocation while copying.
  vec->reserve((size_t)sz + itSize);
  assert(vec->canMutateBuffer());
  vec->setSize(sz);
  // Copy this set's live elements densely, skipping tombstones.
  uint32_t used = posLimit();
  for (uint32_t i = 0, j = 0; i < used; ++i) {
    if (isTombstone(i)) {
      continue;
    }
    cellDup(data()[i].data, vec->data()[j]);
    ++j;
  }
  // Append the iterable's elements after the copied set elements.
  for (; iter; ++iter) {
    vec->addRaw(iter.second());
  }
  return Object{std::move(vec)};
}