NEVER_INLINE strhash_t StringData::hashHelper() const {
  assert(!isProxy());
  strhash_t h = hash_string_i_unsafe(m_data, m_len);
  assert(h >= 0);
  // OR rather than assign: m_hash's sign bit may already hold the
  // "known non-numeric" flag set by isNumericWithVal.
  m_hash |= h;
  return h;
}
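// The OR in hashHelper (rather than a plain store) matters because m_hash
// doubles as a flag word: isNumericWithVal below caches "known non-numeric"
// in the sign bit (STRHASH_MSB), and caching the hash must not clobber it.
// A minimal standalone sketch of this lazy-hash-plus-flag pattern; the type
// and names below are illustrative, not HHVM's:
#include <cstdint>
#include <functional>
#include <string>

struct CachedHashString {
  std::string data;
  mutable int64_t hash = 0;                      // 0 means "not yet computed"

  static constexpr int64_t kFlagBit  = int64_t(1) << 62;
  static constexpr int64_t kHashMask = kFlagBit - 1;   // low 62 bits

  int64_t getHash() const {
    int64_t h = hash & kHashMask;                // ignore the flag bit
    if (h != 0) return h;                        // cached on an earlier call
    h = int64_t(std::hash<std::string>{}(data)) & kHashMask;
    if (h == 0) h = 1;                           // keep 0 reserved for "unset"
    hash |= h;                                   // OR preserves the flag bit
    return h;
  }
};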
NEVER_INLINE
StringData* StringData::MakeProxySlowPath(const APCString* apcstr) {
#ifdef NO_M_DATA
  always_assert(false);
  not_reached();
#else
  auto const sd = static_cast<StringData*>(
    MM().mallocSmallSize(sizeof(StringData) + sizeof(Proxy))
  );
  auto const data = apcstr->getStringData();
  sd->m_data = const_cast<char*>(data->m_data);
  sd->m_hdr.init(data->m_hdr, 1);
  sd->m_lenAndHash = data->m_lenAndHash;
  sd->proxy()->apcstr = apcstr;
  sd->enlist();
  apcstr->reference();

  assert(sd->m_len == data->size());
  assert(sd->m_hdr.aux == data->m_hdr.aux);
  assert(sd->m_hdr.kind == HeaderKind::String);
  assert(sd->hasExactlyOneRef());
  assert(sd->m_hash == data->m_hash);
  assert(sd->isProxy());
  assert(sd->checkSane());
  return sd;
#endif
}
StringData* StringData::append(folly::StringPiece range) {
  assert(!hasMultipleRefs());

  auto s = range.data();
  auto const len = range.size();
  if (len == 0) return this;
  auto const newLen = size_t(m_len) + size_t(len);

  if (UNLIKELY(newLen > MaxSize)) {
    throw_string_too_large(newLen);
  }

  /*
   * We may have an aliasing append.  We don't allow appending with an
   * interior pointer, although we may be asked to append less than
   * the whole string in an aliasing situation.
   */
  ALIASING_APPEND_ASSERT(s, len);

  auto const requestLen = static_cast<uint32_t>(newLen);
  auto const target = UNLIKELY(isProxy()) ? escalate(requestLen)
                                          : reserve(requestLen);
  memcpy(target->mutableData() + m_len, s, len);
  target->setSize(newLen);
  assert(target->checkSane());

  return target;
}
NEVER_INLINE
void StringData::releaseDataSlowPath() {
  assert(isProxy());
  assert(checkSane());

  proxy()->apcstr->getHandle()->unreference();
  delist();
  MM().freeSmallSize(this, sizeof(StringData) + sizeof(Proxy));
}
StringData* StringData::increment() {
  assert(!isStatic());
  assert(!empty());

  auto const sd = UNLIKELY(isProxy())
    ? escalate(m_len + 1)
    : reserve(m_len + 1);
  sd->incrementHelper();
  return sd;
}
ALWAYS_INLINE void StringData::delist() {
  assert(isProxy());
  auto& payload = *proxy();
  auto const next = payload.node.next;
  auto const prev = payload.node.prev;
  assert(uintptr_t(next) != kMallocFreeWord);
  assert(uintptr_t(prev) != kMallocFreeWord);
  next->prev = prev;
  prev->next = next;
}
ALWAYS_INLINE void StringData::enlist() {
  assert(isProxy());
  auto& head = MM().getStringList();

  // insert after head
  auto const next = head.next;
  auto& payload = *proxy();
  assert(uintptr_t(next) != kMallocFreeWord);
  payload.node.next = next;
  payload.node.prev = &head;
  next->prev = head.next = &payload.node;
}
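// enlist/delist above implement an intrusive, circular, doubly-linked list
// hung off a sentinel head node, so insertion and removal are O(1) with no
// allocation. A standalone sketch of the same technique; Node and the helper
// names here are hypothetical, not HHVM's:
struct Node {
  Node* next;
  Node* prev;
};

// An empty list is a sentinel that points at itself.
inline void initHead(Node& head) { head.next = head.prev = &head; }

// Insert n right after head, mirroring StringData::enlist().
inline void insertAfter(Node& head, Node& n) {
  Node* next = head.next;
  n.next = next;
  n.prev = &head;
  next->prev = head.next = &n;
}

// Unlink n, mirroring StringData::delist(); n's own pointers are left stale.
inline void unlink(Node& n) {
  n.next->prev = n.prev;
  n.prev->next = n.next;
}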
CnfExp* CnfPass::produceDisjunction(Solver& solver, Edge e) {
  Edge largestEdge;
  CnfExp* accum = fillArgs(e, true, largestEdge);
  if (accum == NULL) accum = new CnfExp(false);

  // This check is necessary to make sure we don't start out with an
  // accumulator that is "too large".
  ///
  /// @todo Strictly speaking, introProxy doesn't *need* to free
  /// memory; if it didn't, this wouldn't have to reallocate a CnfExp.
  ///
  /// @todo By the time this call to introProxy is made, the semantic
  /// negation pointer will have been destroyed.  Thus, it will not
  /// be possible to use the correct proxy.  That should be fixed.

  // At this point, we will either have NULL or a destructible expression.
  if (accum->clauseSize() > CLAUSE_MAX) {
    accum = new CnfExp(introProxy(solver, largestEdge, accum,
                                  largestEdge.isNeg()));
  }

  int i = _args.size();
  while (i != 0) {
    Edge arg(_args[--i]);
    if (arg.isVar()) {
      accum->disjoin(atomLit(arg));
    } else {
      CnfExp* argExp = (CnfExp*) arg->ptrAnnot(arg.isNeg());
      assert(argExp != NULL);

      bool destroy = (--arg->intAnnot(arg.isNeg()) == 0);
      if (isProxy(argExp)) {
        // variable has been introduced
        accum->disjoin(getProxy(argExp));
      } else if (argExp->litSize() == 0) {
        accum->disjoin(argExp, destroy);
      } else {
        // Check to see if we should introduce a proxy.
        int aL = accum->litSize();     // lits in accum
        int eL = argExp->litSize();    // lits in argument
        int aC = accum->clauseSize();  // clauses in accum
        int eC = argExp->clauseSize(); // clauses in argument
        if (eC > CLAUSE_MAX || (eL * aC + aL * eC > eL + aC + aL + aC)) {
          accum->disjoin(introProxy(solver, arg, argExp, arg.isNeg()));
        } else {
          accum->disjoin(argExp, destroy);
          if (destroy) arg->ptrAnnot(arg.isNeg()) = NULL;
        }
      }
    }
  }
  return accum;
}
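// Why the proxy heuristic in produceDisjunction: disjoining a CNF with aC
// clauses and aL literals against one with eC clauses and eL literals by
// distribution produces aC*eC clauses containing eL*aC + aL*eC literals,
// whereas introducing a proxy (Tseitin-style) variable keeps the two
// expressions separate at roughly linear cost. A standalone sketch of that
// cost comparison; the helper is hypothetical, not part of the pass:
#include <cstdio>

// Literal count after distributing (A1 & ... & A_aC) | (E1 & ... & E_eC):
// every A-clause is paired with every E-clause.
static long distributedLits(long aL, long aC, long eL, long eC) {
  return eL * aC + aL * eC;
}

int main() {
  // Two 10-clause, 30-literal CNFs: distribution costs 600 literals,
  // versus roughly aL + aC + eL + eC = 80 with a proxy variable.
  std::printf("%ld\n", distributedLits(30, 10, 30, 10)); // prints 600
  return 0;
}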
// State transition from Mode::Shared to Mode::Flat.
StringData* StringData::escalate(size_t cap) {
  assert(isProxy() && !isStatic() && cap >= m_len);

  auto const sd = allocFlatForLenSmall(cap);
  sd->m_lenAndHash = m_lenAndHash;
  auto const data = reinterpret_cast<char*>(sd + 1);
  *memcpy8(data, m_data, m_len) = 0;

  assert(sd->hasExactlyOneRef());
  assert(sd->isFlat());
  assert(sd->checkSane());
  return sd;
}
unsigned StringData::sweepAll() {
  auto& head = MM().getStringList();
  auto count = 0;
  for (StringDataNode *next, *n = head.next; n != &head; n = next) {
    count++;
    next = n->next;
    assert(next && uintptr_t(next) != kSmallFreeWord);
    assert(next && uintptr_t(next) != kMallocFreeWord);
    auto const s = node2str(n);
    assert(s->isProxy());
    s->proxy()->apcstr->getHandle()->unreference();
  }
  head.next = head.prev = &head;
  return count;
}
void Tile::draw(const Style& _style, const View& _view) {
  auto& styleMesh = getMesh(_style);

  if (styleMesh) {
    auto& shader = _style.getShaderProgram();

    // Encode proxy status in the sign of the zoom passed to the shader.
    float zoomAndProxy = isProxy() ? -m_id.z : m_id.z;

    shader->setUniformMatrix4f("u_model", m_modelMatrix);
    shader->setUniformf("u_tile_origin",
                        m_tileOrigin.x, m_tileOrigin.y, zoomAndProxy);

    styleMesh->draw(*shader);
  }
}
void StringData::dump() const {
  auto s = slice();

  printf("StringData(%d) (%s%s%s%d): [", m_hdr.count,
         isProxy() ? "proxy " : "",
         isStatic() ? "static " : "",
         isUncounted() ? "uncounted " : "",
         static_cast<int>(s.size()));
  for (uint32_t i = 0; i < s.size(); i++) {
    char ch = s.data()[i];
    if (isprint(ch)) {
      printf("%c", ch);
    } else {
      printf("\\x%02x", ch);
    }
  }
  printf("]\n");
}
DataType StringData::isNumericWithVal(int64_t& lval, double& dval,
                                      int allow_errors,
                                      int* overflow) const {
  // A negative m_hash means STRHASH_MSB is set: we have already
  // determined that this string is not numeric.
  if (m_hash < 0) return KindOfNull;

  DataType ret = KindOfNull;
  auto s = slice();
  if (s.size()) {
    ret = is_numeric_string(
      s.data(),
      s.size(),
      &lval,
      &dval,
      allow_errors,
      overflow
    );
    // Cache the negative result in the hash word (skipped for proxies).
    if (ret == KindOfNull && !isProxy() && allow_errors) {
      m_hash |= STRHASH_MSB;
    }
  }
  return ret;
}
StringData* StringData::append(folly::StringPiece r1,
                               folly::StringPiece r2,
                               folly::StringPiece r3) {
  assert(!hasMultipleRefs());

  auto const len = r1.size() + r2.size() + r3.size();

  if (len == 0) return this;
  if (UNLIKELY(size_t(m_len) + size_t(len) > MaxSize)) {
    throw_string_too_large(size_t(len) + size_t(m_len));
  }

  auto const newLen = m_len + len;

  /*
   * We may have an aliasing append.  We don't allow appending with an
   * interior pointer, although we may be asked to append less than
   * the whole string in an aliasing situation.
   */
  ALIASING_APPEND_ASSERT(r1.data(), r1.size());
  ALIASING_APPEND_ASSERT(r2.data(), r2.size());
  ALIASING_APPEND_ASSERT(r3.data(), r3.size());

  auto const target = UNLIKELY(isProxy()) ? escalate(newLen)
                                          : reserve(newLen);

  /*
   * memcpy is safe even if it's a self append---the regions will be
   * disjoint, since rN.data() can't point past the start of our source
   * pointer, and rN.size() is smaller than the old length.
   */
  void* p = target->mutableData();
  p = memcpy((char*)p + m_len,     r1.data(), r1.size());
  p = memcpy((char*)p + r1.size(), r2.data(), r2.size());
      memcpy((char*)p + r2.size(), r3.data(), r3.size());

  target->setSize(newLen);
  assert(target->checkSane());

  return target;
}
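// The disjointness argument in the comment above can be reproduced in a
// standalone sketch: grow (or reallocate) the destination first, then copy;
// the bytes read and the bytes written never overlap, even when the source
// is the string itself. Hypothetical helper, not HHVM code:
#include <cstdlib>
#include <cstring>

// Append the first len bytes of the malloc'd string s to itself.
char* selfAppend(char* s, size_t len) {
  char* t = static_cast<char*>(std::realloc(s, 2 * len + 1));
  if (!t) return nullptr;
  // Source t[0, len) and destination t[len, 2*len) are disjoint.
  std::memcpy(t + len, t, len);
  t[2 * len] = '\0';
  return t;
}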
// test iterating objects in slabs
void MemoryManager::checkHeap(const char* phase) {
  size_t bytes = 0;
  std::vector<Header*> hdrs;
  std::unordered_set<FreeNode*> free_blocks;
  std::unordered_set<APCLocalArray*> apc_arrays;
  std::unordered_set<StringData*> apc_strings;
  size_t counts[NumHeaderKinds];
  for (unsigned i = 0; i < NumHeaderKinds; i++) counts[i] = 0;
  forEachHeader([&](Header* h) {
    hdrs.push_back(&*h);
    bytes += h->size();
    counts[(int)h->kind()]++;
    switch (h->kind()) {
      case HeaderKind::Free:
        free_blocks.insert(&h->free_);
        break;
      case HeaderKind::Apc:
        if (h->apc_.m_sweep_index != kInvalidSweepIndex) {
          apc_arrays.insert(&h->apc_);
        }
        break;
      case HeaderKind::String:
        if (h->str_.isProxy()) apc_strings.insert(&h->str_);
        break;
      case HeaderKind::Packed:
      case HeaderKind::Struct:
      case HeaderKind::Mixed:
      case HeaderKind::Empty:
      case HeaderKind::Globals:
      case HeaderKind::Proxy:
      case HeaderKind::Object:
      case HeaderKind::WaitHandle:
      case HeaderKind::ResumableObj:
      case HeaderKind::AwaitAllWH:
      case HeaderKind::Vector:
      case HeaderKind::Map:
      case HeaderKind::Set:
      case HeaderKind::Pair:
      case HeaderKind::ImmVector:
      case HeaderKind::ImmMap:
      case HeaderKind::ImmSet:
      case HeaderKind::Resource:
      case HeaderKind::Ref:
      case HeaderKind::ResumableFrame:
      case HeaderKind::NativeData:
      case HeaderKind::SmallMalloc:
      case HeaderKind::BigMalloc:
        break;
      case HeaderKind::BigObj:
      case HeaderKind::Hole:
        assert(false && "forEachHeader skips these kinds");
        break;
    }
  });

  // check the free lists
  for (auto i = 0; i < kNumSmallSizes; i++) {
    for (auto n = m_freelists[i].head; n; n = n->next) {
      assert(free_blocks.find(n) != free_blocks.end());
      free_blocks.erase(n);
    }
  }
  assert(free_blocks.empty());

  // check the apc array list
  assert(apc_arrays.size() == m_apc_arrays.size());
  for (auto a : m_apc_arrays) {
    assert(apc_arrays.find(a) != apc_arrays.end());
    apc_arrays.erase(a);
  }
  assert(apc_arrays.empty());

  // check the apc string list
  for (StringDataNode *next, *n = m_strings.next; n != &m_strings; n = next) {
    next = n->next;
    auto const s = StringData::node2str(n);
    assert(s->isProxy());
    assert(apc_strings.find(s) != apc_strings.end());
    apc_strings.erase(s);
  }
  assert(apc_strings.empty());

  // Heap check is done. If we are not exiting, check pointers using HeapGraph.
  if (Trace::moduleEnabled(Trace::heapreport)) {
    auto g = makeHeapGraph();
    if (!exiting()) checkPointers(g, phase);
    if (Trace::moduleEnabled(Trace::heapreport, 2)) {
      printHeapReport(g, phase);
    }
  }
}
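// checkHeap's list checks all follow one pattern: collect the objects the
// heap walk discovered into a set, then walk the corresponding intrusive
// list, requiring every member to be present and erasing it; a non-empty set
// at the end means the walk saw objects the list doesn't know about. A
// standalone sketch of that cross-check; the template is hypothetical:
#include <unordered_set>
#include <vector>

// True iff list and seen hold exactly the same elements.
template <typename T>
bool sameMembers(const std::vector<T*>& list, std::unordered_set<T*> seen) {
  for (auto* p : list) {
    if (seen.erase(p) != 1) return false; // on the list but never discovered
  }
  return seen.empty();                    // discovered but not on the list
}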