// Rarely used, so don't inline it POBJECT ObjectMemory::allocLargeObject(MWORD objectSize, OTE*& ote) { #ifdef MEMSTATS ++m_nLargeAllocated; #endif POBJECT pObj = static_cast<POBJECT>(allocChunk(_ROUND2(objectSize, sizeof(DWORD)))); // allocateOop expects crit section to be used ote = allocateOop(pObj); ote->setSize(objectSize); ote->m_flags.m_space = OTEFlags::NormalSpace; return pObj; }
// Hand back (via chunkOut) a chunk with at least one free byte, reusing the
// tail chunk when it still has room, otherwise appending a new chunk sized
// to the larger of requestedSize and the standard chunk payload.
MojErr MojBuffer::writeableChunk(Chunk*& chunkOut, MojSize requestedSize)
{
	// Fast path: the last chunk still has space to write into.
	if (!m_chunks.empty()) {
		Chunk* tail = m_chunks.back();
		if (tail->freeSpace() > 0) {
			chunkOut = tail;
			return MojErrNone;
		}
	}
	// Slow path: append a freshly allocated chunk.
	chunkOut = allocChunk(MojMax(requestedSize, ChunkSize - sizeof(Chunk)));
	MojAllocCheck(chunkOut);
	m_chunks.pushBack(chunkOut);
	// First chunk ever: the read cursor starts at its data.
	if (m_readPos == NULL)
		m_readPos = chunkOut->data();
	return MojErrNone;
}
// Acquire a fresh chunk holding at least nbytes of payload and make it the
// current allocation region. Returns false only when allocChunk fails, which
// is asserted to happen only for fallible requests.
bool Allocator::fill(size_t nbytes, bool fallible)
{
	if (nbytes < MIN_CHUNK_SZB)
		nbytes = MIN_CHUNK_SZB;

	// Chunk's declared data member already provides sizeof(int64_t) bytes,
	// so only the remainder is added on top of the header.
	const size_t chunkbytes = sizeof(Chunk) + nbytes - sizeof(int64_t);
	void* mem = allocChunk(chunkbytes, fallible);
	if (!mem) {
		NanoAssert(fallible);
		return false;
	}

	Chunk* chunk = (Chunk*) mem;
	chunk->prev = current_chunk;
	current_chunk = chunk;
	current_top = (char*)chunk->data;
	current_limit = (char*)mem + chunkbytes;
	return true;
}
// Find (or create) a run of num_slots contiguous free slots and mark them as
// allocated, returning the index of the first slot in the run. A run never
// spans a chunk boundary, so requests larger than one chunk are fatal.
std::size_t StorageManager::getSlots(const std::size_t num_slots) {
  if (num_slots > kAllocationChunkSizeSlots) {
    FATAL_ERROR("Attempted to allocate more than kAllocationChunkSizeSlots "
                "contiguous slots in StorageManager::getSlots()");
  }
  size_t min_slot;
  bool found = false;
  // First-fit search, scanning each existing chunk independently.
  for (size_t alloc_chunk_num = 0; alloc_chunk_num < alloc_chunks_.size(); ++alloc_chunk_num) {
    min_slot = alloc_chunk_num * kAllocationChunkSizeSlots;
    // Candidate starts may slide forward only while the whole run would
    // still fit inside this chunk.
    while (min_slot <= (alloc_chunk_num + 1) * kAllocationChunkSizeSlots - num_slots) {
      found = true;
      for (size_t i = min_slot; i < min_slot + num_slots; ++i) {
        if (!free_bitmap_[i]) {
          // Hit an occupied slot: no run through it can work, so restart
          // the search just past it.
          min_slot = i + 1;
          found = false;
          break;
        }
      }
      if (found) {
        break;
      }
    }
    if (found) {
      break;
    }
  }
  if (!found) {
    // No existing chunk has room; grow by one chunk and take the run from
    // its beginning (guaranteed to fit by the size check above).
    allocChunk();
    min_slot = (alloc_chunks_.size() - 1) * kAllocationChunkSizeSlots;
  }
  // Claim the run in the bitmap (false == in use).
  for (size_t i = min_slot; i < min_slot + num_slots; ++i) {
    free_bitmap_[i] = false;
  }
#ifdef QUICKSTEP_CLEAR_BLOCK_MEMORY
  // Optionally zero the slots so callers see deterministic memory.
  memset(getSlotAddress(min_slot), 0, num_slots * kSlotSizeBytes);
#endif
  return min_slot;
}
// Allocate a body of allocSize bytes for the object referenced by ote,
// taking it from the fixed-size pools when small enough and from the heap
// otherwise. Records the chosen space and location in the OTE.
ST::Object* ObjectMemory::AllocObj(OTE * ote, MWORD allocSize)
{
	ST::Object* pBody;
	if (allocSize > MaxSmallObjectSize)
	{
		// Too big for the pools: normal space and other spaces come off the
		// heap (there should not be too many of these)
		pBody = static_cast<POBJECT>(allocChunk(allocSize));
		ote->m_flags.m_space = OTEFlags::NormalSpace;
	}
	else
	{
		// Small enough to be satisfied from one of the memory pools
		pBody = static_cast<POBJECT>(allocSmallChunk(allocSize));
		ote->m_flags.m_space = OTEFlags::PoolSpace;
	}
	ote->m_location = pBody;
	return pBody;
}
// Collapse the buffer's chunk list into a single chunk holding all of the
// data, preserving order. A buffer with zero or one chunks is already
// consolidated and is left untouched.
MojErr MojBuffer::consolidate()
{
	if (m_chunks.size() <= 1)
		return MojErrNone;

	// Total payload across every chunk.
	MojSize total = 0;
	for (ChunkList::ConstIterator i = m_chunks.begin(); i != m_chunks.end(); ++i) {
		total += (*i)->dataSize();
	}
	// Single chunk large enough for the whole payload.
	Chunk* merged = allocChunk(total);
	MojAllocCheck(merged);
	// Concatenate every chunk's data into the merged chunk.
	for (ChunkList::ConstIterator i = m_chunks.begin(); i != m_chunks.end(); ++i) {
		merged->write((*i)->data(), (*i)->dataSize());
	}
	// Drop the old chunks and install the merged one.
	clear();
	m_chunks.pushBack(merged);
	return MojErrNone;
}
// Resize the body of the object referenced by ote to byteSize bytes, plus
// `extra` slack bytes that are allocated but not reflected in the logical
// size. The body may move; the (possibly new) location is stored back into
// the OTE and returned. Returns NULL for objects in spaces that cannot be
// resized.
POBJECT ObjectMemory::basicResize(POTE ote, MWORD byteSize, int extra)
{
	ASSERT(!isIntegerObject(ote));
	POBJECT pObject;

	/*
	#ifdef _DEBUG
			TRACESTREAM << "Resizing ";
			Interpreter::printObject(Oop(ote), TRACESTREAM);
			TRACESTREAM << " (" << ote->m_location << ") from size " <<
				ObjectMemory::sizeOf(ote) << " to size " << byteSize << "\n";
	#endif
	*/

	switch(ote->heapSpace())
	{
		case OTEFlags::NormalSpace:
			{
				//TRACE("Resizing normal object...\n");
				// Heap objects are resized in place via the heap allocator.
				pObject = ote->m_location;
				pObject = reallocChunk(pObject, byteSize+extra);
				if (pObject)
				{
					ote->m_location = pObject;
					ote->setSize(byteSize);
				}
				break;
			}

		case OTEFlags::VirtualSpace:
			//TRACE("Resizing virtual object...\n");
			// Virtual-memory backed objects have their own resize path.
			pObject = resizeVirtual(ote, byteSize+extra);
			break;

		case OTEFlags::PoolSpace:
			{
#if defined(_DEBUG)
				if (abs(Interpreter::executionTrace) > 0)
					checkPools();
#endif
				// May be able to do some quicker resizing here if size is still in same pool?
				// The new body either graduates to the heap (if now too big
				// for the pools) or comes from another pool.
				if ((byteSize+extra) > MaxSmallObjectSize)
				{
					pObject = allocChunk(byteSize+extra);
					ote->m_flags.m_space = OTEFlags::NormalSpace;
				}
				else
					pObject = allocSmallChunk(byteSize+extra);

				POBJECT pOldObject = ote->m_location;
				MWORD oldSize = ote->getSize();
				// Copy only the bytes common to the old and new sizes.
				memcpy(pObject, pOldObject, min(oldSize, byteSize));
				// NOTE: the old pool chunk must be freed using the OLD size,
				// so this happens before setSize() below updates the OTE.
				freeSmallChunk(pOldObject, ote->sizeOf());
				ote->m_location = pObject;
				ote->setSize(byteSize);
				break;
			}

		default:
			// Not resizeable (e.g. read-only/immutable spaces).
			return NULL;
	}

#if defined(_DEBUG)
	if (abs(Interpreter::executionTrace) > 0)
		checkPools();
//	TRACESTREAM << "After Resize: ";
//	Interpreter::printObject(Oop(ote), TRACESTREAM);
//	TRACESTREAM << "\n";
#endif

	return pObject;
}
// Store `buffer` as the cached contents for the chunk at chunkBasePosition.
// The data is split across a linked chain of fixed-size file chunks: the
// existing chain for this position is reused chunk-by-chunk, extended when
// the buffer grew, trimmed (and the surplus freed) when it shrank, and then
// the bytes are written out to the backing file. Holds theLock throughout.
void ChunkCache::setChunk(PositionI chunkBasePosition, const std::vector<std::uint8_t> &buffer)
{
    getDebugLog() << "stored chunk at " << chunkBasePosition << " with buffer of length " << buffer.size() << postnl;
    std::unique_lock<std::mutex> lockIt(theLock);
    // operator[] below value-initializes a missing entry, which must equal
    // NullChunk for the "no chain yet" logic to work.
    static_assert(NullChunk == std::size_t(), "invalid value for NullChunk");
    std::size_t &startingChunkIndex = startingChunksMap[chunkBasePosition];
    std::size_t prevChunkIndex = NullChunk;
    std::size_t bufferPos = 0;
    // Pass 1: walk/extend the chain so it has exactly enough chunks to hold
    // the buffer, recording each chunk's used size as we go.
    while(bufferPos < buffer.size())
    {
        std::size_t chunkIndex;
        if(prevChunkIndex == NullChunk)
            chunkIndex = startingChunkIndex;
        else
            chunkIndex = fileChunks[prevChunkIndex].nextChunk;
        if(chunkIndex == NullChunk)
        {
            // Chain ran out before the buffer did: append a new chunk.
            chunkIndex = allocChunk();
            if(prevChunkIndex == NullChunk)
                startingChunkIndex = chunkIndex;
            else
                fileChunks[prevChunkIndex].nextChunk = chunkIndex;
        }
        prevChunkIndex = chunkIndex;
        // Each chunk holds up to chunkSize bytes; the last may be partial.
        std::size_t currentChunkSize = chunkSize;
        if(currentChunkSize > buffer.size() - bufferPos)
            currentChunkSize = buffer.size() - bufferPos;
        fileChunks[chunkIndex].usedSize = currentChunkSize;
        bufferPos += currentChunkSize;
    }
    // Detach any chunks left over beyond the end of the (possibly shorter)
    // new data, then free them.
    std::size_t freeChunkIndex;
    if(prevChunkIndex == NullChunk)
    {
        // Empty buffer: the whole existing chain is surplus.
        freeChunkIndex = startingChunkIndex;
        startingChunkIndex = NullChunk;
    }
    else
    {
        freeChunkIndex = fileChunks[prevChunkIndex].nextChunk;
        fileChunks[prevChunkIndex].nextChunk = NullChunk;
    }
    while(freeChunkIndex != NullChunk)
    {
        std::size_t nextChunkIndex = fileChunks[freeChunkIndex].nextChunk;
        freeChunk(freeChunkIndex);
        freeChunkIndex = nextChunkIndex;
    }
    // Pass 2: walk the chain again and write each chunk's bytes to the file.
    bufferPos = 0;
    std::size_t chunkIndex = startingChunkIndex;
    while(bufferPos < buffer.size())
    {
        assert(chunkIndex != NullChunk);
        std::size_t currentChunkSize = chunkSize;
        if(currentChunkSize > buffer.size() - bufferPos)
            currentChunkSize = buffer.size() - bufferPos;
        fileChunks[chunkIndex].usedSize = currentChunkSize;
        try
        {
            // File offsets assume index 0 is the reserved null chunk, so
            // chunk i lives at (i - 1) * chunkSize.
            static_assert(NullChunk == 0, "invalid value for NullChunk");
            std::int64_t filePosition = static_cast<std::int64_t>(chunkIndex - 1) * chunkSize; // skip null chunk
            writer->seek(filePosition, stream::SeekPosition::Start);
            writer->writeBytes(&buffer[bufferPos], currentChunkSize);
        }
        catch(stream::IOException &e)
        {
            // Best-effort flush of whatever was written before rethrowing;
            // a flush failure here must not mask the original error.
            try
            {
                writer->flush();
            }
            catch(stream::IOException &)
            {
            }
            throw e;
        }
        bufferPos += currentChunkSize;
        chunkIndex = fileChunks[chunkIndex].nextChunk;
    }
    // Pass 1 sized the chain exactly, so the walk must end at the chain end.
    assert(chunkIndex == NullChunk);
    writer->flush();
}