// Writes `size` bytes from `data` into a chain of cache blocks linked via
// Block::next, returning the index of the first allocated block
// (0 when data is null or size <= 0).
int CacheFile::writeFile(BYTE *data, int size) {
    if ((data) && (size > 0)) {
        // NOTE(review): 1 + size/BLOCK_SIZE allocates an extra, empty
        // trailing block whenever size is an exact multiple of BLOCK_SIZE —
        // confirm whether the reader relies on this before changing it to
        // ceil-division.
        int nr_blocks_required = 1 + (size / BLOCK_SIZE);
        int count = 0;
        int s = 0;                // source offset into `data`
        int stored_alloc;         // first block index, returned to the caller
        int alloc;
        stored_alloc = alloc = allocateBlock();
        do {
            int copy_alloc = alloc;
            Block *block = lockBlock(copy_alloc);
            block->next = 0;
            // Copy a full block, or only the remaining tail on the last pass.
            memcpy(block->data, data + s, (s + BLOCK_SIZE > size) ? size - s : BLOCK_SIZE);
            // Allocate and link the next block before unlocking this one.
            if (count + 1 < nr_blocks_required)
                alloc = block->next = allocateBlock();
            unlockBlock(copy_alloc);
            s += BLOCK_SIZE;
        } while (++count < nr_blocks_required);
        return stored_alloc;
    }
    return 0;
}
// Refills the caller's free list for chunks of `nBytes`: takes one
// pre-filled chunk list from the global pool (thread-safe path) or carves
// a fresh block.  Returns the first chunk; the rest stay in pFreeBytes.
void *PoolMemoryAllocator::fillPool(MemElemPtr &pFreeBytes, __uint16 nBytes)
{
#ifdef OGDF_MEMORY_POOL_NTS
    // Non-thread-safe build: no shared pool, allocate a block directly.
    pFreeBytes = allocateBlock(nBytes);
#else
    s_criticalSection->enter();
    PoolElement &pe = s_pool[nBytes];
    if(pe.m_currentVector != 0) {
        // Pop the chunk list stored at the current vector slot.
        pFreeBytes = pe.m_currentVector->m_pool[pe.m_index];
        if(--pe.m_index < 0) {
            // Vector exhausted: step back to the previous vector and
            // recycle the empty one onto the free-vector list.
            PoolVector *pV = pe.m_currentVector;
            pe.m_currentVector = pV->m_prev;
            pe.m_index = ePoolVectorLength-1;
            MemElemPtr(pV)->m_next = s_freeVectors;
            s_freeVectors = MemElemPtr(pV);
        }
        s_criticalSection->leave();
    } else {
        // Global pool is empty: release the lock before the slow block
        // allocation so other threads are not held up.
        s_criticalSection->leave();
        pFreeBytes = allocateBlock(nBytes);
    }
#endif
    // Detach the first chunk from the list and hand it to the caller.
    MemElemPtr p = pFreeBytes;
    pFreeBytes = p->m_next;
    return p;
}
/**
 * Writes `size` bytes from `buffer` into the filesystem starting at
 * `offset` bytes into block `position`, following and extending the
 * block chain as needed.
 * @param position starting block (END_BLOCK writes nothing)
 * @param buffer   source bytes
 * @param size     number of bytes to write
 * @param offset   byte offset within `position` at which to start
 * @return number of bytes actually written
 */
int POI::writeBlock(Block position, const char *buffer, int size, int offset) {
    /* already past the end of the chain: nothing to write */
    if (position == END_BLOCK) {
        return 0;
    }
    /* offset lies beyond this block: skip ahead to the next block */
    if (offset >= BLOCK_SIZE) {
        /* allocate the next block if the chain ends here */
        if (nextBlock[position] == END_BLOCK) {
            setNextBlock(position, allocateBlock());
        }
        return writeBlock(nextBlock[position], buffer, size, offset - BLOCK_SIZE);
    }
    file.seekp(BLOCK_SIZE * DATA_POOL_OFFSET + position * BLOCK_SIZE + offset);
    /* clamp this write to what fits in the current block */
    int size_now = size;
    if (offset + size_now > BLOCK_SIZE) {
        size_now = BLOCK_SIZE - offset;
    }
    file.write(buffer, size_now);
    /* spill the remainder into the next block */
    if (offset + size > BLOCK_SIZE) {
        /* allocate the next block if the chain ends here */
        if (nextBlock[position] == END_BLOCK) {
            setNextBlock(position, allocateBlock());
        }
        /* BUGFIX: continue from the bytes actually written (size_now), not
         * a fixed BLOCK_SIZE stride — with offset > 0 the old code skipped
         * `offset` unwritten source bytes.  Note size - size_now equals the
         * original remaining-size expression offset + size - BLOCK_SIZE. */
        return size_now + writeBlock(nextBlock[position], buffer + size_now,
                                     size - size_now, 0);
    }
    return size_now;
}
bool_t writeFile (uint8_t channel) { struct channelTableStruct *channelStruct = &channelTable[channel]; ATTENTION_OFF(); /* if at end of inode, make new inode */ if (channelStruct->inodePtr == (BLOCKSIZE / sizeof (block_t)) - 1) { if (!(channelStruct->inode[channelStruct->inodePtr] = allocateBlock())) { return FALSE; /* FIXME, cleanup */ } ataPutBlock (channelStruct->inodeBlock, (uint8_t *)(channelStruct->inode)); channelStruct->inodeBlock = channelStruct->inode[channelStruct->inodePtr]; memset (channelStruct->inode, '\0', BLOCKSIZE); channelStruct->inodePtr = 0; } /* allocate new block */ if (!(channelStruct->inode[channelStruct->inodePtr] = allocateBlock())) { return FALSE; /* FIXME, cleanup */ } /* write block to disk */ ataPutBlock (channelStruct->inode[channelStruct->inodePtr++], channelStruct->buffer); channelStruct->dirEntry.fileSize++; ATTENTION_ON(); return TRUE; }
char testFunctionCallback(float seconds, float frequency) { MMRESULT result; HWAVEOUT waveOut; result = waveOutOpen(&waveOut, WAVE_MAPPER, &waveFormat, (DWORD_PTR)&waveOutCallback, 0, CALLBACK_FUNCTION); if(result != MMSYSERR_NOERROR) { printf("waveOutOpen failed (result=%d)\n", result); return 1; } printf("[tid=%d] Opened Wave Mapper!\n", GetCurrentThreadId()); fflush(stdout); waitForKey("to start rendering sound"); DWORD sampleCount = seconds * waveFormat.nSamplesPerSec; LPSTR block1 = allocateBlock(sampleCount); LPSTR block2 = allocateBlock(sampleCount); fillSinWave(block1, frequency, sampleCount); fillSinWave(block2, frequency * 1.5, sampleCount); printf("Writing block (0x%p)...\n", block1); fflush(stdout); { WAVEHDR header1; WAVEHDR header2; ZeroMemory(&header1, sizeof(WAVEHDR)); header1.dwBufferLength = sampleCount * waveFormat.nBlockAlign; header1.lpData = block1; writeBlock(waveOut, &header1); ZeroMemory(&header2, sizeof(WAVEHDR)); header2.dwBufferLength = sampleCount * waveFormat.nBlockAlign; header2.lpData = block2; writeBlock(waveOut, &header2); } waitForKey("to close"); waveOutClose(waveOut); return 0; }
// Slow-path allocation for a size class: retry the existing blocks, add a
// new block (forced to succeed while under the water mark or when it is
// unsafe to collect), and otherwise fall back to a collection followed by
// a must-succeed block allocation.
void* AllocationSpace::allocateSlowCase(MarkedSpace::SizeClass& sizeClass)
{
#if COLLECT_ON_EVERY_ALLOCATION
    m_heap->collectAllGarbage();
    ASSERT(m_heap->m_operationInProgress == NoOperation);
#endif

    void* result = tryAllocate(sizeClass);

    if (LIKELY(result != 0))
        return result;

    // Decide whether a failed block allocation may be answered with a GC:
    // below the water mark (or when collection is unsafe) the block
    // allocation must succeed outright.
    AllocationEffort allocationEffort;
    if ((
#if ENABLE(GGC)
         m_markedSpace.nurseryWaterMark() < m_heap->m_minBytesPerCycle
#else
         m_heap->waterMark() < m_heap->highWaterMark()
#endif
         ) || !m_heap->m_isSafeToCollect)
        allocationEffort = AllocationMustSucceed;
    else
        allocationEffort = AllocationCanFail;

    MarkedBlock* block = allocateBlock(sizeClass.cellSize, allocationEffort);
    if (block) {
        m_markedSpace.addBlock(sizeClass, block);
        void* result = tryAllocate(sizeClass);
        ASSERT(result);
        return result;
    }

    // Block allocation was allowed to fail: collect, retry, then force a
    // final block allocation that must succeed.
    m_heap->collect(Heap::DoNotSweep);

    result = tryAllocate(sizeClass);

    if (result)
        return result;

    ASSERT(m_heap->waterMark() < m_heap->highWaterMark());

    m_markedSpace.addBlock(sizeClass, allocateBlock(sizeClass.cellSize, AllocationMustSucceed));

    result = tryAllocate(sizeClass);
    ASSERT(result);
    return result;
}
// Sets up the two semispaces — blocks1 becomes to-space, blocks2 becomes
// from-space — and seeds the space with an initial block.
void CopiedSpace::init()
{
    m_toSpace   = &m_blocks1;
    m_fromSpace = &m_blocks2;

    allocateBlock();
}
// Finishes the copying phase: waits until every loaned block has been
// returned, then moves the surviving from-space blocks back into
// to-space and re-seeds the bump allocator.
void CopiedSpace::doneCopying()
{
    {
        // Block until concurrent copying threads have returned every block
        // they borrowed.
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase);
    m_inCopyingPhase = false;

    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = m_fromSpace->removeHead();
        // All non-pinned blocks in from-space should have been reclaimed as they were evacuated.
        ASSERT(block->isPinned() || !m_shouldDoCopyPhase);
        block->didSurviveGC();
        // We don't add the block to the blockSet because it was never removed.
        ASSERT(m_blockSet.contains(block));
        m_blockFilter.add(reinterpret_cast<Bits>(block));
        m_toSpace->push(block);
    }

    // Ensure the allocator has a current block to bump-allocate from.
    if (!m_toSpace->head())
        allocateBlock();
    else
        m_allocator.setCurrentBlock(m_toSpace->head());

    m_shouldDoCopyPhase = false;
}
// Slow-path allocation: the free list is exhausted, so report the bytes it
// handed out, give the heap a chance to collect (or defer), and finally
// grow the allocator with a fresh block.
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
#if COLLECT_ON_EVERY_ALLOCATION
    if (!m_heap->isDeferred())
        m_heap->collectAllGarbage();
    ASSERT(m_heap->m_operationInProgress == NoOperation);
#endif

    ASSERT(!m_markedSpace->isIterating());
    ASSERT(!m_freeList.head);
    // Account for the bytes consumed from the (now empty) free list.
    m_heap->didAllocate(m_freeList.bytes);

    void* result = tryAllocate(bytes);

    if (LIKELY(result != 0))
        return result;

    // Let the heap decide whether to collect before growing.
    if (m_heap->collectIfNecessaryOrDefer()) {
        result = tryAllocate(bytes);
        if (result)
            return result;
    }

    ASSERT(!m_heap->shouldCollect());

    MarkedBlock* block = allocateBlock(bytes);
    ASSERT(block);
    addBlock(block);

    result = tryAllocate(bytes);
    ASSERT(result);
    return result;
}
/*
 * Renders a sine wave of the requested length and frequency through the
 * Wave Mapper without any callback mechanism.  Returns 0 on success,
 * 1 if the device cannot be opened.
 */
char testNoCallback(float seconds, float frequency)
{
    HWAVEOUT device;
    MMRESULT status = waveOutOpen(&device, WAVE_MAPPER, &waveFormat,
                                  0, 0, CALLBACK_NULL);
    if (status != MMSYSERR_NOERROR) {
        printf("waveOutOpen failed (result=%d)\n", status);
        return 1;
    }

    printf("Opened Wave Mapper!\n");
    fflush(stdout);

    /* build the audio data */
    DWORD samples = seconds * waveFormat.nSamplesPerSec;
    LPSTR audio = allocateBlock(samples);
    fillSinWave(audio, frequency, samples);

    /* hand it to the device */
    printf("Writing block...\n");
    fflush(stdout);
    writeAudioBlock(device, audio, samples * waveFormat.nBlockAlign);

    waveOutClose(device);
    return 0;
}
// Slow-path allocation: account for the exhausted free list, collect if
// the heap asks for it, then grow the allocator with a fresh block.
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    ASSERT(m_heap->vm()->apiLock().currentThreadIsHoldingLock());
#if COLLECT_ON_EVERY_ALLOCATION
    m_heap->collectAllGarbage();
    ASSERT(m_heap->m_operationInProgress == NoOperation);
#endif

    ASSERT(!m_freeList.head);
    // Account for the bytes consumed from the (now empty) free list.
    m_heap->didAllocate(m_freeList.bytes);

    void* result = tryAllocate(bytes);

    if (LIKELY(result != 0))
        return result;

    // Collect first if the heap wants it, and retry before growing.
    if (m_heap->shouldCollect()) {
        m_heap->collect(Heap::DoNotSweep);
        result = tryAllocate(bytes);
        if (result)
            return result;
    }

    ASSERT(!m_heap->shouldCollect());

    MarkedBlock* block = allocateBlock(bytes);
    ASSERT(block);
    addBlock(block);

    result = tryAllocate(bytes);
    ASSERT(result);
    return result;
}
// Replaces occurrences of `search` with `replace`, scanning from `offset`.
// A maxcnt <= 0 means "replace all"; otherwise at most `maxcnt`
// replacements are made.  Returns *this.
ByteArray &ByteArray::replace(ByteArray search, ByteArray replace, int offset, int maxcnt)
{
    ssize_t cnt = 0, index, size_dif, slen, rlen;
    slen = search.length();
    rlen = replace.length();
    size_dif = rlen - slen;   // how much each replacement grows (+) / shrinks (-) the data

    while ((index = indexOf(search, offset)) != -1) {
        if (maxcnt > 0 && cnt == maxcnt)
            break;
        if (size_dif > 0) {
            // Growing: make room, then shift the tail right by size_dif.
            if (_length + size_dif >= size)
                allocateBlock(_length + size_dif);
            for (ssize_t i = _length; i >= index + slen; i--)
                buffer[i + size_dif] = buffer[i];
        } else if (size_dif < 0) {
            // Shrinking: shift the tail left (i - size_dif == i + |size_dif|).
            for (ssize_t i = index + rlen; i < _length; i++)
                buffer[i] = buffer[i - size_dif];
        }
        memcpy(&buffer[index], replace.operator const char *(), rlen);
        _length += size_dif;
        // BUGFIX: resume the scan after the inserted text.  Re-scanning from
        // the unchanged `offset` looped forever whenever `replace` contains
        // `search` (e.g. replace("a", "ba")), and cascaded re-matches over
        // freshly inserted text.
        offset = index + rlen;
        cnt++;
    }
    return *this;
}
/*
 * First-fit allocator over an implicit block list.  Each block is preceded
 * by a one-int size header; a header of 0 terminates the list.  Returns a
 * pointer to the payload, or NULL when no free block is big enough.
 */
void *myalloc2(int size)
{
    /* NOTE(review): this rounds to (size+3)/8*8 + 8, which is not the usual
     * round-up-to-8 of (size+7)/8*8 — confirm against the allocator's
     * intended header/alignment scheme. */
    int mem_size = ((size+3)/8 + 1) * 8;
    int *ptr = start;              /* cursor over the block list */
    int newSize=0;
    int oldsize = *(start-1);      /* size header of the first block */
    while(oldsize != 0){
        if (oldsize >= mem_size && !blockAllocated(ptr)){
            /* claim this block, shrinking its header to the rounded size */
            *(ptr-1) = mem_size;
            allocateBlock(ptr);
            /* split: give any leftover bytes their own size header */
            newSize = oldsize - mem_size;
            if (newSize > 0)
                *(nextBlock(ptr)-1) = newSize;
            return ptr;
        }
        ptr = nextBlock(ptr);
        oldsize = *(ptr-1);
    }
    fprintf(stderr, "No space to allocate more\n");
    return NULL;
}
/*
 * Scans the directory chain (starting from the block currently held in
 * dirBuffer) for a free entry (startBlock == 0), extending the chain with
 * a fresh zeroed block when every existing block is full.  Returns NULL
 * only when no new block can be allocated.
 */
struct dirEntryStruct *getUnusedEntry (void)
{
    while (TRUE) {
        entryIndex_t i;

        /* search through current block for an empty entry */
        for (i = 0; i < DIR_ENTRIES_IN_BLOCK; i++) {
            if (!dirBuffer.dirEntry[i].startBlock) {
                return &dirBuffer.dirEntry[i];
            }
        }

        /* if no more blocks in directory chain, make a new one */
        if (!dirBuffer.nextBlock) {
            if (!(dirBuffer.nextBlock = allocateBlock())) {
                return NULL;
            }
            /* flush the old block, which now links to the new one ... */
            dirBufferChanged = TRUE;
            flushDirBuffer();
            /* ... then switch the buffer to the new, zeroed block.
             * NOTE(review): dirBufferBlockNumber = ~0 presumably marks the
             * cached block number as invalid — confirm against getBlock(). */
            dirBufferBlock = dirBuffer.nextBlock;
            dirBufferBlockNumber = ~0;
            memset (&dirBuffer, '\0', BLOCKSIZE);
            dirBufferChanged = TRUE;
        } else {
            /* load the next dir block in chain */
            getBlock (dirBuffer.nextBlock);
        }
    }
    return NULL;   /* unreachable: the loop only exits via return */
}
// Slow-path allocation: run any test collections, account for the
// exhausted free list, give the heap a chance to collect or defer, then
// grow the allocator with a fresh block.
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    doTestCollectionsIfNeeded();

    ASSERT(!m_markedSpace->isIterating());
    ASSERT(!m_freeList.head);
    // Account for the bytes consumed from the (now empty) free list.
    m_heap->didAllocate(m_freeList.bytes);

    void* result = tryAllocate(bytes);

    if (LIKELY(result != 0))
        return result;

    // Let the heap decide whether to collect before growing.
    if (m_heap->collectIfNecessaryOrDefer()) {
        result = tryAllocate(bytes);
        if (result)
            return result;
    }

    ASSERT(!m_heap->shouldCollect());

    MarkedBlock* block = allocateBlock(bytes);
    ASSERT(block);
    addBlock(block);

    result = tryAllocate(bytes);
    ASSERT(result);
    return result;
}
// Wires up both generations' semispaces (blocks1 serves as to-space and
// blocks2 as from-space in each) and allocates the initial block.
void CopiedSpace::init()
{
    m_oldGen.toSpace   = &m_oldGen.blocks1;
    m_oldGen.fromSpace = &m_oldGen.blocks2;

    m_newGen.toSpace   = &m_newGen.blocks1;
    m_newGen.fromSpace = &m_newGen.blocks2;

    allocateBlock();
}
// Slow-path allocation: retry the existing blocks, then request a new
// block (allowed to fail only when a collection could rescue us); on
// failure, collect and retry, finally forcing a must-succeed allocation.
void* MarkedAllocator::allocateSlowCase()
{
#if COLLECT_ON_EVERY_ALLOCATION
    m_heap->collectAllGarbage();
    ASSERT(m_heap->m_operationInProgress == NoOperation);
#endif

    void* result = tryAllocate();

    if (LIKELY(result != 0))
        return result;

    // Only let the block allocation fail if a GC is a viable fallback.
    AllocationEffort allocationEffort;
    if (m_heap->shouldCollect())
        allocationEffort = AllocationCanFail;
    else
        allocationEffort = AllocationMustSucceed;

    MarkedBlock* block = allocateBlock(allocationEffort);
    if (block) {
        addBlock(block);
        void* result = tryAllocate();
        ASSERT(result);
        return result;
    }

    // Block allocation failed (was allowed to): collect and retry.
    m_heap->collect(Heap::DoNotSweep);

    result = tryAllocate();

    if (result)
        return result;

    ASSERT(m_heap->waterMark() < m_heap->highWaterMark());

    addBlock(allocateBlock(AllocationMustSucceed));

    result = tryAllocate();
    ASSERT(result);
    return result;
}
void* Allocator::alloc(size_t size) { //Find a free block of memory. char* pBlock = findFreeBlock(size); if(!pBlock) throw std::bad_alloc(); //Allocate the block allocateBlock(pBlock, size); return (void*)pBlock; }
// Advances the pool element to its next vector slot, attaching a vector
// from the free-vector list (allocating one when that list is empty)
// whenever the current vector is missing or full.
void PoolMemoryAllocator::incVectorSlot(PoolElement &pe)
{
    if(pe.m_currentVector == 0 || ++pe.m_index == ePoolVectorLength) {
        // Need a fresh PoolVector; reuse a recycled one if available.
        if(s_freeVectors == 0)
            s_freeVectors = allocateBlock(sizeof(PoolVector));
        PoolVector *pv = (PoolVector *)s_freeVectors;
        s_freeVectors = MemElemPtr(pv)->m_next;
        // NOTE(review): pv->m_prev is not set here — presumably the caller
        // maintains the back-link; confirm before relying on it.
        pe.m_currentVector = pv;
        pe.m_index = 0;
    }
}
// Get a chunk of memory from the pool T* getMem() { // Allocate a new block if we've run out of chunks if (_freeList.empty()) allocateBlock(); // Grab the next chunk off the free list T* chunk = _freeList.front(); _freeList.pop_front(); return chunk; }
// Slow path for bump allocation: the current block is full, so report its
// consumed capacity to the heap, start a new block, and force-allocate
// the requested bytes from it.  Oversize requests are delegated.
CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
{
    if (isOversize(bytes))
        return tryAllocateOversize(bytes, outPtr);

    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    // Account for the block we are about to retire.
    m_heap->didAllocate(m_allocator.currentCapacity());

    allocateBlock();

    // A fresh block always has room for a non-oversize request.
    *outPtr = m_allocator.forceAllocate(bytes);
    return true;
}
// Inserts `len` bytes from `ins` at position `index`, shifting the
// existing tail right and growing the backing store if needed.
ByteArray &ByteArray::insert(int index, const char* ins, size_t len)
{
    // Grow the backing store when the insertion would overflow it.
    if (_length + len >= size)
        allocateBlock(_length + len);

    // Shift the tail (including the byte at _length) right by `len`;
    // memmove handles the overlapping ranges.
    const ssize_t tail = _length - index + 1;
    if (tail > 0)
        memmove(&buffer[index + len], &buffer[index], tail);

    memcpy(&buffer[index], ins, len);
    _length += len;
    return *this;
}
/*
 * Free-list malloc: rounds the request up to the block format's size
 * granularity, first-fit scans the explicit free list (splitting large
 * blocks), and grows the heap then retries when nothing fits.
 * Returns a payload pointer, or NULL for an invalid request size.
 */
void* sf_malloc(size_t size){
    if(size < 0x1 || size > 0x100000000)
        return NULL; /* invalid request size */
    if(start == end)
        init_heapSpace(); /* no call has been made to request heap space, do so here */

    uintptr_t *cursor = (uintptr_t *) NULL_POINTER;

restart_search: /* kind of like the cache if there's no space big enough(miss), get that space and tell you to ask me again */
    cursor = (uintptr_t*) head;

    /* round the request up: minimum 16 bytes, multiple of 8, then padded
     * so that (allocate_size / 8) is even — presumably an alignment
     * invariant of the block format (TODO: confirm) */
    uintptr_t allocate_size = 0;
    if(size < 16)
        allocate_size = 16;
    else if (size % 8 == 0)
        allocate_size = (uintptr_t) size;
    else
        allocate_size =(uintptr_t) (size + (8 - (size % 8)));
    if((allocate_size / 8) % 2 != 0)
        allocate_size += 8;

    uintptr_t *result = (uintptr_t*) NULL_POINTER;
    if(cursor != (uintptr_t *)NULL_POINTER){
        /* first-fit scan; *(cursor + 1) holds the next-block pointer */
        while(get_loadSize(cursor) < allocate_size && (uintptr_t) *(cursor + 1) != NULL_POINTER)
            cursor = (uintptr_t *) *(cursor + 1); /* cursor = cursor.getNext(); */

        uintptr_t loadSize = get_loadSize(cursor);
        /* if the request fits and there is space to split */
        if(loadSize > (allocate_size + 48)){
            uintptr_t **ptr = &cursor;
            splitBlock(ptr, size, allocate_size, loadSize);
            result = cursor + 1;   /* payload starts one word past the header */
        } else if(loadSize >= allocate_size){
            /* fits exactly, or the remainder is too small to split off */
            uintptr_t **ptr = &cursor;
            allocateBlock(ptr, size, allocate_size);
            result = cursor + 1;
        }else if(loadSize < allocate_size){
            /* even the last block is too small: grow the heap and retry */
            addHeapSpace(allocate_size);
            goto restart_search;
        }
    }
    /* could not find a space big enough to hold the requested size */
    else {
        addHeapSpace(allocate_size);
        goto restart_search;
    }
    return result;
}
// Allocates a cell from the given size class: tries each remaining block
// in the class's list, charging exhausted blocks' capacity to the water
// mark, then requests a brand-new block while the space is still under
// its high-water mark.  Returns 0 to signal that a collection is needed.
void* MarkedSpace::allocateFromSizeClass(SizeClass& sizeClass)
{
    // `block` is a *reference* to sizeClass.nextBlock, so advancing the
    // loop permanently retires exhausted blocks from the allocation cursor.
    for (MarkedBlock*& block = sizeClass.nextBlock ; block; block = block->next()) {
        if (void* result = block->allocate())
            return result;

        m_waterMark += block->capacity();
    }

    if (m_waterMark < m_highWaterMark)
        return allocateBlock(sizeClass)->allocate();

    return 0;
}
// Allocates `size` bytes with the requested `alignment` from the block
// allocator.  Returns an aligned pointer inside the allocated block, or
// nullptr (after logging an error) when no suitable block is available.
Byte* MemoryManager<AllocatorType>::allocate(size_t size, size_t alignment)
{
    // Over-allocate by `alignment` so an aligned address always fits.
    const size_t index{getIndexFromSize(size + alignment)};
    typename BlockList::iterator it{allocateBlock(index)};
    if(_activateMemoryDump)
        memoryDump();
    if(it == _allocatedBlocks[index].end())
    {
        // BUGFIX: corrected the misspelled class name in the error message
        // ("MemoryMamanger" -> "MemoryManager").
        out << "Error: MemoryManager::allocate: no more memory, nullptr returned\n";
        return nullptr;
    }
    else
        return getAlignedAddress(it, alignment);
}
// Finishes the copying phase for the generational heap: waits for loaned
// blocks to be returned, moves surviving from-space blocks back into the
// collected generation's to-space, and (for eden collections) promotes
// the new generation's blocks into the old generation.
void CopiedSpace::doneCopying()
{
    {
        // Block until concurrent copying threads return every loaned block.
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase);
    m_inCopyingPhase = false;

    // Pick the generation that was actually collected this cycle.
    DoublyLinkedList<CopiedBlock>* toSpace;
    DoublyLinkedList<CopiedBlock>* fromSpace;
    TinyBloomFilter* blockFilter;
    if (heap()->operationInProgress() == FullCollection) {
        toSpace = m_oldGen.toSpace;
        fromSpace = m_oldGen.fromSpace;
        blockFilter = &m_oldGen.blockFilter;
    } else {
        toSpace = m_newGen.toSpace;
        fromSpace = m_newGen.fromSpace;
        blockFilter = &m_newGen.blockFilter;
    }

    while (!fromSpace->isEmpty()) {
        CopiedBlock* block = fromSpace->removeHead();
        // We don't add the block to the blockSet because it was never removed.
        ASSERT(m_blockSet.contains(block));
        blockFilter->add(reinterpret_cast<Bits>(block));
        toSpace->push(block);
    }

    // An eden collection promotes everything that survived in the new
    // generation into the old generation.
    if (heap()->operationInProgress() == EdenCollection) {
        m_oldGen.toSpace->append(*m_newGen.toSpace);
        m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
        m_oldGen.blockFilter.add(m_newGen.blockFilter);
        m_newGen.blockFilter.reset();
    }

    ASSERT(m_newGen.toSpace->isEmpty());
    ASSERT(m_newGen.fromSpace->isEmpty());
    ASSERT(m_newGen.oversizeBlocks.isEmpty());

    allocateBlock();

    m_shouldDoCopyPhase = false;
}
/*
 * Creates a subdirectory `name` in the current directory: claims a free
 * directory entry, allocates a start block for the new directory, and
 * initialises it with a "." entry that links back to the parent.
 * Returns TRUE on success, FALSE on any failure.
 */
inline extern bool_t createDir (char *name)
{
    struct dirEntryStruct *entry;

    getBlock (currentDir);

    if (nameValid (name)) {
        if ((entry = getUnusedEntry())) { /* no more than 256 entries, since dir cannot list more! */
            if (!getEntryByName (name)) { /* checks all entries, files included! */
                if ((entry->startBlock = allocateBlock())) {
                    dirBufferChanged = TRUE;

                    /* set entry */
                    memcpy (entry->fileName, name, FILE_NAME_SIZE);
                    entry->fileSize = 0;
                    entry->fileType = DIR;
                    entry->readOnly = FALSE;
                    entry->splat = FALSE;

                    /* NOTE(review): `entry` points into dirBuffer, and
                     * getBlock() below replaces dirBuffer's contents —
                     * presumably getBlock flushes the changed parent block
                     * first; confirm. */
                    {
                        /* init new directory block */
                        getBlock (entry->startBlock);
                        memset (&dirBuffer, '\0', BLOCKSIZE);
                        /* "." entry links the new directory back to its parent */
                        dirBuffer.dirEntry[0].startBlock = currentDir;
                        memcpy_P (dirBuffer.dirEntry[0].fileName, PSTR ("."), 3);
                        dirBuffer.dirEntry[0].fileSize = 0;
                        dirBuffer.dirEntry[0].fileType = DIR;
                        dirBuffer.dirEntry[0].readOnly = TRUE;
                        dirBuffer.dirEntry[0].splat = FALSE;
                        dirBufferChanged = TRUE;
                    }

                    flushDirBuffer();
                    flushFreeBlockList();
                    return TRUE;
                }
            }
        }
    }
    return FALSE;
}
// Allocates a cell of the given size class, retrying via recursion after
// either adding a fresh block (while under the high-water mark) or
// running a collection.
// NOTE(review): termination relies on allocateBlock()/collect() making
// progress — presumably guaranteed elsewhere; confirm.
void* Heap::allocate(NewSpace::SizeClass& sizeClass)
{
#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(m_operationInProgress == NoOperation);
#endif

    m_operationInProgress = Allocation;
    void* result = m_newSpace.allocate(sizeClass);
    m_operationInProgress = NoOperation;
    if (result)
        return result;

    // Under the water mark: grow the size class and retry.
    if (m_newSpace.waterMark() < m_newSpace.highWaterMark()) {
        m_newSpace.addBlock(sizeClass, allocateBlock(sizeClass.cellSize));
        return allocate(sizeClass);
    }

    // Otherwise collect and retry.
    collect(DoNotSweep);
    return allocate(sizeClass);
}
// Buddy allocation: returns an iterator to a newly allocated block of
// order `index` — taken from the free list, or created by recursively
// splitting a block of order index+1.  Returns end() of the relevant
// list on failure.
typename MemoryManager<AllocatorType>::BlockList::iterator MemoryManager<AllocatorType>::allocateBlock(size_t index)
{
    // Out-of-range order: should never happen (TODO: write an assert instead).
    // NOTE(review): indexing _allocatedBlocks[_addressSize] assumes the
    // array has _addressSize + 1 slots — confirm.
    if(index >= _addressSize)
        return _allocatedBlocks[_addressSize].end();

    if(_freeBlocks[index].empty())
    {
        // If we ask for the biggest block possible, but there is no free one
        if(index + 1 == _addressSize)
            return _allocatedBlocks[index].end();

        // Try to get a bigger block
        typename BlockList::iterator biggerBlock{allocateBlock(index + 1)};
        // If this is not possible
        if(biggerBlock == _allocatedBlocks[index + 1].end())
            return _allocatedBlocks[index].end();

        // Erase the bigger allocated block
        Byte* baseAddress{*biggerBlock};
        _allocatedBlocks[index + 1].erase(biggerBlock);

        // Create a block containing the second half of biggerBlock
        Byte* midAddress{baseAddress + (1L << index)};
        // Add it to free list
        _freeBlocks[index].pushFront(midAddress);
        // And add the allocated block (the first half of biggerBlock) to the allocated list
        _allocatedBlocks[index].pushFront(baseAddress);
        return _allocatedBlocks[index].begin();
    }
    else
    {
        // Move an element from the free list to the allocated list
        _allocatedBlocks[index].pushFront(_freeBlocks[index].front());
        _freeBlocks[index].popFront();
        return _allocatedBlocks[index].begin();
    }
}
// Drains readable bytes from `handle` into the chained block buffer,
// growing it one block at a time.  Returns the result of the last
// ::recv call: 0 on orderly shutdown, SOCKET_ERROR on error (which
// includes would-block on a non-blocking socket).
int TcpSocket::RecvBuffer::receive(Handle handle)
{
    while (true)
    {
        // Locate the write position: which block, and the offset within it.
        size_t blockIdx = _end / _blockSize;
        size_t blockPos = _end % _blockSize;

        // Append a new block when the write position is past the last one.
        if (blockIdx == _blocks.size())
            _blocks.push_back(allocateBlock());

        uint8* buf = _blocks[blockIdx] + blockPos;
        size_t bufSize = _blockSize - blockPos;

        int read = ::recv(handle, (char*)buf, bufSize, NIT_SOCKET_SENDRECV_FLAGS);

        if (read == SOCKET_ERROR || read == 0)
            return read;

        _end += read;
        assert(blockPos <= _blockSize);
    }
}