// ===== Example #1 =====
// create - Allocate a fresh, empty slab and link it onto the pool's slab list.
PoolSlab *
PoolSlab::create(BitmapPoolTy *Pool) {
  const unsigned SlabNodes = getSlabSize(Pool);

#ifndef NDEBUG
  // Sanity check: header + occupancy bitmap + node storage must fit in a page.
  unsigned Size = sizeof(PoolSlab) + 4*((SlabNodes+15)/16) +
    Pool->NodeSize*getSlabSize(Pool);
  assert(Size <= PageSize && "Trying to allocate a slab larger than a page!");
#endif

  PoolSlab *Slab = (PoolSlab*)AllocatePage();
  assert(Slab && "Allocating a page failed!");

  // Start from a zeroed header, then set the bookkeeping fields explicitly.
  memset(Slab, 0, sizeof(PoolSlab));
  Slab->NumNodesInSlab = SlabNodes;
  Slab->isSingleArray  = 0;   // Holds individual nodes, not one big array.
  Slab->FirstUnused    = 0;   // No node handed out yet.
  Slab->UsedBegin      = 0;
  Slab->UsedEnd        = 0;
  Slab->allocated      = 0;   // Byte counter starts at zero.

  // Every node begins life free, with no allocation-start marker.
  for (unsigned Node = 0, E = Slab->getSlabSize(); Node != E; ++Node) {
    Slab->markNodeFree(Node);
    Slab->clearStartBit(Node);
  }

  // Thread the new slab into the pool's list.
  Slab->addToList((PoolSlab**)&Pool->Ptr1);
  return Slab;
}
// ===== Example #2 =====
// AllocateNPages - Return storage spanning Num contiguous pages; a request
// for zero or one page is delegated to the single-page allocator.
// Returns 0 on mmap failure so callers can use the same null check they use
// for AllocatePage().
void *AllocateNPages(unsigned Num) {
  if (Num <= 1) return AllocatePage();

  void *Ptr = mmap(0, PageSize * Num, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
  // BUG FIX: mmap reports failure as MAP_FAILED ((void*)-1), not 0.
  // Returning it raw would pass a null check and then be dereferenced.
  if (Ptr == MAP_FAILED)
    return 0;
#ifdef STATISTIC
  AddressSpaceUsage1++;
#endif
  return Ptr;
}
// ===== Example #3 =====
	// AllocateDMA - Back a DMA region with physical memory, give it a virtual
	// window, and map the two together (flags 0x3 — presumably read|write;
	// confirm against MapRegion's flag definitions). Below4Gb constrains the
	// physical allocation, e.g. for devices limited to 32-bit addressing.
	// `size` appears to be counted in pages (per the disabled log line).
	DMAAddr AllocateDMA(uint64_t size, bool Below4Gb)
	{
		DMAAddr region;

		// Physical first; the virtual allocation and the mapping hang off it.
		region.phys = AllocatePage(size, Below4Gb);
		region.virt = Virtual::AllocateVirtual(size, 0, 0, region.phys);
		Virtual::MapRegion(region.virt, region.phys, size, 0x3);

		return region;
	}
// ===== Example #4 =====
// record one page
// RecordPage - Capture a copy of `page` the first time it is seen, then
// write-protect the captured frame. Repeat calls for the same page no-op.
VOID SANDBOX::RecordPage(const char * page)
{
    // Already captured? (_pages maps a page to its recorded frame.)
    if (_pages[page] != NULL)
    {
        return;
    }

    // NOTE(review): AllocatePage(page) is assumed to also register the new
    // frame in _pages; otherwise the guard above could never fire — confirm.
    const char * frame = AllocatePage(page);
    memcpy(const_cast<char *>(frame), page, PageSize);
    ProtectPage(frame);
}
void *FixedSizeAllocator::Allocate(void)
{
	// free list empty, create new page
	if (!m_freeList)
	{
		// allocate new page
		PageHeader *newPage = reinterpret_cast<PageHeader *>(AllocatePage());
		++m_numPages;
		m_numBlocks += m_blocksPerPage;
		m_numFreeBlocks += m_blocksPerPage;

		FillFreePage(newPage);

		// page list not empty, link new page
		if (m_pageList)
		{
			newPage->Next = m_pageList;
		}

		// push new page
		m_pageList = newPage;

		// link new free blocks
		BlockHeader *currBlock = newPage->Blocks();
		for (unsigned i = 0; i < m_blocksPerPage - 1; ++i)
		{
			currBlock->Next = NextBlock(currBlock);
			currBlock = NextBlock(currBlock);
		}
		currBlock->Next = nullptr; // last block

		// push new blocks
		m_freeList = newPage->Blocks();
	}

	// pop free block
	BlockHeader *freeBlock = m_freeList;
	m_freeList = m_freeList->Next;
	--m_numFreeBlocks;

	FillAllocatedBlock(freeBlock);

	return freeBlock;
}
// ===== Example #6 =====
	/*
	================
	idHeap::Init

	  Reset all heap bookkeeping and prime the small-block allocator
	  with its first page.
	================
	*/
	void idHeap::Init () {
		// global counters
		OSAllocs		= 0;
		pageRequests	= 0;
		pagesAllocated	= 0;

		// usable bytes per page, after the page_s header
		pageSize		= 65536 - sizeof( idHeap::page_s );

		// large heap manager starts with no pages
		largeFirstUsedPage	= NULL;
		swapPage			= NULL;

		// small heap manager: clear the free lists, grab the first page
		memset( smallFirstFree, 0, sizeof(smallFirstFree) );
		smallFirstUsedPage	= NULL;
		smallCurPage		= AllocatePage( pageSize );
		assert( smallCurPage );
		smallCurPageOffset	= SMALL_ALIGN( 0 );

		defragBlock = NULL;

		// medium heap manager starts empty
		mediumFirstFreePage	= NULL;
		mediumLastFreePage	= NULL;
		mediumFirstUsedPage	= NULL;

		c_heapAllocRunningCount = 0;
	}
// ===== Example #7 =====
/*
================
idHeap::SmallAllocate

  allocate memory (1-255 bytes) from the small heap manager
  bytes = number of bytes to allocate
  returns pointer to allocated memory (NULL if a new page could not be allocated)
================
*/
void *idHeap::SmallAllocate( dword bytes ) {
	// we need the at least sizeof( dword ) bytes for the free list
	// (a free block stores the link to the next free block in its data area)
	if ( bytes < sizeof( dword ) ) {
		bytes = sizeof( dword );
	}

	// increase the number of bytes if necessary to make sure the next small allocation is aligned
	bytes = SMALL_ALIGN( bytes );

	// one free list per size class: index is size / ALIGN
	byte *smallBlock = (byte *)(smallFirstFree[bytes / ALIGN]);
	if ( smallBlock ) {
		// reuse a previously freed block of this size class; *link holds the
		// next free block of the class, stored in the block's user data
		dword *link = (dword *)(smallBlock + SMALL_HEADER_SIZE);
		// smallBlock[0] (the size byte) still holds bytes/ALIGN from the
		// block's original carve-out, so only the id byte is rewritten here
		smallBlock[1] = SMALL_ALLOC;					// allocation identifier
		smallFirstFree[bytes / ALIGN] = (void *)(*link);
		return (void *)(link);
	}

	dword bytesLeft = (long)(pageSize) - smallCurPageOffset;
	// if we need to allocate a new page
	// NOTE(review): this test ignores SMALL_HEADER_SIZE, so a block whose
	// total footprint (bytes + SMALL_HEADER_SIZE) slightly exceeds bytesLeft
	// can still be carved from this page — confirm the page layout leaves
	// slack for this (or that SMALL_ALIGN guarantees it)
	if ( bytes >= bytesLeft ) {

		// retire the current page to the used list and start a fresh one
		smallCurPage->next	= smallFirstUsedPage;
		smallFirstUsedPage	= smallCurPage;
		smallCurPage		= AllocatePage( pageSize );
		if ( !smallCurPage ) {
			return NULL;
		}
		// make sure the first allocation is aligned
		smallCurPageOffset	= SMALL_ALIGN( 0 );
	}

	// carve a new block off the current page
	smallBlock			= ((byte *)smallCurPage->data) + smallCurPageOffset;
	smallBlock[0]		= (byte)(bytes / ALIGN);		// write # of bytes/ALIGN
	smallBlock[1]		= SMALL_ALLOC;					// allocation identifier
	smallCurPageOffset  += bytes + SMALL_HEADER_SIZE;	// increase the offset on the current page
	return ( smallBlock + SMALL_HEADER_SIZE );			// skip the first two bytes
}
// ===== Example #8 =====
// AllocateNPages - Return storage spanning Num pages; a request for zero or
// one page is served by the single-page allocator.
void *AllocateNPages(unsigned Num) {
  return (Num <= 1) ? AllocatePage() : GetPages(Num);
}
// ===== Example #9 =====
/*
================
idHeap::MediumAllocate

  allocate memory (256-32768 bytes) from medium heap manager
  bytes	= number of bytes to allocate
  returns pointer to allocated memory (NULL if a new page could not be allocated)
================
*/
void *idHeap::MediumAllocate( dword bytes ) {
	idHeap::page_s		*p;
	void				*data;

	// total footprint: payload plus the (aligned) per-block header
	dword sizeNeeded = ALIGN_SIZE( bytes ) + ALIGN_SIZE( MEDIUM_HEADER_SIZE );

	// find first page with enough space
	for ( p = mediumFirstFreePage; p; p = p->next ) {
		if ( p->largestFree >= sizeNeeded ) {
			break;
		}
	}

	if ( !p ) {								// need to allocate new page?
		p = AllocatePage( pageSize );
		if ( !p ) {
			return NULL;					// malloc failure!
		}
		// push the new page onto the front of the free-page list, keeping
		// mediumLastFreePage correct when the list was previously empty
		p->prev		= NULL;
		p->next		= mediumFirstFreePage;
		if (p->next) {
			p->next->prev = p;
		}
		else {
			mediumLastFreePage	= p;
		}

		mediumFirstFreePage		= p;

		p->largestFree	= pageSize;
		p->firstFree	= (void *)p->data;

		// the fresh page starts as one big free block covering its data area
		mediumHeapEntry_s *e;
		e				= (mediumHeapEntry_s *)(p->firstFree);
		e->page			= p;
		// make sure ((byte *)e + e->size) is aligned
		e->size			= pageSize & ~(ALIGN - 1);
		e->prev			= NULL;
		e->next			= NULL;
		e->prevFree		= NULL;
		e->nextFree		= NULL;
		e->freeBlock	= 1;
	}

	data = MediumAllocateFromPage( p, sizeNeeded );		// allocate data from page

	// if the page can no longer serve memory, move it away from free list
	// (so that it won't slow down the later alloc queries)
	// this modification speeds up the pageWalk from O(N) to O(sqrt(N))
	// a call to free may swap this page back to the free list

	if ( p->largestFree < MEDIUM_SMALLEST_SIZE ) {
		// unlink p from the free-page list, fixing both end pointers first
		if ( p == mediumLastFreePage ) {
			mediumLastFreePage = p->prev;
		}

		if ( p == mediumFirstFreePage ) {
			mediumFirstFreePage = p->next;
		}

		if ( p->prev ) {
			p->prev->next = p->next;
		}
		if ( p->next ) {
			p->next->prev = p->prev;
		}

		// link to "completely used" list
		p->prev = NULL;
		p->next = mediumFirstUsedPage;
		if ( p->next ) {
			p->next->prev = p;
		}
		mediumFirstUsedPage = p;
		return data;
	}

	// re-order linked list (so that next malloc query starts from current
	// matching block) -- this speeds up both the page walks and block walks
	// (rotate the list so that p becomes the head: everything that preceded
	// p is appended after the old tail, relative order preserved)

	if ( p != mediumFirstFreePage ) {
		assert( mediumLastFreePage );
		assert( mediumFirstFreePage );
		assert( p->prev);

		mediumLastFreePage->next	= mediumFirstFreePage;
		mediumFirstFreePage->prev	= mediumLastFreePage;
		mediumLastFreePage			= p->prev;
		p->prev->next				= NULL;
		p->prev						= NULL;
		mediumFirstFreePage			= p;
	}

	return data;
}