Example #1
File: pool.c Project: DevL/ponyc
static void* pool_alloc_pages(size_t size)
{
  if(pool_block_header.total_size >= size)
  {
    pool_block_t* block = pool_block_header.head;

    while(block != NULL)
    {
      if(block->size > size)
      {
        // Use size bytes from the end of the block. This allows us to keep the
        // block info inside the block instead of using another data structure.
        size_t rem = block->size - size;
        block->size = rem;
        pool_block_header.total_size -= size;

        // TODO: can track largest size
        // if we're the last element, it's either our new size or the previous
        // block size, if that's larger.

        if((block->prev != NULL) && (block->prev->size > block->size))
        {
          // If we are now smaller than the previous block, move us forward in
          // the list.
          pool_block_remove(block);
          pool_block_insert(block);
        }

        return (char*)block + rem;
      } else if(block->size == size) {
        // Remove the block from the list.
        pool_block_remove(block);

        // Return the block pointer itself.
        pool_block_header.total_size -= size;
        return block;
      }

      block = block->next;
    }
  }

  // We have no free blocks big enough.
  if(size >= POOL_MMAP)
    return virtual_alloc(size);

  pool_block_t* block = (pool_block_t*)virtual_alloc(POOL_MMAP);
  size_t rem = POOL_MMAP - size;

  block->size = rem;
  block->next = NULL;
  block->prev = NULL;
  pool_block_header.total_size += rem;
  pool_block_insert(block);

  return (char*)block + rem;
}
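
A note on what this example assumes: the free list headed by pool_block_header is kept sorted by ascending block size, which is why a block that shrinks may need to be re-inserted ("move us forward in the list"). Below is a minimal sketch of the assumed block structure and list helpers, not the actual ponyc definitions:

#include <stddef.h>

typedef struct pool_block_t
{
  struct pool_block_t* prev;
  struct pool_block_t* next;
  size_t size;
} pool_block_t;

static struct
{
  pool_block_t* head;
  size_t total_size;
} pool_block_header;

// Unlink a block from the doubly linked free list.
static void pool_block_remove(pool_block_t* block)
{
  if(block->prev != NULL)
    block->prev->next = block->next;
  else
    pool_block_header.head = block->next;

  if(block->next != NULL)
    block->next->prev = block->prev;
}

// Insert a block keeping the list sorted by ascending size, so the scan
// in pool_alloc_pages() reaches the smallest block that fits first.
static void pool_block_insert(pool_block_t* block)
{
  pool_block_t* next = pool_block_header.head;
  pool_block_t* prev = NULL;

  while((next != NULL) && (next->size < block->size))
  {
    prev = next;
    next = next->next;
  }

  block->prev = prev;
  block->next = next;

  if(prev != NULL)
    prev->next = block;
  else
    pool_block_header.head = block;

  if(next != NULL)
    next->prev = block;
}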
Example #2
static pool_item_t* pool_pages(pool_local_t* thread, pool_global_t* global)
{
  char* p = (char*)virtual_alloc(POOL_MMAP);
  thread->start = p + global->size;
  thread->end = p + POOL_MMAP;
  return (pool_item_t*)p;
}
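
pool_pages() maps one POOL_MMAP-sized region, returns its first global->size bytes as an item, and records the remainder as the thread-local [start, end) range. A hedged sketch of how that range might be consumed afterwards; pool_get_local is a hypothetical name, not the project's API:

// Hypothetical follow-up: bump-allocate the next fixed-size item from
// the thread-local range filled in by pool_pages().
static pool_item_t* pool_get_local(pool_local_t* thread, pool_global_t* global)
{
  if(thread->start < thread->end)
  {
    pool_item_t* p = (pool_item_t*)thread->start;
    thread->start += global->size;   // advance by one item
    return p;
  }

  // Range exhausted: map a fresh POOL_MMAP region.
  return pool_pages(thread, global);
}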
Example #3
/*
 Code Reference :  My Previous Submission
 */
void map_exe(struct mm_struct * mm_struct)
{
    //  printk("\n Map Exe");
    //  while(1);
    // printk("\nStart Address %x",exeFormat.entryAddr);
    uint64_t entryAddress = (uint64_t) exeFormat.entryAddr;
    mm_struct->entryAddress = entryAddress;
    int i = 0;
    for (; i < exeFormat.numSegments; i++)
    {
        struct Exe_Segment segment = exeFormat.segmentList[i];
        uint64_t start = segment.vaddr;
        uint64_t end = segment.vaddr + segment.sizeInMemory;
        uint64_t pageNeeded = 0;
        uint64_t least_start = 0;
        uint64_t max_end = 0;
        if (end - start != 0)
        {
            least_start = (start / 0x1000) * 0x1000;
            max_end = (end / 0x1000) * 0x1000 + 0x1000;
            //   printk("\n least_start : %x",least_start);
            // printk("\n max_end : %x",max_end);
            pageNeeded = (max_end - least_start) / 0x1000;
            //printk("\nPage Needed :%x",pageNeeded);
        }
        if (pageNeeded != 0)
        {
            struct vm_area_struct* vm_area =
                    (struct vm_area_struct *) virtual_alloc();
            vm_area->vm_start = least_start;
            vm_area->vm_end = max_end;
            mm_struct->current->vm_next = vm_area;
            mm_struct->current = vm_area;
            mm_struct->current->vm_next = NULL;
            while (pageNeeded != 0)
            {
                void *physicalAddress = page_alloc();
                //  printk("\n Physical Address :%x",physicalAddress);
                map_process(least_start, (uint64_t) physicalAddress);

                pageNeeded -= 1;
                least_start += 0x1000;
            }
            uint64_t ondiskstart = segment.offsetInFile + elf_start;
            uint64_t size = segment.sizeInMemory;
            char *ondisk = (char *) ondiskstart;
            char *vadd = (char *) start;
            //        printk("\n Copying ... ");
            while (size)
            {
                *vadd = *ondisk;
                vadd++;
                ondisk++;
                size--;
            }
        }
    }
}
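
The rounding above aligns the segment start down and the segment end up to 4 KiB page boundaries before counting pages. A standalone worked example of that arithmetic (the sample addresses are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000ULL

int main(void)
{
    uint64_t start = 0x1234;   // segment starts mid-page
    uint64_t end   = 0x3456;   // segment ends mid-page

    uint64_t least_start = (start / PAGE_SIZE) * PAGE_SIZE;            // 0x1000
    uint64_t max_end     = (end / PAGE_SIZE) * PAGE_SIZE + PAGE_SIZE;  // 0x4000
    uint64_t pages       = (max_end - least_start) / PAGE_SIZE;        // 3

    printf("least_start=%#llx max_end=%#llx pages=%llu\n",
           (unsigned long long)least_start,
           (unsigned long long)max_end,
           (unsigned long long)pages);
    return 0;
}

Note that when end already falls on a page boundary, this formula still adds one full extra page, which matches the behavior of the code above.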
Example #4
inline void
bank_init(Bank *stack, u32 capacity)
{
    ASSERT(stack);
    stack->begin = stack->it = (u8 *)virtual_alloc(0, capacity);
    ASSERT(stack->begin);
    stack->end = stack->begin + capacity;
}
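
bank_init() reserves a capacity-sized region and places the bump cursor it at its start. A minimal sketch of a push helper that would allocate from such a bank; bank_push is an assumed name, not necessarily the project's API:

// Hypothetical bump allocation out of the region set up by bank_init().
inline u8 *
bank_push(Bank *stack, u32 size)
{
    ASSERT(stack->it + size <= stack->end);   // enough reserved space left?
    u8 *result = stack->it;
    stack->it += size;                        // advance the bump cursor
    return result;
}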
Example #5
void* pool_alloc_size(size_t size)
{
  size_t index = pool_index(size);
  void* p;

  if(index < POOL_COUNT)
    return pool_alloc(index);

  p = virtual_alloc(size);

#ifdef USE_VALGRIND
  VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, 0);
#endif

  return p;
}
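
pool_alloc_size() routes small requests to a size-class pool via pool_index() and falls back to virtual_alloc() for anything larger. A hedged usage sketch, assuming a matching pool_free_size(size, p) exists and that 1 MiB exceeds the largest size class:

// Illustrative round trip, assuming pool_free_size(size, p) is the
// counterpart to pool_alloc_size(). The size passed when freeing must
// match the size used to allocate, so the same small/large path is taken.
static void pool_size_roundtrip_example(void)
{
  void* buf = pool_alloc_size(1 << 20);   // large request: served by virtual_alloc
  /* ... use buf ... */
  pool_free_size(1 << 20, buf);           // assumed counterpart
}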
Example #6
void *virtual_alloc_rw(void *addr, uintptr_t size)
{
    return virtual_alloc(addr, size,
        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}
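
This wrapper assumes a virtual_alloc() whose four parameters mirror Win32 VirtualAlloc. A minimal sketch of such a pass-through on Windows, purely illustrative; the project's real implementation may add error handling:

#include <stdint.h>
#include <windows.h>

// Assumed thin wrapper that forwards directly to VirtualAlloc.
void *virtual_alloc(void *addr, uintptr_t size, DWORD type, DWORD protect)
{
    return VirtualAlloc(addr, (SIZE_T)size, type, protect);
}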
Example #7
//static
void *mmap ( void *ptr, INTERNAL_INTPTR_T size, INTERNAL_INTPTR_T prot, INTERNAL_INTPTR_T type, INTERNAL_INTPTR_T handle, INTERNAL_INTPTR_T arg) {
#ifndef USE_PTMALLOC3_ARENA
    static INTERNAL_INTPTR_T g_pagesize;
    static INTERNAL_INTPTR_T g_regionsize;
    DWORD alloc=MEM_RESERVE|MEM_TOP_DOWN, ntprot=0;
    INTERNAL_INTPTR_T rounding=0;
    char *p;
#ifdef TRACE
    printf ("mmap %p %d %d %d\n", ptr, size, prot, type);
#endif
    /* Wait for spin lock */
    slwait (&g_sl);
    /* First time initialization */
    if (! g_pagesize)
        g_pagesize = getpagesize ();
    if (! g_regionsize)
        g_regionsize = getregionsize ();
    /* Assert preconditions */
    assert ((INTERNAL_SIZE_T) ptr % g_pagesize == 0);
    assert (size % g_pagesize == 0);
    /* Allocate this */
    if(!(type & MAP_NORESERVE)) alloc|=MEM_COMMIT;
    if((prot & (PROT_READ|PROT_WRITE))==(PROT_READ|PROT_WRITE)) ntprot|=PAGE_READWRITE;
    else if(prot & PROT_READ) ntprot|=PAGE_READONLY;
    else if(prot & PROT_WRITE) ntprot|=PAGE_READWRITE;
    else
    {
        ntprot|=PAGE_NOACCESS;
        if(size==HEAP_MAX_SIZE)
        {
            rounding=size;
            size<<=1;
#ifdef TRACE
            printf("Rounding to multiple of %d\n", rounding);
#endif
        }
        if(ptr)
        {   /* prot==PROT_NONE also appears to be a euphemism for free */
            MEMORY_BASIC_INFORMATION mbi;
            DWORD read=0;
            for(p=((char *)ptr)+read; read<(DWORD) size && VirtualQuery(p, &mbi, sizeof(mbi)); read+=mbi.RegionSize)
            {
                if(mbi.State & MEM_COMMIT)
                {
//					if(!VirtualFree((LPVOID) p, mbi.RegionSize, MEM_DECOMMIT))
//						goto mmap_exit;
                    if(!VirtualAlloc((LPVOID) p, mbi.RegionSize, MEM_RESERVE, PAGE_NOACCESS))
                        goto mmap_exit;
#ifdef TRACE
                    printf ("Release %p %d\n", p, mbi.RegionSize);
#endif
                }
            }
            ptr=0; /* success */
            goto mmap_exit;
        }
    }
    ptr = VirtualAlloc (ptr, size, alloc, ntprot);
    if (! ptr) {
        ptr = (void *) MORECORE_FAILURE;
        goto mmap_exit;
    }
    if(rounding)
    {
//		VirtualFree(ptr, 0, MEM_RELEASE);
        VirtualAlloc(ptr, 0, MEM_RESERVE, PAGE_NOACCESS);
        ptr=(void *)(((INTERNAL_SIZE_T)ptr + (rounding-1)) & ~(rounding-1));
//		if(!(ptr=VirtualAlloc(ptr, rounding, alloc, ntprot)))
        if(0==(ptr=VirtualAlloc(ptr, rounding, alloc, ntprot)))
        {
            ptr = (void *) MORECORE_FAILURE;
            goto mmap_exit;
        }
        assert ((INTERNAL_SIZE_T) ptr % rounding == 0);
        size=rounding;
    }
    else
    {
        /* Assert postconditions */
        assert ((INTERNAL_SIZE_T) ptr % g_regionsize == 0);
    }
#ifdef TRACE
    printf ("%s %p %d %d %d\n", (type & MAP_NORESERVE) ? "Reserve" : "Commit", ptr, size, prot, type);
#endif
mmap_exit:
    /* Release spin lock */
    slrelease (&g_sl);
    return ptr;
#else // #ifndef USE_PTMALLOC3_ARENA
    void*		result;
    slwait		( &g_sl );
    result		= virtual_alloc ( &g_ptmalloc3_arena, (unsigned int)size );
    slrelease	( &g_sl );
    return		( result );
#endif // #ifndef USE_PTMALLOC3_ARENA
}
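
The HEAP_MAX_SIZE branch above uses a common trick for getting aligned reservations from VirtualAlloc: request twice the size, then round the returned pointer up to the next HEAP_MAX_SIZE boundary and allocate at that address. The rounding is plain power-of-two alignment arithmetic, shown in isolation below:

#include <assert.h>
#include <stdint.h>

// Round an address up to the next multiple of a power-of-two alignment.
static void *align_up(void *ptr, uintptr_t alignment)
{
    assert((alignment & (alignment - 1)) == 0);   // must be a power of two
    uintptr_t p = (uintptr_t)ptr;
    return (void *)((p + (alignment - 1)) & ~(alignment - 1));
}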