Example #1
0
/*
 * Allocate a block whose total footprint (payload + header) is the
 * smallest power-of-two multiple of MINIMUM_BLOCK_SIZE that fits.
 * Returns the block address, or 0 when the heap cannot be grown
 * enough to satisfy the request.
 *
 * Fixes: the old rounding loop shifted once per bit of
 * (size + header) / MINIMUM_BLOCK_SIZE, which always produced at
 * least TWICE the needed size for exact power-of-two requests.
 * It also looped forever if size + MALLOCED_HEADER_SIZE overflowed.
 */
void* __malloc_internal(size_t size)
{
	size_t rounded_size;
	size_t needed;
	void *address;

	/* Guard against wrap-around before rounding. */
	if (size > (size_t)-1 - MALLOCED_HEADER_SIZE)
		return 0;
	needed = size + MALLOCED_HEADER_SIZE;

	/* Round up to the smallest sufficient power of two. */
	rounded_size = MINIMUM_BLOCK_SIZE;
	while (rounded_size < needed) {
		if (rounded_size > ((size_t)-1 >> 1))
			return 0; /* next shift would overflow */
		rounded_size <<= 1;
	}

	assert(rounded_size >= size + MALLOCED_HEADER_SIZE);

	address = find_free_block(rounded_size);
	if (address == 0) {
		/* No block of that size class: extend the heap and retry. */
		grow_heap(rounded_size);
		address = find_free_block(rounded_size);
		if (address == 0)
			return 0;
	}

	/* Poison the payload so use-before-init bugs are visible. */
	memset(address, ALLOC_FILL, rounded_size - MALLOCED_HEADER_SIZE);

	allocated_blocks++;
	total_allocations++;
	total_allocated += rounded_size;

	return address;
}
Example #2
0
File: api.c Project: lycis/emma
/*
 * Allocate a block of at least `size` bytes from the page list for
 * the matching power-of-two size class.  Returns a pointer to the
 * block's data area, or 0 on allocation failure.
 *
 * Fixes: init_memory_page() and the second find_free_block() call
 * were never checked, so an out-of-memory page allocation led to a
 * NULL dereference at the safeguard memcpy below.
 */
void* emma_malloc(size_t size)
{
    memory_page* list;
    int          base;
    size_t       blksize;
    memory_block *block;

    blksize = emma_nextp2(size);

    // get matching list (grow the size-class table if needed)
    base = emma_log2(blksize);
    if(memory_table_len < (base + 1))
    {
        init_memory_table(blksize);
    }

    // initialise first page of memory if not yet done
    if(memory_table[base] == 0)
    {
        memory_table[base] = init_memory_page(blksize);
        if(memory_table[base] == 0)
        {
            return 0; // out of memory
        }
    }

    list = memory_table[base];

    // get a free block of the according size
    block = find_free_block(list);

    // allocate a new page of memory if necessary
    if(block == 0)
    {
        memory_page *pNewPage, *currPage;
        pNewPage = init_memory_page(blksize);
        if(pNewPage == 0)
        {
            return 0; // could not grow this size class
        }

        // append the new page at the end of the page chain
        currPage = list;
        while(currPage->next != 0)
        {
            currPage = currPage->next;
        }
        currPage->next = pNewPage;

        block = find_free_block(pNewPage); // get a free block on the new page
        if(block == 0)
        {
            return 0; // fresh page yielded no block; fail rather than crash
        }
    }

    // safeguard memory (canaries before and after the payload)
    memcpy(block->sgPre, EMMA_SG, EMMA_SG_LEN);
    memcpy(block->sgPost, EMMA_SG, EMMA_SG_LEN);

    return block->data;
}
Example #3
0
/*
 * Minimal sbrk-style malloc: reuse a free chunk when one fits,
 * otherwise request more space from the system.  Returns a pointer
 * to the payload just past the chunk header, or NULL on failure.
 *
 * Fixes: returned `block + 1` (a struct malloc_chunk *) from a
 * char * function without a cast (incompatible-pointer-type), and
 * tested `size <= 0` on an unsigned size_t.  The large commented-out
 * splitting experiment was removed as dead code.
 */
char *
malloc(size_t size) {
  struct malloc_chunk *block;

  if (size == 0) {            /* size_t is unsigned; only 0 is invalid */
    return NULL;
  }

  if (!heap_start) { /* First call: create the initial chunk. */
    block = request_space(NULL, size);
    if (!block) {
      return NULL;
    }
    heap_start = (char*) block;
  } else {
    struct malloc_chunk *last = (struct malloc_chunk*) heap_start;

    block = find_free_block(&last, size);

    if (!block) { /* No reusable chunk: grow the heap after `last`. */
      block = request_space(last, size);
      if (!block) {
        return NULL;
      }
    } else {
      /* Found a chunk we can re-use; mark it allocated.
         (No splitting: oversized chunks are handed out whole.) */
      block->is_free = 0;
    }
  }

  /* Payload starts immediately after the chunk header. */
  return (char *)(block + 1);
}
/*
 * Simple first-fit allocator entry point: reuse a free block when one
 * fits, otherwise extend the heap.  Returns the block header (not the
 * payload), or NULL on failure.
 */
void *myallocate_self(int size)
{
	struct block_meta *block;
	struct block_meta *last = global_base;

	if (global_base == NULL) {
		/* First call ever: create the initial block and remember it. */
		block = request_space(NULL, size);
		if (block == NULL)
			return NULL;
		global_base = block;
		return block;
	}

	block = find_free_block(&last, size);
	if (block != NULL) {
		/* Found a free block; reuse it whole (no splitting). */
		block->free = 0;
		return block;
	}

	/* No reusable block: grow the heap after the last block seen. */
	block = request_space(last, size);
	return block; /* NULL propagates on failure */
}
/*
 * Scan every block group of the journaled filesystem image for a free
 * block.  On success the block is marked used, the group header is
 * written back, and the absolute block number is returned; returns -1
 * when every group is full.
 */
int get_free_block(jfs_t *jfs)
{
    int ngroups, g;

    /* Number of groups, rounding up for a partial trailing group. */
    ngroups = jfs->d_img->size / (BLOCKSIZE * BLOCKS_PER_GROUP);
    if (jfs->d_img->size % (BLOCKSIZE * BLOCKS_PER_GROUP) != 0) {
	ngroups++;
    }

    for (g = 0; g < ngroups; g++) {
	struct blockgroup *grp;
	char raw[BLOCKSIZE];
	int freeblock;

	/* Read the blockgroup header for this group. */
	jfs_read_block(jfs, raw, g * BLOCKS_PER_GROUP);
	grp = (struct blockgroup *)raw;

	freeblock = find_free_block(grp->block_bitmap);
	if (freeblock < 0)
	    continue; /* this group is full, try the next one */

	/* Claim the block and persist the updated bitmap. */
	set_block_used(grp->block_bitmap, freeblock);
	jfs_write_block(jfs, raw, g * BLOCKS_PER_GROUP);
	return freeblock + g * BLOCKS_PER_GROUP;
    }
    return -1;
}
/*
 * mm_realloc - Implemented fairly simply by finding a free block (or expanding the heap), copying the data, and freeing the original block
 */
/*
 * Resize an allocation: grab a new block (from the free list or by
 * extending the heap), copy the old payload over, and free the
 * original block.
 *
 * NOTE(review): ptr == NULL and size == 0 are not handled; standard
 * realloc treats those as malloc/free.  Confirm callers never pass
 * them, since `ptr - OFFSET` below would misbehave on NULL.
 */
void *mm_realloc(void *ptr, size_t size)
{
  int totalSizeNeeded = ALIGN(size + OVERHEAD);
  listBlock *oldBlock = ptr - OFFSET;
  listBlock *newBlock = find_free_block(totalSizeNeeded);

  /* Bytes to copy: old payload size (header with alloc bit masked off,
     minus the pointer offset), capped at the new size.
     NOTE(review): the cap compares against totalSizeNeeded, which
     still includes OVERHEAD, so slightly more than the new payload
     may be copied -- verify this cannot overrun the new block. */
  int copySize = (oldBlock->header & ~0x1) - OFFSET;
  if (totalSizeNeeded < copySize) {
    copySize = totalSizeNeeded;
  }

  if (newBlock == NULL) { // no free block found
    /* Extend the heap and carve an allocated block out of the new space. */
    newBlock = (listBlock *)mem_sbrk(totalSizeNeeded);
    if ((long)newBlock == -1) {
      return NULL;
    }
    else {
      /* Header and footer both carry size | allocated-bit. */
      newBlock->header = totalSizeNeeded | 0x1;
      int *footer = (int *)(((char *)newBlock) + totalSizeNeeded - WORD_SIZE);
      *footer = totalSizeNeeded | 0x1;
    }
  }

  else { // if a free block was found
    if (newBlock->header - totalSizeNeeded < OVERHEAD) { // not enough room left over for overhead of a free block
      newBlock->header = newBlock->header | 0x1; // allocate the whole block
      newBlock->prev->next = newBlock->next; // remove from free list
      newBlock->next->prev = newBlock->prev;
    }
    else {
      /* Split: allocate the front of the block and return the tail
         to the free list in newBlock's old list position. */
      int oldBlockSize = newBlock->header; // store the found block's size
      newBlock->header = totalSizeNeeded | 0x1;
      int *footer = (int *)(((char *)newBlock) + totalSizeNeeded - WORD_SIZE);
      *footer = totalSizeNeeded | 0x1;
      listBlock *splitBlock = (listBlock *)(((char *)newBlock) + totalSizeNeeded);
      splitBlock->header = oldBlockSize - totalSizeNeeded;
      footer = (int *)(((char *)splitBlock) + splitBlock->header - WORD_SIZE);
      *footer = oldBlockSize - totalSizeNeeded;
      splitBlock->prev = newBlock->prev; // insert splitBlock into the free list
      splitBlock->next = newBlock->next;
      newBlock->prev->next = splitBlock; // remove newBlock from the free list and redirect to splitBlock
      newBlock->next->prev = splitBlock;
    }
  }

  /* NOTE(review): this check is unreachable -- both branches above
     either return or leave newBlock non-NULL. */
  if (newBlock == NULL) {
    return NULL;
  }
  /* Advance past the header to the payload before copying. */
  newBlock = (listBlock *)((char *)newBlock + OFFSET);

  memcpy(newBlock, ptr, copySize);
  mm_free(ptr);
  return newBlock;
}
Example #7
0
/*
 * Debug allocator wrapper: obtain raw memory with room for a
 * bookkeeping prefix, record the allocation site in the tracking
 * table, and hand back the address just past the prefix.
 */
void *getmem_debug(size_t n, char *file, int line)
{
	void *base = getmem_raw(n + ALIGNSHIFT);
	long slot = find_free_block();

	/* Stash the tracking-slot index in the prefix so free can find it. */
	*(long *)base = slot;

	/* Record who allocated this block and when. */
	mem[slot].block = base;
	mem[slot].serial = current_serial++;
	mem[slot].file = file;
	mem[slot].line = line;

	return (void *)((unsigned char *)base + ALIGNSHIFT);
}
/*
 * Pick a block that can accept new writes, reclaiming space once if
 * necessary, and load its header into the partition's header cache.
 * On success part->current_block is set and 0 is returned; otherwise
 * a negative errno is returned.
 */
static int find_writeable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		/* No free block: try a one-shot reclaim, unless a reclaim
		   is already in progress (avoids recursion). */
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	/* Pull the chosen block's header into the cache. */
	rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
		part->header_size, &retlen, (u_char*)part->header_cache);

	/* A short read with no error code still counts as an I/O error. */
	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_NOTICE PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	/* Success also exits through here with rc == 0. */
	return rc;
}
Example #9
0
/* Carve an allocation of the given type out of the mark-and-sweep GC
   heap, rounding the request up to the heap's block granularity.
   Returns NULL when no free block is large enough. */
heap_block *heap::heap_allot(cell size, cell type)
{
	/* Round up to a multiple of block_size_increment (a power of two). */
	size = (size + block_size_increment - 1) & ~(block_size_increment - 1);

	free_heap_block *candidate = find_free_block(size);
	if(!candidate)
		return NULL;

	/* Trim any excess back onto the free list, then initialize. */
	candidate = split_free_block(candidate,size);
	candidate->set_type(type);
	candidate->set_marked_p(false);
	return candidate;
}
Example #10
0
/* Carve an allocation out of the mark-and-sweep GC heap, rounding the
   request up to the heap's block granularity.  Returns NULL when no
   free block is large enough. */
heap_block *heap::heap_allot(cell size)
{
	/* Round up to a multiple of block_size_increment (a power of two). */
	size = (size + block_size_increment - 1) & ~(block_size_increment - 1);

	free_heap_block *candidate = find_free_block(size);
	if(!candidate)
		return NULL;

	/* Trim any excess back onto the free list, then mark in use. */
	candidate = split_free_block(candidate,size);
	candidate->status = B_ALLOCATED;
	return candidate;
}
Example #11
0
/*
 * Queue a write of `size` bytes from `block` into a free cache slot of
 * the block device, then signal the writer thread via the dirty
 * semaphore.  Returns the number of bytes accepted, or -1 on any
 * parameter error or when no cache slot is free.
 *
 * Fixes: the result of find_free_block() was used unchecked; a full
 * cache (i == -1) caused an out-of-bounds write to datablocks[-1].
 */
int 
blockdevice_writeblock(BLOCKDEVICEP dev, void *block, int offset, int size, int blocknum, int waitforcomplete)
{
	int i ;
	int rc ;

	if (! dev) {
		return -1 ;
	}

	if ((size < 0) || (offset < 0)) {
		return -1 ;
	}

	if ( size > dev->blocksize ) {
		return -1 ;
	}

	if (offset > size) {
		return -1 ;
	}

	i = find_free_block(dev->bd->datablocks, dev->numblocks) ;
	if (i < 0) {
		/* No free cache slot available. */
		return -1 ;
	}

	/*
	** Yay! We found a block, stuff data into it!
	*/
	dev->bd->datablocks[i].datalength = size ;
	dev->bd->datablocks[i].blocknum = blocknum ;
	dev->bd->datablocks[i].offset = offset ;
	memcpy(dev->bd->datablocks[i].data, block, size) ;

	/*
	** Signal dirty block.
	*/
	rc = semaphore_increment(dev->dirtysem) ;

	if (waitforcomplete) {
		/* Block until the writer thread has flushed this slot. */
		rc = semaphore_decrement(dev->sem) ;
	}

	return size ;
}
Example #12
0
/*
** Move the AI player one step: scan the map for a target square,
** falling back to any free square when the scan fails or the chosen
** square is occupied, then update the map and the player position.
** Always returns 0.
*/
int		ia_move(t_princ *lemip)
{
  t_pos		target;

  target.x = -1;
  target.y = -1;
  ia_scan_map(lemip, &(lemip->player), &target);
  if (target.x == -1 ||
      lemip->map[target.y * MAP_LEN + target.x] != 0)
    find_free_block(lemip, &target);
  if (target.x == -1)
    return (0);
  lemip->map[lemip->player.ia.y * MAP_LEN + lemip->player.ia.x] = 0;
  lemip->player.ia.x = target.x;
  lemip->player.ia.y = target.y;
  lemip->map[target.y * MAP_LEN + target.x] = lemip->player.team;
  return (0);
}
Example #13
0
/*
 * First-fit malloc over a linked block list.  Reuses (and splits)
 * a free block when one fits, otherwise appends a new block to the
 * heap.  Returns the payload pointer, or NULL on failure.
 */
void *malloc(size_t size){
  block_t blk, tail;

  /* Allocation statistics (kept even for failed requests, as before). */
  sum_allocs++;
  sum_alloc_size += size;
  num_allocated++;

  size = align_pointer(size);

  if (!base) {
    /* Very first allocation: create the initial block. */
    blk = new_block(NULL, size);
    if (!blk){
      perror("error at !current");
      return NULL;
    }
    base = blk;
    return blk->raw;
  }

  /* Walk the list looking for a fitting free block. */
  tail = base;
  blk = find_free_block(&tail, size);
  if (blk) {
    /* Split when the leftover is large enough to be its own block. */
    if (blk->size - size >= BLOCK_SIZE_MIN)
      split_block(blk, size);
    blk->free = 0;
  } else {
    /* Nothing fits: append a fresh block after the tail. */
    blk = new_block(tail, size);
    if (!blk){
      perror("error at !current");
      return NULL;
    }
  }
  return blk->raw;
}
Example #14
0
/*
 * malloc
 */
/*
 * Segregated-list malloc: align the request, add header/footer
 * overhead, and pull a block from the matching free list.  Returns
 * the payload pointer, or NULL for size 0, oversized requests, or
 * when no block can be found.
 *
 * Fixes: "size += 8 - size%8" added a full 8 bytes even when size was
 * already 8-aligned, wasting space on every aligned request; replaced
 * with a standard round-up that is a no-op for aligned sizes.
 */
void *malloc (size_t size) {
    void* p;
    int index;
    if(heap_start == 0)
                mm_init();
    checkheap(1);  // Let's make sure the heap is ok!

    if(size == 0) return NULL;

    /* Round payload up to the next multiple of 8. */
    size = (size + 7) & ~(size_t)7;
    size += (HEADER_SIZE + FOOTER_SIZE);

    if(size > MAX_SIZE) return NULL;

    /* Pick the size class and search it (and larger ones) for a block. */
    index = get_free_list_index(size);
    p = find_free_block(index, size);

    if(p == NULL) return NULL;

    /* Step past the block metadata to the payload. */
    p = block_mem(p);

    checkheap(1);
    return p;
}
/* 
 * mm_malloc - Checks for an available free block of at least the required size (taking overhead and alignment into account).
 *     If no suitable free block is found, the heap is expanded by 2 x size, split, and coalesced if possible. If one is found, it's
 *     split if possible, otherwise the whole block is used.
 */
void *mm_malloc(size_t size)
{
    unsigned int totalSizeNeeded = ALIGN(size + OVERHEAD);
    listBlock *newBlock = find_free_block(totalSizeNeeded);
    int *footer;

    if (newBlock == NULL) { // no free block large enough
      /* Expand the heap by 2x the request: the front half becomes a new
         free block, the back half becomes the allocated block. */
      newBlock = (listBlock *)mem_sbrk(totalSizeNeeded * 2);
      if ((long)newBlock == -1) {
	return NULL;
      }
      else { 
	newBlock->header = totalSizeNeeded; // set up free block at front
	footer = (int *)(((char *)newBlock) + totalSizeNeeded - WORD_SIZE);
	*footer = totalSizeNeeded;
	listBlock *firstFree = (listBlock *)mem_heap_lo(); // insert into free list
	newBlock->next = firstFree->next;
	newBlock->prev = firstFree;
	firstFree->next = newBlock; 
	newBlock->next->prev = newBlock;

	listBlock *allocate = (listBlock *)(((char *)newBlock) + totalSizeNeeded); // create a new allocated block after the free block 
	allocate->header = totalSizeNeeded | 0x1;
	footer = (int *)(((char *)allocate) + totalSizeNeeded - WORD_SIZE);
	*footer = totalSizeNeeded | 0x1;

	/* try coalesing the new leading free portion with the block in front */
	/* NOTE(review): assumes a valid footer word always sits just below
	   the old heap top (guaranteed by the dummy block at mem_heap_lo
	   plus footers on every block) -- confirm initialization in mm_init. */
	int *lowerFooter = (int *)(((char *)newBlock) - WORD_SIZE);
	listBlock *lowerBlock = (listBlock *)(((char *)newBlock) - (*lowerFooter & ~0x1));
	if ((lowerBlock->header & 0x1) == 0) { // if previous block is free
	  if (lowerBlock != (listBlock *)mem_heap_lo()) { // coalesce if it isn't the dummy starting block
	    lowerBlock->header = lowerBlock->header + newBlock->header;
	    footer = (int *)(((char *)lowerBlock) + lowerBlock->header - WORD_SIZE);
	    *footer = lowerBlock->header;
	    newBlock->next->prev = newBlock->prev; // remove block being absorbed from free list
	    newBlock->prev->next = newBlock->next;
	  }
	}
	return (char *)allocate + OFFSET;
      }
    }

    /* If a free block was found, split if possible and relink the free list appropriately */
    else {
      if (newBlock->header - totalSizeNeeded < OVERHEAD) { // not enough room left over to accomodate overhead required for a free block 
	footer = (int *)(((char *)newBlock) + newBlock->header - WORD_SIZE); // set pointer to the footer
	newBlock->header = newBlock->header | 0x1; // then just allocate the entire block
	*footer = newBlock->header;
	newBlock->prev->next = newBlock->next; // splice it out from the free list
	newBlock->next->prev = newBlock->prev;
      }
      
      else { // there is room left over to split the blocks
	int oldBlockSize = newBlock->header; // store the original block size
	newBlock->header = totalSizeNeeded | 0x1; // allocate a block of just the necessary size
	footer = (int *)(((char *)newBlock) + totalSizeNeeded - WORD_SIZE); // set pointer to footer
	*footer = totalSizeNeeded | 0x1; // set footer to size and allocated
	listBlock *splitBlock = (listBlock *)(((char *)newBlock) + totalSizeNeeded); // place a pointer immediately following the newly allocated block
	splitBlock->header = oldBlockSize - totalSizeNeeded; // set the size of the new free block
	footer = (int *)(((char *)splitBlock) + splitBlock->header - WORD_SIZE); // set pointer to footer
	*footer = oldBlockSize - totalSizeNeeded;
	splitBlock->prev = newBlock->prev; // insert splitBlock into the free list
	splitBlock->next = newBlock->next;
	newBlock->prev->next = splitBlock; // remove newBlock from the free list and redirect to splitBlock
	newBlock->next->prev = splitBlock;
      }
    }
    /* Return the payload address inside the allocated block. */
    return (char *)newBlock + OFFSET;
}
Example #16
0
/* returns: a block of allocated memory of the specified size
 *
 * Tries the free list first; on miss, optionally collects, then
 * allocates from (or expands) the heap, with one collect-and-retry
 * before giving up.
 *
 * Fixes: the final out-of-memory path ran a collection but never
 * re-attempted the allocation, so `result` stayed NULL and the
 * assert below always fired on that path. */
void* marksweep_malloc(size_t size_in_bytes)
{
#ifdef WITH_PRECISE_GC_STATISTICS
  static jint time = 0;
#endif
  static size_t bytes_allocated = 0;
  size_t aligned_size_in_bytes = 0;
  void *result = NULL;
  aligned_size_in_bytes = align(size_in_bytes + BLOCK_HEADER_SIZE);

  ENTER_MARKSWEEP_GC();

#ifdef GC_EVERY_TIME
  marksweep_collect();
#endif

  result = find_free_block(aligned_size_in_bytes, 
			   &(heap.free_list), 
			   heap.small_blocks);

  if (result != NULL)
    {
      // get past block header
      result = ((struct block *)result)->object;
    }
  else
    {
      // no free blocks: collect if it looks worthwhile, then
      // allocate from or expand the heap
      if (collection_makes_sense(bytes_allocated))
	{
	  setup_for_threaded_GC();

	  marksweep_collect();
	  bytes_allocated = 0;
	  
	  cleanup_after_threaded_GC();
	}

      // see if we have the necessary memory,
      // either with or without collection
      result = allocate_in_marksweep_heap(aligned_size_in_bytes, &heap);

      // if we didn't collect already and we ran out of memory,
      // run a collection and RETRY the allocation
      if (result == NULL && bytes_allocated != 0) {
	  setup_for_threaded_GC();

	  marksweep_collect();
	  bytes_allocated = 0;
	  
	  cleanup_after_threaded_GC();

	  result = allocate_in_marksweep_heap(aligned_size_in_bytes, &heap);
      }
    }

  // if no memory at this point, just fail
  assert(result != NULL);

#ifdef WITH_PRECISE_GC_STATISTICS
  {
    /* NOTE(review): assumes allocate_in_marksweep_heap, like the
       free-list path, yields a pointer just past the block header
       (result - BLOCK_HEADER_SIZE is the header) -- confirm. */
    struct block *bl = result - BLOCK_HEADER_SIZE;
    bl->time = time++;
  }
#endif

  bytes_allocated += aligned_size_in_bytes;

  EXIT_MARKSWEEP_GC();

  return result;
}