Example #1
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed to by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  char *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  over_request = request + request / 100 * caml_percent_free;
  malloc_request = caml_round_heap_chunk_size (Bhsize_wosize (over_request));
  mem = caml_alloc_for_heap (malloc_request);
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  while (Wosize_bhsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    hp += Bhsize_wosize (Max_wosize);
    remain -= Bhsize_wosize (Max_wosize);
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    Hd_hp (hp) = Make_header (Wosize_bhsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    Field (Op_hp (hp), 0) = (value) NULL;
  }else{
    Field (Op_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap (mem) != 0){
    caml_free_for_heap (mem);
    return NULL;
  }
  return Bp_hp (mem);
}
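For context, the comment above says the caller must insert the returned blocks into the free list. In the runtime, expand_heap is reached from the major-heap allocator when the free list cannot satisfy a request, roughly along the lines of the sketch below. This is a simplified sketch, not the actual allocator: the free-list entry points caml_fl_allocate and caml_fl_add_blocks, and their char * signatures, are assumptions based on the same era of the runtime, and the real code raises an out-of-memory exception instead of returning NULL.

/* Sketch only: the expected calling pattern around expand_heap.  Compiles
   only next to the runtime's freelist.h; the signatures are assumed. */
static char *alloc_on_major_heap (mlsize_t wosize)
{
  char *hp, *new_block;

  hp = caml_fl_allocate (wosize);        /* try the free list first */
  if (hp == NULL){
    new_block = expand_heap (wosize);    /* grow the heap by one chunk... */
    if (new_block == NULL) return NULL;  /* out of memory */
    caml_fl_add_blocks (new_block);      /* ...and hand its blue blocks to the free list */
    hp = caml_fl_allocate (wosize);      /* retry; the new chunk is large enough */
  }
  return hp;
}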
Example #2
static char *compact_allocate_r (CAML_R, mlsize_t size)
                                      /* in bytes, including header */
{
  char *chunk, *adr;

  while (Chunk_size (compact_fl) - Chunk_alloc (compact_fl) <= Bhsize_wosize (3)
         && Chunk_size (Chunk_next (compact_fl))
            - Chunk_alloc (Chunk_next (compact_fl))
            <= Bhsize_wosize (3)){
    compact_fl = Chunk_next (compact_fl);
  }
  chunk = compact_fl;
  while (Chunk_size (chunk) - Chunk_alloc (chunk) < size){
    chunk = Chunk_next (chunk);                         Assert (chunk != NULL);
  }
  adr = chunk + Chunk_alloc (chunk);
  Chunk_alloc (chunk) += size;
  return adr;
}
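compact_allocate_r bump-allocates inside the chunks of the major heap: Chunk_size, Chunk_alloc and Chunk_next read bookkeeping fields kept in a header stored just before each chunk's data area. The standalone sketch below mirrors that layout with hypothetical, simplified names (chunk_head, CHUNK_ALLOC, make_chunk are inventions for illustration; the real heap_chunk_head has more fields) and reproduces the walk-and-bump part of the function, leaving out the initial compact_fl fast-forwarding loop.

#include <assert.h>
#include <stdlib.h>

/* Hypothetical, simplified chunk header; the runtime's heap_chunk_head differs. */
typedef struct chunk_head {
  size_t size;    /* usable bytes in the chunk             */
  size_t alloc;   /* bytes already handed out (bump index) */
  char *next;     /* data area of the next chunk, or NULL  */
} chunk_head;

#define CHUNK_SIZE(c)  (((chunk_head *) (c))[-1].size)
#define CHUNK_ALLOC(c) (((chunk_head *) (c))[-1].alloc)
#define CHUNK_NEXT(c)  (((chunk_head *) (c))[-1].next)

/* Allocate a chunk whose data area is preceded by its header, as in the runtime. */
static char *make_chunk (size_t size, char *next)
{
  char *mem = malloc (sizeof (chunk_head) + size);
  chunk_head *h = (chunk_head *) mem;
  h->size = size; h->alloc = 0; h->next = next;
  return mem + sizeof (chunk_head);
}

/* Walk the chunk list to the first chunk with enough room, then bump-allocate. */
static char *chunk_allocate (char *fl, size_t size)
{
  char *chunk = fl, *adr;

  while (CHUNK_SIZE (chunk) - CHUNK_ALLOC (chunk) < size){
    chunk = CHUNK_NEXT (chunk);
    assert (chunk != NULL);
  }
  adr = chunk + CHUNK_ALLOC (chunk);
  CHUNK_ALLOC (chunk) += size;
  return adr;
}

int main (void)
{
  char *c2 = make_chunk (64, NULL);
  char *c1 = make_chunk (32, c2);

  assert (chunk_allocate (c1, 24) == c1);   /* fits in the first chunk */
  assert (chunk_allocate (c1, 16) == c2);   /* spills into the second  */
  return 0;
}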
Example #3
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size (in words).
   The caller must insert the block into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  char *mem;
  asize_t malloc_request;

  malloc_request = caml_round_heap_chunk_size (Bhsize_wosize (request));
  mem = caml_alloc_for_heap (malloc_request);
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  Assert (Wosize_bhsize (malloc_request) >= request);
  Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Caml_blue);

  if (caml_add_to_heap (mem) != 0){
    caml_free_for_heap (mem);
    return NULL;
  }
  return Bp_hp (mem);
}
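All of these variants convert between three kinds of sizes: words excluding the header (wosize, the request), words including the header (whsize), and bytes including the header (bhsize), which is what malloc-level code needs. The standalone sketch below illustrates that arithmetic with simplified stand-ins for the conversion macros; the real definitions live in mlvalues.h and may differ in detail.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t value;   /* one machine word, a stand-in for the runtime's value */

/* Simplified stand-ins for the mlvalues.h conversion macros. */
#define Whsize_wosize(sz) ((sz) + 1)               /* add the header word  */
#define Wosize_whsize(sz) ((sz) - 1)               /* drop the header word */
#define Bsize_wsize(sz)   ((sz) * sizeof (value))  /* words -> bytes       */
#define Wsize_bsize(sz)   ((sz) / sizeof (value))  /* bytes -> words       */
#define Bhsize_wosize(sz) (Bsize_wsize (Whsize_wosize (sz)))
#define Wosize_bhsize(sz) (Wosize_whsize (Wsize_bsize (sz)))

int main (void)
{
  size_t request = 1000;                      /* requested block size in words       */
  size_t bytes = Bhsize_wosize (request);     /* bytes to ask malloc for, w/ header  */

  printf ("%zu words -> %zu bytes including the header\n", request, bytes);
  assert (Wosize_bhsize (bytes) == request);  /* the two conversions are inverses    */
  return 0;
}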
Example #4
/* Allocate more memory from malloc for the heap.
   Return a block of at least the requested size (in words).
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  char *mem;
  char *new_page_table = NULL;
  asize_t new_page_table_size = 0;
  asize_t malloc_request;
  asize_t i, more_pages;

  malloc_request = round_heap_chunk_size (Bhsize_wosize (request));
  gc_message ("Growing heap to %ldk\n",
	      (stat_heap_size + malloc_request) / 1024);
  mem = aligned_malloc (malloc_request + sizeof (heap_chunk_head),
                        sizeof (heap_chunk_head));
  if (mem == NULL){
    gc_message ("No room for growing heap\n", 0);
    return NULL;
  }
  mem += sizeof (heap_chunk_head);
  (((heap_chunk_head *) mem) [-1]).size = malloc_request;
  Assert (Wosize_bhsize (malloc_request) >= request);
  Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Blue);

#ifndef SIXTEEN
  if (mem < heap_start){
    /* This is WRONG, Henning Niss 2005: */
    more_pages = -Page (mem);
  }else if (Page (mem + malloc_request) > page_table_size){
    Assert (mem >= heap_end);
    more_pages = Page (mem + malloc_request) - page_table_size;
  }else{
    more_pages = 0;
  }

  if (more_pages != 0){
    new_page_table_size = page_table_size + more_pages;
    new_page_table = (char *) malloc (new_page_table_size);
    if (new_page_table == NULL){
      gc_message ("No room for growing page table\n", 0);
      free (mem);
      return NULL;
    }
  }

  if (mem < heap_start){
    Assert (more_pages != 0);
    for (i = 0; i < more_pages; i++){
      new_page_table [i] = Not_in_heap;
    }
    bcopy (page_table, new_page_table + more_pages, page_table_size);
    (((heap_chunk_head *) mem) [-1]).next = heap_start;
    heap_start = mem;
  }else{
    char **last;
    char *cur;

    if (mem >= heap_end) heap_end = mem + malloc_request;
    if (more_pages != 0){
      for (i = page_table_size; i < new_page_table_size; i++){
        new_page_table [i] = Not_in_heap;
      }
      bcopy (page_table, new_page_table, page_table_size);
    }
    last = &heap_start;
    cur = *last;
    while (cur != NULL && cur < mem){
      last = &((((heap_chunk_head *) cur) [-1]).next);
      cur = *last;
    }
    (((heap_chunk_head *) mem) [-1]).next = cur;
    *last = mem;
  }

  if (more_pages != 0){
    free (page_table);
    page_table = new_page_table;
    page_table_size = new_page_table_size;
  }
#else                           /* Simplified version for the 8086 */
  {
    char **last;
    char *cur;

    last = &heap_start;
    cur = *last;
    while (cur != NULL && (char huge *) cur < (char huge *) mem){
      last = &((((heap_chunk_head *) cur) [-1]).next);
      cur = *last;
    }
    (((heap_chunk_head *) mem) [-1]).next = cur;
    *last = mem;
  }
#endif

  for (i = Page (mem); i < Page (mem + malloc_request); i++){
    page_table [i] = In_heap;
  }
  stat_heap_size += malloc_request;
  return Bp_hp (mem);
}
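The page table that gets extended and filled in at the end of this function lets the collector decide, with a single array lookup, whether an arbitrary address lies on a heap page (the Is_in_heap-style test). The standalone sketch below illustrates that lookup over a toy address space; Page_log, Page, is_in_heap and the 1 MiB bound are simplified stand-ins for illustration, not the runtime's actual definitions.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define Page_log 12                 /* hypothetical: 4 KiB pages   */
#define Page(a)  ((a) >> Page_log)  /* address -> page-table index */

#define In_heap     1
#define Not_in_heap 0

/* Toy address space: enough table entries to cover 1 MiB of addresses. */
static char page_table [Page (1 << 20)];

/* One table lookup tells whether an address falls on a page owned by the heap. */
static int is_in_heap (uintptr_t a)
{
  return Page (a) < sizeof page_table && page_table [Page (a)] == In_heap;
}

int main (void)
{
  /* Register a chunk of two pages starting at toy address 0x8000, the way
     expand_heap marks its new chunk in the loop at the end. */
  uintptr_t chunk_start = 0x8000, chunk_size = 2 << Page_log, i;

  memset (page_table, Not_in_heap, sizeof page_table);
  for (i = Page (chunk_start); i < Page (chunk_start + chunk_size); i++){
    page_table [i] = In_heap;
  }

  assert (is_in_heap (chunk_start + 100));          /* inside the chunk  */
  assert (!is_in_heap (chunk_start + chunk_size));  /* just past its end */
  return 0;
}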
Example #5
/* Allocate more memory from malloc for the heap.
   Return a block of at least the requested size (in words).
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
	char *mem;
	char *new_page_table = NULL;
	size_t new_page_table_size = 0;
	size_t malloc_request;
	size_t i, more_pages;

	malloc_request = round_heap_chunk_size (Bhsize_wosize (request));
	gc_message ("Growing heap to %ldk\n",
		    (stat_heap_size + malloc_request) / 1024);
	mem = aligned_malloc (malloc_request + sizeof (heap_chunk_head),
			      sizeof (heap_chunk_head));
	if (mem == NULL){
		gc_message ("No room for growing heap\n", 0);
		return NULL;
	}

	mem += sizeof (heap_chunk_head);
	(((heap_chunk_head *) mem) [-1]).size = malloc_request;
	assert (Wosize_bhsize (malloc_request) >= request);
	Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Blue);

	/* The else-if comparison can never be negative: in that branch we have
	 * mem >= heap_start, so the Page computation is positive and the
	 * (unsigned) cast is valid.
	 */
	if (mem < heap_start) {
		more_pages = -Page (mem);
	} else if ((unsigned) Page(mem + malloc_request) > page_table_size) {
		assert (mem >= heap_end);
		more_pages = Page (mem + malloc_request) - page_table_size;
	} else {
		more_pages = 0;
	}

	if (more_pages != 0) {
		new_page_table_size = page_table_size + more_pages;
		new_page_table = (char *) malloc (new_page_table_size);
		if (new_page_table == NULL){
			gc_message ("No room for growing page table\n", 0);
			free (mem);
			return NULL;
		}
	}

	if (mem < heap_start) {
		assert (more_pages != 0);
		for (i = 0; i < more_pages; i++){
			new_page_table [i] = Not_in_heap;
		}
		bcopy (page_table, new_page_table + more_pages, page_table_size);
		(((heap_chunk_head *) mem) [-1]).next = heap_start;
		heap_start = mem;
	} else {
		char **last;
		char *cur;

		if (mem >= heap_end) heap_end = mem + malloc_request;
		if (more_pages != 0) {
			for (i = page_table_size; i < new_page_table_size; i++) {
				new_page_table [i] = Not_in_heap;
			}
			bcopy (page_table, new_page_table, page_table_size);
		}
		last = &heap_start;
		cur = *last;
		while (cur != NULL && cur < mem){
			last = &((((heap_chunk_head *) cur) [-1]).next);
			cur = *last;
		}
		(((heap_chunk_head *) mem) [-1]).next = cur;
		*last = mem;
	}

	if (more_pages != 0) {
		free (page_table);
		page_table = new_page_table;
		page_table_size = new_page_table_size;
	}

	for (i = Page (mem); i < (unsigned) Page (mem + malloc_request); i++){
		page_table [i] = In_heap;
	}
	stat_heap_size += malloc_request;
	return Bp_hp (mem);
}
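Both of the last two versions also insert the new chunk into a singly linked list of chunks kept sorted by increasing address (the last/cur walk before *last = mem). The standalone sketch below isolates that insertion with a hypothetical, simplified node type; the real code threads the list through the heap_chunk_head stored just before each chunk.

#include <assert.h>
#include <stddef.h>

/* Hypothetical, simplified chunk record for illustration only. */
typedef struct chunk {
  struct chunk *next;
} chunk;

static chunk *heap_start = NULL;

/* Insert mem so the list stays sorted by address, mirroring the last/cur walk. */
static void insert_chunk (chunk *mem)
{
  chunk **last = &heap_start;
  chunk *cur = *last;

  while (cur != NULL && cur < mem){
    last = &cur->next;
    cur = *last;
  }
  mem->next = cur;
  *last = mem;
}

int main (void)
{
  chunk c[3];

  insert_chunk (&c[1]);
  insert_chunk (&c[2]);
  insert_chunk (&c[0]);
  assert (heap_start == &c[0]);
  assert (c[0].next == &c[1] && c[1].next == &c[2] && c[2].next == NULL);
  return 0;
}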