/* Exemplo n.º 1 (0) */
/* Invert the pointer at [p]: instead of *p pointing at a block, thread
   *p into a linked list of "inverted pointers" rooted at that block's
   header.  After all pointers to a block have been inverted, walking
   the list from the header reaches every location that referenced the
   block (used by the compactor to rewrite them after the block moves).
   The low "Ecolor" bits of each list word distinguish, as read off this
   code: 0 = inverted pointer (next list cell), 1 = tagged pointer to
   the next infix list in the same block, 2 = cell of an inverted infix
   list, 3 = the block's original header (end of list).
   NOTE(review): this matches OCaml's compactor pointer inversion;
   confirm the encoding against the runtime's compact.c commentary. */
static void invert_pointer_at (word *p)
{
    word q = *p;
    /* p itself must be word-aligned (no tag bits set). */
    Assert (Ecolor ((intnat) p) == 0);

    /* Use Ecolor (q) == 0 instead of Is_block (q) because q could be an
       inverted pointer for an infix header (with Ecolor == 2). */
    if (Ecolor (q) == 0 && (Classify_addr (q) & In_heap)) {
        switch (Ecolor (Hd_val (q))) {
        case 0:
        case 3: /* Pointer or header: insert in inverted list. */
            /* Classic list insertion: *p takes over the current head
               (the old header or a previously inverted pointer) and the
               header slot now points back at p. */
            *p = Hd_val (q);
            Hd_val (q) = (header_t) p;
            break;
        case 1: /* Infix header: make inverted infix list. */
            /* Double inversion: the last of the inverted infix list points to
               the next infix header in this block.  The last of the last list
               contains the original block header. */
        {
            /* This block as a value. */
            value val = (value) q - Infix_offset_val (q);
            /* Get the block header. */
            word *hp = (word *) Hp_val (val);

            /* The header may itself already be inverted: chase the list
               (Ecolor 0 cells) until the cell holding the original
               header (Ecolor 3) is found. */
            while (Ecolor (*hp) == 0) hp = (word *) *hp;
            Assert (Ecolor (*hp) == 3);
            if (Tag_ehd (*hp) == Closure_tag) {
                /* This is the first infix found in this block. */
                /* Save original header. */
                *p = *hp;
                /* Link inverted infix list. */
                Hd_val (q) = (header_t) ((word) p | 2);
                /* Change block header's tag to Infix_tag, and change its size
                   to point to the infix list. */
                *hp = Make_ehd (Wosize_bhsize (q - val), Infix_tag, 3);
            } else {
                Assert (Tag_ehd (*hp) == Infix_tag);
                /* Point the last of this infix list to the current first infix
                   list of the block. */
                *p = (word) &Field (val, Wosize_ehd (*hp)) | 1;
                /* Point the head of this infix list to the above. */
                Hd_val (q) = (header_t) ((word) p | 2);
                /* Change block header's size to point to this infix list. */
                *hp = Make_ehd (Wosize_bhsize (q - val), Infix_tag, 3);
            }
        }
        break;
        case 2: /* Inverted infix list: insert. */
            /* Same insertion as case 0/3, but keep the "infix" tag bit
               (| 2) on the new head so later passes know what it is. */
            *p = Hd_val (q);
            Hd_val (q) = (header_t) ((word) p | 2);
            break;
        }
    }
}
/* Exemplo n.º 2 (0) */
/* Allocate an OCaml Double_array block of [len] elements whose payload
   is aligned to [alignment] bytes.
   [alignment] must be a power of two and a multiple of sizeof(void *)
   (posix_memalign's contract); otherwise this raises Failure.
   One extra [alignment]-sized slot is allocated in front of the payload
   so the OCaml header can sit immediately before the (still aligned)
   data.  Raises Out_of_memory on ENOMEM.
   NOTE(review): (Int_val(len) + 1) * alignment can overflow size_t for
   huge requests — callers should bound len; consider an explicit check. */
value caml_aligned_array_create(size_t alignment, value len)
{
  CAMLparam1 (len);

  void* base;
  char* bp;
  mlsize_t bosize;
  int result;

  /* +1 slot: room for the header while keeping the payload aligned. */
  bosize = (Int_val(len) + 1) * alignment;
  result = posix_memalign(&base, alignment, bosize);
  if (result != 0)
  {
    if (result == EINVAL)
      caml_failwith(
        "The alignment was not a power of two, or was not a multiple of sizeof(void *)");
    else if (result == ENOMEM)
      caml_raise_out_of_memory();
    else
      caml_failwith("Unrecognized error");
  }

  /* Leave space for the header.  Arithmetic is done on char* because
     pointer arithmetic on void* is a GCC extension, not ISO C. */
  bp = (char *) base + alignment;

  /* The header sits just before bp; size excludes the alignment slot. */
  Hd_bp (bp) =
    Make_header (Wosize_bhsize(Bhsize_bosize(bosize - alignment)),
                 Double_array_tag, Caml_white);

  CAMLreturn (Val_bp(bp));
}
/* Exemplo n.º 3 (0) */
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  char *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  /* Over-allocate by caml_percent_free so the free list gets slack,
     not just the single requested block. */
  over_request = request + request / 100 * caml_percent_free;
  malloc_request = caml_round_heap_chunk_size (Bhsize_wosize (over_request));
  mem = caml_alloc_for_heap (malloc_request);
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  /* Carve the chunk into blue blocks of at most Max_wosize words.
     Chain them through field 0; keep field 1 of the FIRST block
     pointing at the current last block (see the function comment). */
  while (Wosize_bhsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    hp += Bhsize_wosize (Max_wosize);
    remain -= Bhsize_wosize (Max_wosize);
    /* Double assignment: first block's field 1 and previous block's
       field 0 both point to the block that starts at hp. */
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    prev = hp;
  }
  /* remain is in bytes here: > 1 byte means at least header + data fit. */
  if (remain > 1){
    Hd_hp (hp) = Make_header (Wosize_bhsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    /* Terminate the chain. */
    Field (Op_hp (hp), 0) = (value) NULL;
  }else{
    /* No room for another block: terminate the chain at prev and, if
       exactly one word is left over, mark it as a zero-size white
       fragment so the sweeper skips it. */
    Field (Op_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap (mem) != 0){
    /* Registration failed: give the chunk back and report OOM. */
    caml_free_for_heap (mem);
    return NULL;
  }
  return Bp_hp (mem);
}
/* Exemplo n.º 4 (0) */
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size (in words).
   The caller must insert the block into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  /* Round the request (in words) up to a whole heap chunk (in bytes). */
  asize_t chunk_size = caml_round_heap_chunk_size (Bhsize_wosize (request));
  char *chunk = caml_alloc_for_heap (chunk_size);

  if (chunk == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  Assert (Wosize_bhsize (chunk_size) >= request);
  /* The entire chunk becomes one blue (free) block. */
  Hd_hp (chunk) = Make_header (Wosize_bhsize (chunk_size), 0, Caml_blue);

  if (caml_add_to_heap (chunk) != 0){
    /* Could not register the chunk with the heap: release it. */
    caml_free_for_heap (chunk);
    return NULL;
  }
  return Bp_hp (chunk);
}
value netsys_copy_value(value flags, value orig)
{
    int code;
    int cflags;
    intnat start_offset, bytelen;
    mlsize_t wosize;
    char *dest, *dest_end, *extra_block, *extra_block_end;
    int color;
    struct named_custom_ops bigarray_ops;
    struct named_custom_ops int32_ops;
    struct named_custom_ops int64_ops;
    struct named_custom_ops nativeint_ops;
    CAMLparam2(orig,flags);
    CAMLlocal1(block);

    /* First test on trivial cases: */
    if (Is_long(orig) || Wosize_val(orig) == 0) {
	CAMLreturn(orig);
    };

    code = prep_stat_tab();
    if (code != 0) goto exit;

    code = prep_stat_queue();
    if (code != 0) goto exit;

    cflags = caml_convert_flag_list(flags, init_value_flags);

    /* fprintf (stderr, "counting\n"); */

    /* Count only! */
    code = netsys_init_value_1(stat_tab, stat_queue, NULL, NULL, orig, 
			       (cflags & 1) ? 1 : 0,  /* enable_bigarrays */
			       (cflags & 2) ? 1 : 0,  /* enable_customs */
			       1, /* enable_atoms */
			       1, /* simulate */
			       NULL, NULL, 0, &start_offset, &bytelen);
    if (code != 0) goto exit;

    /* fprintf (stderr, "done counting bytelen=%ld\n", bytelen); */

    /* set up the custom ops. We always set this, because we assume that
       the values in [orig] are not trustworthy
    */
    bigarray_ops.name = "_bigarray";
    bigarray_ops.ops = 
	Custom_ops_val(alloc_bigarray_dims(CAML_BA_UINT8 | BIGARRAY_C_LAYOUT, 
					   1, NULL, 1));
    bigarray_ops.next = &int32_ops;

    int32_ops.name = "_i";
    int32_ops.ops = Custom_ops_val(caml_copy_int32(0));
    int32_ops.next = &int64_ops;

    int64_ops.name = "_j";
    int64_ops.ops = Custom_ops_val(caml_copy_int64(0));
    int64_ops.next = &nativeint_ops;

    nativeint_ops.name = "_n";
    nativeint_ops.ops = Custom_ops_val(caml_copy_nativeint(0));
    nativeint_ops.next = NULL;

    /* alloc */

    extra_block = NULL;
    extra_block_end = NULL;

    /* shamelessly copied from intern.c */
    wosize = Wosize_bhsize(bytelen);
    /* fprintf (stderr, "wosize=%ld\n", wosize); */
    if (wosize > Max_wosize) {
	/* Round desired size up to next page */
	asize_t request = ((bytelen + Page_size - 1) >> Page_log) << Page_log;
	extra_block = caml_alloc_for_heap(request);
	if (extra_block == NULL) caml_raise_out_of_memory();
	extra_block_end = extra_block + request;
	color = caml_allocation_color(extra_block);
	dest = extra_block;
	dest_end = dest + bytelen;
	block = Val_hp(extra_block);
    } else {
/* Exemplo n.º 6 (0) */
/* Allocate more memory from malloc for the heap.
   Return a block of at least the requested size (in words).
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  char *mem;
  char *new_page_table = NULL;
  asize_t new_page_table_size = 0;
  asize_t malloc_request;
  asize_t i, more_pages;

  malloc_request = round_heap_chunk_size (Bhsize_wosize (request));
  gc_message ("Growing heap to %ldk\n",
	      (stat_heap_size + malloc_request) / 1024);
  /* Allocate the chunk plus its bookkeeping header, aligned so the
     heap_chunk_head sits immediately before the chunk. */
  mem = aligned_malloc (malloc_request + sizeof (heap_chunk_head),
                        sizeof (heap_chunk_head));
  if (mem == NULL){
    gc_message ("No room for growing heap\n", 0);
    return NULL;
  }
  /* Skip past the chunk header; mem now points at the usable chunk. */
  mem += sizeof (heap_chunk_head);
  (((heap_chunk_head *) mem) [-1]).size = malloc_request;
  Assert (Wosize_bhsize (malloc_request) >= request);
  /* The whole chunk becomes one blue (free) block. */
  Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Blue);

#ifndef SIXTEEN
  /* Work out how many entries the page table must grow by, depending
     on whether the new chunk lies below or above the current heap. */
  if (mem < heap_start){
    /* This is WRONG, Henning Niss 2005: */
    more_pages = -Page (mem);
  }else if (Page (mem + malloc_request) > page_table_size){
    Assert (mem >= heap_end);
    more_pages = Page (mem + malloc_request) - page_table_size;
  }else{
    more_pages = 0;
  }

  if (more_pages != 0){
    new_page_table_size = page_table_size + more_pages;
    new_page_table = (char *) malloc (new_page_table_size);
    if (new_page_table == NULL){
      gc_message ("No room for growing page table\n", 0);
      free (mem);
      return NULL;
    }
  }

  if (mem < heap_start){
    /* Chunk below the heap: new entries go at the FRONT of the page
       table; the old table is copied after them.  The chunk becomes
       the new first element of the chunk list. */
    Assert (more_pages != 0);
    for (i = 0; i < more_pages; i++){
      new_page_table [i] = Not_in_heap;
    }
    bcopy (page_table, new_page_table + more_pages, page_table_size);
    (((heap_chunk_head *) mem) [-1]).next = heap_start;
    heap_start = mem;
  }else{
    char **last;
    char *cur;

    if (mem >= heap_end) heap_end = mem + malloc_request;
    if (more_pages != 0){
      /* Chunk above the heap: new entries go at the END of the table. */
      for (i = page_table_size; i < new_page_table_size; i++){
        new_page_table [i] = Not_in_heap;
      }
      bcopy (page_table, new_page_table, page_table_size);
    }
    /* Insert the chunk into the address-ordered chunk list. */
    last = &heap_start;
    cur = *last;
    while (cur != NULL && cur < mem){
      last = &((((heap_chunk_head *) cur) [-1]).next);
      cur = *last;
    }
    (((heap_chunk_head *) mem) [-1]).next = cur;
    *last = mem;
  }

  if (more_pages != 0){
    /* Swap in the enlarged page table. */
    free (page_table);
    page_table = new_page_table;
    page_table_size = new_page_table_size;
  }
#else                           /* Simplified version for the 8086 */
  {
    char **last;
    char *cur;

    /* Address-ordered insertion, comparing through huge pointers so
       segmented 8086 addresses compare correctly. */
    last = &heap_start;
    cur = *last;
    while (cur != NULL && (char huge *) cur < (char huge *) mem){
      last = &((((heap_chunk_head *) cur) [-1]).next);
      cur = *last;
    }
    (((heap_chunk_head *) mem) [-1]).next = cur;
    *last = mem;
  }
#endif

  /* Mark every page covered by the new chunk as in-heap. */
  for (i = Page (mem); i < Page (mem + malloc_request); i++){
    page_table [i] = In_heap;
  }
  stat_heap_size += malloc_request;
  return Bp_hp (mem);
}
/* Exemplo n.º 7 (0) */
/* Allocate more memory from malloc for the heap.
   Return a block of at least the requested size (in words).
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
	char *mem;
	char *new_page_table = NULL;
	size_t new_page_table_size = 0;
	size_t malloc_request;
	size_t i, more_pages;

	malloc_request = round_heap_chunk_size (Bhsize_wosize (request));
	gc_message ("Growing heap to %ldk\n",
		    (stat_heap_size + malloc_request) / 1024);
	/* Allocate the chunk plus its bookkeeping header, aligned so the
	 * heap_chunk_head sits immediately before the chunk. */
	mem = aligned_malloc (malloc_request + sizeof (heap_chunk_head),
			      sizeof (heap_chunk_head));
	if (mem == NULL){
		gc_message ("No room for growing heap\n", 0);
		return NULL;
	}

	/* Skip past the chunk header; mem now points at the usable chunk. */
	mem += sizeof (heap_chunk_head);
	(((heap_chunk_head *) mem) [-1]).size = malloc_request;
	assert (Wosize_bhsize (malloc_request) >= request);
	/* The whole chunk becomes one blue (free) block. */
	Hd_hp (mem) = Make_header (Wosize_bhsize (malloc_request), 0, Blue);

	/* The else if check here can never be negative since have mem >= heap_start
	 * so the Page calculation will be positive. Hence the (unsigned) cast is valid
	 */
	if (mem < heap_start) {
		more_pages = -Page (mem);
	} else if ((unsigned) Page(mem + malloc_request) > page_table_size) {
		assert (mem >= heap_end);
		more_pages = Page (mem + malloc_request) - page_table_size;
	} else {
		more_pages = 0;
	}

	if (more_pages != 0) {
		new_page_table_size = page_table_size + more_pages;
		new_page_table = (char *) malloc (new_page_table_size);
		if (new_page_table == NULL){
			gc_message ("No room for growing page table\n", 0);
			free (mem);
			return NULL;
		}
	}

	if (mem < heap_start) {
		/* Chunk below the heap: new page-table entries go at the
		 * FRONT; the chunk becomes the head of the chunk list. */
		assert (more_pages != 0);
		for (i = 0; i < more_pages; i++){
			new_page_table [i] = Not_in_heap;
		}
		bcopy (page_table, new_page_table + more_pages, page_table_size);
		(((heap_chunk_head *) mem) [-1]).next = heap_start;
		heap_start = mem;
	} else {
		char **last;
		char *cur;

		if (mem >= heap_end) heap_end = mem + malloc_request;
		if (more_pages != 0) {
			/* Chunk above the heap: new entries go at the END. */
			for (i = page_table_size; i < new_page_table_size; i++) {
				new_page_table [i] = Not_in_heap;
			}
			bcopy (page_table, new_page_table, page_table_size);
		}
		/* Insert the chunk into the address-ordered chunk list. */
		last = &heap_start;
		cur = *last;
		while (cur != NULL && cur < mem){
			last = &((((heap_chunk_head *) cur) [-1]).next);
			cur = *last;
		}
		(((heap_chunk_head *) mem) [-1]).next = cur;
		*last = mem;
	}

	if (more_pages != 0) {
		/* Swap in the enlarged page table. */
		free (page_table);
		page_table = new_page_table;
		page_table_size = new_page_table_size;
	}

	/* Mark every page covered by the new chunk as in-heap. */
	for (i = Page (mem); i < (unsigned) Page (mem + malloc_request); i++){
		page_table [i] = In_heap;
	}
	stat_heap_size += malloc_request;
	return Bp_hp (mem);
}