Example #1
CAMLexport void * caml_stat_alloc (asize_t sz)
{
  void* result = malloc (sizeof(value) + sz);
  if (result == NULL)
    caml_raise_out_of_memory();
  Hd_hp(result) = Make_header(STAT_ALLOC_MAGIC, Abstract_tag, NOT_MARKABLE);
#ifdef DEBUG
  memset ((void*)Val_hp(result), Debug_uninit_stat, sz);
#endif
  return (void*)Val_hp(result);
}
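For reference, every example on this page relies on the same one-word-header layout. A minimal sketch of the two conversion macros, assuming the usual definitions (the authoritative ones live in caml/mlvalues.h):

/* A block's value pointer sits one word past its header: */
#define Val_hp(hp) ((value) ((header_t *) (hp) + 1))   /* header ptr -> value */
#define Hp_val(val) ((header_t *) (val) - 1)           /* value -> header ptr */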
Example #2
File: gc_ctrl.c  Project: rizo/snowflake-os
static void check_block (char *hp)
{
    mlsize_t i;
    value v = Val_hp (hp);
    value f;

    check_head (v);
    switch (Tag_hp (hp)) {
    case Abstract_tag:
        break;
    case String_tag:
        break;
    case Double_tag:
        Assert (Wosize_val (v) == Double_wosize);
        break;
    case Double_array_tag:
        Assert (Wosize_val (v) % Double_wosize == 0);
        break;
    case Custom_tag:
        Assert (!Is_in_heap (Custom_ops_val (v)));
        break;

    case Infix_tag:
        Assert (0);
        break;

    default:
        Assert (Tag_hp (hp) < No_scan_tag);
        for (i = 0; i < Wosize_hp (hp); i++) {
            f = Field (v, i);
            if (Is_block (f) && Is_in_heap (f)) check_head (f);
        }
    }
}
Example #3
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize,
                             tag_t tag, int pinned)
{
  mlsize_t whsize = Whsize_wosize(wosize);
  value* p;
  uintnat colour;

  Assert (wosize > 0);
  Assert (tag != Infix_tag);
  if (whsize <= SIZECLASS_MAX) {
    sizeclass sz = sizeclass_wsize[whsize];
    Assert(wsize_sizeclass[sz] >= whsize);
    p = pool_allocate(local, sz);
    if (!p) return 0;
    struct heap_stats* s = &local->stats;
    s->pool_live_blocks++;
    s->pool_live_words += whsize;
    s->pool_frag_words += wsize_sizeclass[sz] - whsize;
  } else {
    p = large_allocate(local, Bsize_wsize(whsize));
    if (!p) return 0;
  }
  colour = pinned ? NOT_MARKABLE : global.MARKED;
  Hd_hp (p) = Make_header(wosize, tag, colour);
#ifdef DEBUG
  {
    int i;
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(p))[i] = Debug_free_major;
    }
  }
#endif
  return p;
}
Example #4
File: memory.c  Project: dhil/ocaml-effects
CAMLexport value caml_alloc_shr (mlsize_t wosize, tag_t tag)
{
  header_t *hp;
  value *new_block;

  if (wosize > Max_wosize) caml_raise_out_of_memory ();
  hp = caml_fl_allocate (wosize);
  if (hp == NULL){
    new_block = expand_heap (wosize);
    if (new_block == NULL) {
      if (caml_in_minor_collection)
        caml_fatal_error ("Fatal error: out of memory.\n");
      else
        caml_raise_out_of_memory ();
    }
    caml_fl_add_blocks ((value) new_block);
    hp = caml_fl_allocate (wosize);
  }

  Assert (Is_in_heap (Val_hp (hp)));

  /* Inline expansion of caml_allocation_color. */
  if (caml_gc_phase == Phase_mark
      || (caml_gc_phase == Phase_sweep && (addr)hp >= (addr)caml_gc_sweep_hp)){
    Hd_hp (hp) = Make_header (wosize, tag, Caml_black);
  }else{
    Assert (caml_gc_phase == Phase_idle
            || (caml_gc_phase == Phase_sweep
                && (addr)hp < (addr)caml_gc_sweep_hp));
    Hd_hp (hp) = Make_header (wosize, tag, Caml_white);
  }
  Assert (Hd_hp (hp) == Make_header (wosize, tag, caml_allocation_color (hp)));
  caml_allocated_words += Whsize_wosize (wosize);
  if (caml_allocated_words > caml_minor_heap_wsz){
    caml_urge_major_slice ();
  }
#ifdef DEBUG
  {
    uintnat i;
    for (i = 0; i < wosize; i++){
      Field (Val_hp (hp), i) = Debug_uninit_major;
    }
  }
#endif
  return Val_hp (hp);
}
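For comparison, a sketch of the caml_allocation_color logic that the branch above inlines, reconstructed from this function's assertions rather than quoted from memory.c:

color_t caml_allocation_color (void *hp)
{
  if (caml_gc_phase == Phase_mark
      || (caml_gc_phase == Phase_sweep && (addr)hp >= (addr)caml_gc_sweep_hp)){
    return Caml_black;   /* will not be scanned again in this cycle */
  }else{
    /* Phase_idle, or the sweeper has not reached this address yet. */
    return Caml_white;
  }
}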
Example #5
File: memory.c  Project: dhil/ocaml-effects
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   [request] is a number of words and must be less than or equal
   to [Max_wosize].
   Return NULL when out of memory.
*/
static value *expand_heap (mlsize_t request)
{
  /* these point to headers, but we do arithmetic on them, hence [value *]. */
  value *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  over_request = Whsize_wosize (request + request / 100 * caml_percent_free);
  malloc_request = caml_round_heap_chunk_wsz (over_request);
  mem = (value *) caml_alloc_for_heap (Bsize_wsize (malloc_request));
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  while (Wosize_whsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    hp += Whsize_wosize (Max_wosize);
    remain -= Whsize_wosize (Max_wosize);
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    Hd_hp (hp) = Make_header (Wosize_whsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    Field (Val_hp (hp), 0) = (value) NULL;
  }else{
    Field (Val_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap ((char *) mem) != 0){
    caml_free_for_heap ((char *) mem);
    return NULL;
  }
  return Op_hp (mem);
}
Example #6
static value alloc_shared(mlsize_t wosize, tag_t tag)
{
  void* mem = caml_shared_try_alloc(caml_domain_self()->shared_heap, wosize, tag, 0 /* not pinned */);
  caml_domain_state->allocated_words += Whsize_wosize(wosize);
  if (mem == NULL) {
    caml_fatal_error("allocation failure during minor GC");
  }
  return Val_hp(mem);
}
Example #7
static value caml_promote_one(struct promotion_stack* stk, struct domain* domain, value curr)
{
  header_t curr_block_hd;
  int infix_offset = 0;
  if (Is_long(curr) || !Is_minor(curr))
    return curr; /* needs no promotion */

  Assert(caml_owner_of_young_block(curr) == domain);

  curr_block_hd = Hd_val(curr);

  if (Tag_hd(curr_block_hd) == Infix_tag) {
    infix_offset = Infix_offset_val(curr);
    curr -= infix_offset;
    curr_block_hd = Hd_val(curr);
  }

  if (Is_promoted_hd(curr_block_hd)) {
    /* already promoted */
    return caml_addrmap_lookup(&domain->state->remembered_set->promotion, curr) + infix_offset;
  } else if (curr_block_hd == 0) {
    /* promoted by minor GC */
    return Op_val(curr)[0] + infix_offset;
  }

  /* otherwise, must promote */
  void* mem = caml_shared_try_alloc(domain->shared_heap, Wosize_hd(curr_block_hd),
                                           Tag_hd(curr_block_hd), 1);
  if (!mem) caml_fatal_error("allocation failure during promotion");
  value promoted = Val_hp(mem);
  Hd_val(curr) = Promotedhd_hd(curr_block_hd);

  caml_addrmap_insert(&domain->state->remembered_set->promotion, curr, promoted);
  caml_addrmap_insert(&domain->state->remembered_set->promotion_rev, promoted, curr);

  if (Tag_hd(curr_block_hd) >= No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_hd(curr_block_hd); i++)
      Op_val(promoted)[i] = Op_val(curr)[i];
  } else {
    /* push to stack */
    if (stk->sp == stk->stack_len) {
      stk->stack_len = 2 * (stk->stack_len + 10);
      stk->stack = caml_stat_resize(stk->stack,
          sizeof(struct promotion_stack_entry) * stk->stack_len);
    }
    stk->stack[stk->sp].local = curr;
    stk->stack[stk->sp].global = promoted;
    stk->stack[stk->sp].field = 0;
    stk->sp++;
  }
  return promoted + infix_offset;
}
Example #8
void caml_redarken_pool(struct pool* r, scanning_action f, void* fdata) {
  mlsize_t wh = wsize_sizeclass[r->sz];
  value* p = (value*)((char*)r + POOL_HEADER_SZ);
  value* end = (value*)((char*)r + Bsize_wsize(POOL_WSIZE));

  while (p + wh <= end) {
    header_t hd = p[0];
    if (hd != 0 && Has_status_hd(hd, global.MARKED)) {
      f(fdata, Val_hp(p), 0);
    }
    p += wh;
  }
}
Example #9
static void lm_heap_check_aux2(char *name)
{
    char *start, *ptr, *end;
    char *v;
    header_t hd;
    mlsize_t size;
    unsigned i;

    start = caml_young_start;
    ptr = caml_young_ptr;
    end = caml_young_end;

    fprintf(stderr, "AAA: %s: [0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx] (%ld/%ld/%ld bytes)\n",
            name,
            (unsigned long) caml_young_start,
            (unsigned long) caml_young_ptr,
            (unsigned long) caml_young_limit,
            (unsigned long) caml_young_end, 
            ((unsigned long) caml_young_end) - (unsigned long) caml_young_ptr,
            ((unsigned long) caml_young_end) - (unsigned long) caml_young_limit,
            ((unsigned long) caml_young_end) - (unsigned long) caml_young_start);

    fflush(stderr);

    /*
     * Phase 1: check that the headers have the right sizes.
     */
    v = (char *) Val_hp(caml_young_ptr);
    while(v < caml_young_end) {
        hd = Hd_val(v);
        if(hd == Debug_free_minor) {
            fprintf(stderr, "Bogus pointer: 0x%08lx\n", (unsigned long) v);
            fflush(stderr);
            v += sizeof(header_t);
        }
        else {
            size = Wosize_val(v);
            for(i = 0; i != size; i++) {
                char *p = (char *) Field(v, i);
                /* a pointer into [caml_young_limit, caml_young_ptr) refers to
                   the empty part of the minor heap */
                if(p >= caml_young_limit && p < caml_young_ptr) {
                    fprintf(stderr, "%s: Found a bogus pointer: 0x%08lx[%d] = 0x%08lx\n",
                            name, (unsigned long) v, i, (unsigned long) p);
                    fflush(stderr);
                    abort();
                }
            }
            v = (char *) &Field(v, size + 1);
        }
    }
}
Example #10
File: memory.c  Project: bluegnu/mosml
EXTERN value alloc_shr (mlsize_t wosize, tag_t tag)
{
  char *hp, *new_block;

  hp = fl_allocate (wosize);
  if (hp == NULL){
    new_block = expand_heap (wosize);
    if (new_block == NULL) raise_out_of_memory ();
    fl_add_block (new_block);
    hp = fl_allocate (wosize);
    if (hp == NULL) fatal_error ("alloc_shr: expand heap failed\n");
  }

  Assert (Is_in_heap (Val_hp (hp)));

  if (gc_phase == Phase_mark || (addr)hp >= (addr)gc_sweep_hp){
    Hd_hp (hp) = Make_header (wosize, tag, Black);
  }else{
    Hd_hp (hp) = Make_header (wosize, tag, White);
  }
  allocated_words += Whsize_wosize (wosize);
  if (allocated_words > Wsize_bsize (minor_heap_size)) force_minor_gc ();
  return Val_hp (hp);
}
Example #11
CAMLexport void * caml_stat_resize (void * p, asize_t sz)
{
  void * result;

  if (p == NULL)
    return caml_stat_alloc(sz);

  result = realloc (Hp_val((value)p), sizeof(value) + sz);

  if (result == NULL) {
    caml_stat_free(p);
    caml_raise_out_of_memory ();
  }

  return (void*)Val_hp(result);
}
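A hypothetical usage sketch (not from any of the projects quoted here): since caml_stat_alloc returns Val_hp of the malloc'd block, the buffer must only be grown or released through its caml_stat_* siblings, which undo that offset before calling realloc/free:

{
  char *buf = caml_stat_alloc (64);    /* raises Out_of_memory on failure */
  buf = caml_stat_resize (buf, 128);   /* frees the old block before raising on failure */
  caml_stat_free (buf);
}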
Example #12
/* Cut a block of memory into Max_wosize pieces, give them headers,
   and optionally merge them into the free list.
   arguments:
   p: pointer to the first word of the block
   size: size of the block (in words)
   do_merge: 1 -> do merge; 0 -> do not merge
   color: which color to give to the pieces; if [do_merge] is 1, this
          is overridden by the merge code, but we have historically used
          [Caml_white].
*/
void caml_make_free_blocks (value *p, mlsize_t size, int do_merge, int color)
{
  mlsize_t sz;

  while (size > 0){
    if (size > Whsize_wosize (Max_wosize)){
      sz = Whsize_wosize (Max_wosize);
    }else{
      sz = size;
    }
    *(header_t *)p = Make_header (Wosize_whsize (sz), 0, color);
    if (do_merge) caml_fl_merge_block (Val_hp (p));
    size -= sz;
    p += sz;
  }
}
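A typical call site, mirroring the free-list rebuild at the end of the compaction code in Example #21 below (Chunk_size/Chunk_alloc are that file's chunk accessors):

/* Carve the unallocated tail of a chunk into free blocks and merge them;
   with do_merge == 1 the color argument is overridden by the merge code. */
caml_make_free_blocks ((value *) (ch + Chunk_alloc (ch)),
                       Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch)),
                       1, Caml_white);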
Example #13
static value next_minor_block(caml_domain_state* domain_state, value curr_hp)
{
  mlsize_t wsz;
  header_t hd;
  value curr_val;
  CAMLassert ((value)domain_state->young_ptr <= curr_hp);
  CAMLassert (curr_hp < (value)domain_state->young_end);
  hd = Hd_hp(curr_hp);
  curr_val = Val_hp(curr_hp);
  if (hd == 0) {
    /* Forwarded object, find the promoted version */
    curr_val = Op_val(curr_val)[0];
  }
  CAMLassert (Is_block(curr_val) && Hd_val(curr_val) != 0 && Tag_val(curr_val) != Infix_tag);
  wsz = Wosize_val(curr_val);
  CAMLassert (wsz <= Max_young_wosize);
  return curr_hp + Bsize_wsize(Whsize_wosize(wsz));
}
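A sketch of the minor-heap walk this helper supports; the same pattern appears in the DEBUG block of caml_empty_minor_heap_domain in Example #20:

value hp;
for (hp = (value) domain_state->young_ptr;
     hp < (value) domain_state->young_end;
     hp = next_minor_block (domain_state, hp)) {
  /* Hd_hp (hp) == 0 marks a block forwarded by the minor GC; otherwise
     Val_hp (hp) is a live young value whose header gives size and tag. */
}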
Example #14
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize, tag_t tag, int pinned) {
  mlsize_t whsize = Whsize_wosize(wosize);
  value* p;
  Assert (wosize > 0);
  Assert (tag != Infix_tag);
  if (whsize <= SIZECLASS_MAX) {
    p = pool_allocate(local, sizeclass_wsize[whsize]);
  } else {
    p = large_allocate(local, Bsize_wsize(whsize));
  }
  if (!p) return 0;
  Hd_hp (p) = Make_header(wosize, tag, pinned ? NOT_MARKABLE : global.UNMARKED);
#ifdef DEBUG
  {
    int i;
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(p))[i] = Debug_free_major;
    }
  }
#endif
  return p;
}
Example #15
static value promote_stack(struct domain* domain, value stack)
{
  caml_gc_log("Promoting stack");
  Assert(Tag_val(stack) == Stack_tag);
  if (Is_minor(stack)) {
    /* First, promote the actual stack object */
    Assert(caml_owner_of_young_block(stack) == domain);
    /* Stacks are only referenced via fibers, so we don't bother
       using the promotion_table */
    void* new_stack = caml_shared_try_alloc(domain->shared_heap, Wosize_val(stack), Stack_tag, 0);
    if (!new_stack) caml_fatal_error("allocation failure during stack promotion");
    memcpy(Op_hp(new_stack), (void*)stack, Wosize_val(stack) * sizeof(value));
    stack = Val_hp(new_stack);
  }

  /* Promote each object on the stack. */
  promote_domain = domain;
  caml_scan_stack(&promote_stack_elem, stack);
  /* Since we've promoted the objects on the stack, the stack is now clean. */
  caml_clean_stack_domain(stack, domain);
  return stack;
}
Example #16
int netsys_init_value_1(struct htab *t,
			struct nqueue *q,
			char *dest,
			char *dest_end,
			value orig,  
			int enable_bigarrays, 
			int enable_customs,
			int enable_atoms,
			int simulation,
			void *target_addr,
			struct named_custom_ops *target_custom_ops,
			int color,
			intnat *start_offset,
			intnat *bytelen
			)
{
    void *orig_addr;
    void *work_addr;
    value work;
    int   work_tag;
    char *work_header;
    size_t work_bytes;
    size_t work_words;
    void *copy_addr;
    value copy;
    char *copy_header;
    header_t copy_header1;
    int   copy_tag;
    size_t copy_words;
    void *fixup_addr;
    char *dest_cur;
    char *dest_ptr;
    int code, i;
    intnat addr_delta;
    struct named_custom_ops *ops_ptr;
    void *int32_target_ops;
    void *int64_target_ops;
    void *nativeint_target_ops;
    void *bigarray_target_ops;

    copy = 0;

    dest_cur = dest;
    addr_delta = ((char *) target_addr) - dest;

    if (dest_cur >= dest_end && !simulation) return (-4);   /* out of space */

    if (!Is_block(orig)) return (-2);

    orig_addr = (void *) orig;
    code = netsys_queue_add(q, orig_addr);
    if (code != 0) return code;

    /* initialize *_target_ops */
    bigarray_target_ops = NULL;
    int32_target_ops = NULL;
    int64_target_ops = NULL;
    nativeint_target_ops = NULL;
    ops_ptr = target_custom_ops;
    while (ops_ptr != NULL) {
	if (strcmp(ops_ptr->name, "_bigarray") == 0)
	    bigarray_target_ops = ops_ptr->ops;
	else if (strcmp(ops_ptr->name, "_i") == 0)
	    int32_target_ops = ops_ptr->ops;
	else if (strcmp(ops_ptr->name, "_j") == 0)
	    int64_target_ops = ops_ptr->ops;
	else if (strcmp(ops_ptr->name, "_n") == 0)
	    nativeint_target_ops = ops_ptr->ops;
	ops_ptr = ops_ptr->next;
    };

    /* First pass: Iterate over the addresses found in q. Ignore
       addresses already seen in the past (which are in t). For
       new addresses, make a copy, and add these copies to t.
    */

    /* fprintf(stderr, "first pass, orig_addr=%lx simulation=%d addr_delta=%lx\n",
       (unsigned long) orig_addr, simulation, addr_delta);
    */

    code = netsys_queue_take(q, &work_addr);
    while (code != (-3)) {
	if (code != 0) return code;

	/* fprintf(stderr, "work_addr=%lx\n", (unsigned long) work_addr); */

	code = netsys_htab_lookup(t, work_addr, &copy_addr);
	if (code != 0) return code;

	if (copy_addr == NULL) {
	    /* The address is unknown, so copy the value */

	    /* Body of first pass */
	    work = (value) work_addr;
	    work_tag = Tag_val(work);
	    work_header = Hp_val(work);
	    
	    if (work_tag < No_scan_tag) {
		/* It is a scanned value (with subvalues) */
		
		switch(work_tag) {
		case Object_tag:
		case Closure_tag:
		case Lazy_tag:
		case Forward_tag:
		    return (-2);   /* unsupported */
		}

		work_words = Wosize_hp(work_header);
		if (work_words == 0) {
		    if (!enable_atoms) return (-2);
		    if (enable_atoms == 1) goto next;
		};
		
		/* Do the copy. */

		work_bytes = Bhsize_hp(work_header);
		copy_header = dest_cur;
		dest_cur += work_bytes;
		if (dest_cur > dest_end && !simulation) return (-4);
		
		if (simulation) 
		    copy_addr = work_addr;
		else {
		    memcpy(copy_header, work_header, work_bytes);
		    copy = Val_hp(copy_header);
		    copy_addr = (void *) copy;
		    Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
		}

		/* Add the association (work_addr -> copy_addr) to t: */

		code = netsys_htab_add(t, work_addr, copy_addr);
		if (code < 0) return code;

		/* Add the sub values of work_addr to q: */

		for (i=0; i < work_words; ++i) {
		    value field = Field(work, i);
		    if (Is_block (field)) {
			code = netsys_queue_add(q, (void *) field);
			if (code != 0) return code;
		    }
		}
	    }
	    else {
		/* It is an opaque value */
		int do_copy = 0;
		int do_bigarray = 0;
		void *target_ops = NULL;
		char caml_id = ' ';  /* only b, i, j, n */
		/* Check for bigarrays and other custom blocks */
		switch (work_tag) {
		case Abstract_tag:
		    return(-2);
		case String_tag:
		    do_copy = 1; break;
		case Double_tag:
		    do_copy = 1; break;
		case Double_array_tag:
		    do_copy = 1; break;
		case Custom_tag: 
		    {
			struct custom_operations *custom_ops;
			char *id;

			custom_ops = Custom_ops_val(work);
			id = custom_ops->identifier;
			if (id[0] == '_') {
			    switch (id[1]) {
			    case 'b':
				if (!enable_bigarrays) return (-2);
				if (strcmp(id, "_bigarray") == 0) {
				    caml_id = 'b';
				    break;
				}
			    case 'i': /* int32 */
			    case 'j': /* int64 */
			    case 'n': /* nativeint */
				if (!enable_customs) return (-2);
				if (id[2] == 0) {
				    caml_id = id[1];
				    break;
				}
			    default:
				return (-2);
			    }
			}
			else
			    return (-2);
		    }
		}; /* switch */

		switch (caml_id) {  /* look closer at some cases */
		case 'b': {
		    target_ops = bigarray_target_ops;
		    do_copy = 1;
		    do_bigarray = 1;
		    break;
		}
		case 'i':
		    target_ops = int32_target_ops; do_copy = 1; break;
		case 'j':
		    target_ops = int64_target_ops; do_copy = 1; break;
		case 'n':
		    target_ops = nativeint_target_ops; do_copy = 1; break;
		};

		if (do_copy) {  
		    /* Copy the value */
		    work_bytes = Bhsize_hp(work_header);
		    copy_header = dest_cur;
		    dest_cur += work_bytes;

		    if (simulation)
			copy_addr = work_addr;
		    else {
			if (dest_cur > dest_end) return (-4);
			memcpy(copy_header, work_header, work_bytes);
			copy = Val_hp(copy_header);
			copy_addr = (void *) copy;
			Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
			if (target_ops != NULL)
			    Custom_ops_val(copy) = target_ops;
		    }
		    
		    code = netsys_htab_add(t, work_addr, copy_addr);
		    if (code < 0) return code;
		}

		if (do_bigarray) {
		    /* postprocessing for copying bigarrays */
		    struct caml_ba_array *b_work, *b_copy;
		    void * data_copy;
		    char * data_header;
		    header_t data_header1;
		    size_t size = 1;
		    size_t size_aligned;
		    size_t size_words;
		    b_work = Bigarray_val(work);
		    b_copy = Bigarray_val(copy);
		    for (i = 0; i < b_work->num_dims; i++) {
			size = size * b_work->dim[i];
		    };
		    size = 
			size * 
			caml_ba_element_size[b_work->flags & BIGARRAY_KIND_MASK];

		    size_aligned = size;
		    if (size%sizeof(void *) != 0)
			size_aligned += sizeof(void *) - (size%sizeof(void *));
		    size_words = Wsize_bsize(size_aligned);

		    /* If we put the copy of the bigarray into our own
		       dest buffer, also generate an abstract header,
		       so it can be skipped when iterating over it.

		       We use here a special representation, so we can
		       encode any length in this header (with a normal
		       Ocaml header we are limited by Max_wosize, e.g.
		       16M on 32 bit systems). The special representation
		       is an Abstract_tag with zero length, followed
		       by the real length (in words)
		    */
		    
		    if (enable_bigarrays == 2) {
			data_header = dest_cur;
			dest_cur += 2*sizeof(void *);
			data_copy = dest_cur;
			dest_cur += size_aligned;
		    } else if (!simulation) {
			data_header = NULL;
			data_copy = stat_alloc(size_aligned);
		    };

		    if (!simulation) {
			if (dest_cur > dest_end) return (-4);

			/* Initialize header: */
			
			if (data_header != NULL) {
			    data_header1 = Abstract_tag;
			    memcpy(data_header, 
				   (char *) &data_header1,
				   sizeof(header_t));
			    memcpy(data_header + sizeof(header_t),
				   (size_t *) &size_words,
				   sizeof(size_t));
			};

			/* Copy bigarray: */
			
			memcpy(data_copy, b_work->data, size);
			b_copy->data = data_copy;
			b_copy->proxy = NULL;

			/* If the copy is in our own buffer, it is
			   now externally managed.
			*/
			b_copy->flags = 
			    (b_copy->flags & ~CAML_BA_MANAGED_MASK) |
			    (enable_bigarrays == 2 ? 
			     CAML_BA_EXTERNAL :
			     CAML_BA_MANAGED);
		    }
		}

	    } /* if (work_tag < No_scan_tag) */
	} /* if (copy_addr == NULL) */

	/* Switch to next address in q: */
    next:
	code = netsys_queue_take(q, &work_addr);
    } /* while */
    
    /* Second pass. The copied blocks still have fields pointing to the
       original blocks. We fix that now by iterating once over the copied
       memory block.
    */

    if (!simulation) {
	/* fprintf(stderr, "second pass\n"); */
	dest_ptr = dest;
	while (dest_ptr < dest_cur) {
	    copy_header1 = *((header_t *) dest_ptr);
	    copy_tag = Tag_hd(copy_header1);
	    copy_words = Wosize_hd(copy_header1);
	    copy = (value) (dest_ptr + sizeof(void *));
	    
	    if (copy_tag < No_scan_tag) {
		for (i=0; i < copy_words; ++i) {
		    value field = Field(copy, i);
		    if (Is_block (field)) {
			/* It is a pointer. Try to fix it up. */
			code = netsys_htab_lookup(t, (void *) field,
						  &fixup_addr);
			if (code != 0) return code;

			if (fixup_addr != NULL)
			    Field(copy,i) = 
				(value) (((char *) fixup_addr) + addr_delta);
		    }
		}
	    }
	    else if (copy_tag == Abstract_tag && copy_words == 0) {
		/* our special representation for skipping data regions */
		copy_words = ((size_t *) dest_ptr)[1] + 1;
	    };
	    
	    dest_ptr += (copy_words + 1) * sizeof(void *);
	}
    }	

    /* hey, fine. Return result */
    *start_offset = sizeof(void *);
    *bytelen = dest_cur - dest;

    /* fprintf(stderr, "return regularly\n");*/

    return 0;
}
Example #17
value netsys_copy_value(value flags, value orig)
{
    int code;
    int cflags;
    intnat start_offset, bytelen;
    mlsize_t wosize;
    char *dest, *dest_end, *extra_block, *extra_block_end;
    int color;
    struct named_custom_ops bigarray_ops;
    struct named_custom_ops int32_ops;
    struct named_custom_ops int64_ops;
    struct named_custom_ops nativeint_ops;
    CAMLparam2(orig,flags);
    CAMLlocal1(block);

    /* First test on trivial cases: */
    if (Is_long(orig) || Wosize_val(orig) == 0) {
	CAMLreturn(orig);
    };

    code = prep_stat_tab();
    if (code != 0) goto exit;

    code = prep_stat_queue();
    if (code != 0) goto exit;

    cflags = caml_convert_flag_list(flags, init_value_flags);

    /* fprintf (stderr, "counting\n"); */

    /* Count only! */
    code = netsys_init_value_1(stat_tab, stat_queue, NULL, NULL, orig, 
			       (cflags & 1) ? 1 : 0,  /* enable_bigarrays */
			       (cflags & 2) ? 1 : 0,  /* enable_customs */
			       1, /* enable_atoms */
			       1, /* simulate */
			       NULL, NULL, 0, &start_offset, &bytelen);
    if (code != 0) goto exit;

    /* fprintf (stderr, "done counting bytelen=%ld\n", bytelen); */

    /* set up the custom ops. We always set this, because we assume that
       the values in [orig] are not trustworthy
    */
    bigarray_ops.name = "_bigarray";
    bigarray_ops.ops = 
	Custom_ops_val(alloc_bigarray_dims(CAML_BA_UINT8 | BIGARRAY_C_LAYOUT, 
					   1, NULL, 1));
    bigarray_ops.next = &int32_ops;

    int32_ops.name = "_i";
    int32_ops.ops = Custom_ops_val(caml_copy_int32(0));
    int32_ops.next = &int64_ops;

    int64_ops.name = "_j";
    int64_ops.ops = Custom_ops_val(caml_copy_int64(0));
    int64_ops.next = &nativeint_ops;

    nativeint_ops.name = "_n";
    nativeint_ops.ops = Custom_ops_val(caml_copy_nativeint(0));
    nativeint_ops.next = NULL;

    /* alloc */

    extra_block = NULL;
    extra_block_end = NULL;

    /* shamelessly copied from intern.c */
    wosize = Wosize_bhsize(bytelen);
    /* fprintf (stderr, "wosize=%ld\n", wosize); */
    if (wosize > Max_wosize) {
	/* Round desired size up to next page */
	asize_t request = ((bytelen + Page_size - 1) >> Page_log) << Page_log;
	extra_block = caml_alloc_for_heap(request);
	if (extra_block == NULL) caml_raise_out_of_memory();
	extra_block_end = extra_block + request;
	color = caml_allocation_color(extra_block);
	dest = extra_block;
	dest_end = dest + bytelen;
	block = Val_hp(extra_block);
    } else {
Example #18
static void lm_heap_check_aux1(char *name)
{
    char *start, *ptr, *end;
    char *v;
    value p, *next;
    mlsize_t size;
    unsigned i, index, found;
    char *pointers[1 << 16];

    start = caml_young_start;
    ptr = caml_young_ptr;
    end = caml_young_end;

    fprintf(stderr, "AAA: %s: [0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx] (%ld/%ld/%ld bytes)\n",
            name,
            (unsigned long) caml_young_start,
            (unsigned long) caml_young_ptr,
            (unsigned long) caml_young_limit,
            (unsigned long) caml_young_end, 
            ((unsigned long) caml_young_end) - (unsigned long) caml_young_ptr,
            ((unsigned long) caml_young_end) - (unsigned long) caml_young_limit,
            ((unsigned long) caml_young_end) - (unsigned long) caml_young_start);
    fflush(stderr);

    /*
     * Phase 1: check that the headers have the right sizes.
     */
    v = (char *) Val_hp(caml_young_ptr);
    index = 0;
    while(v < caml_young_end) {
        pointers[index++] = (char *) v;
        size = Wosize_val(v);
        fprintf(stderr, "%s: 0x%08lx: size %lud, tag = %d\n",
                name, (unsigned long) v, size, Tag_val(v));
        found = 0;
        for(i = 0; i != 10; i++) {
            next = &Field(v, size + i);
            if(next < (value *) caml_young_end) {
                p = *next;
#define Debug_free_minor 0xD700D6D7ul
                if(p == Debug_free_minor) {
                    fprintf(stderr, "\tnext[%d]:0x%08lx = 0x%08lx\n", i, (unsigned long) next, (unsigned long) p);
                    found = 1;
                }
                else if(found)
                    fprintf(stderr, "\tnext[%d]:0x%08lx = 0x%08lx, size = %lud, tag = %d\n",
                            i, (unsigned long) next, (unsigned long) p, Wosize_hd(p), Tag_hd(p));
            }
        }
        fflush(stderr);
        v = (char *) &Field(v, size + 1);
    }
    if(v > (char *) Val_hp(caml_young_end)) {
        fprintf(stderr, "%s: heap is bogus\n", name);
        fflush(stderr);
        return;
    }

    /*
     * Phase 2: check that all the fields point to actual
     * values.
     */
    v = (char *) Val_hp(caml_young_ptr);
    while(v < caml_young_end) {
        size = Wosize_val(v);
        if(Tag_val(v) < No_scan_tag) {
            fprintf(stderr, "%s: scanning 0x%08lx: size %lud, tag = %d\n", name, (unsigned long) v, size, Tag_val(v));
            fflush(stderr);
            for(i = 0; i != size; i++) {
                char *p = (char *) Field(v, i);
                if(Is_block((value) p)) {
                    if(p >= caml_young_limit && p < caml_young_ptr) {
                        fprintf(stderr, "%s: pointer refers to empty young space\n", name);
                        fflush(stderr);
                        return;
                    }
                    if(p >= caml_young_ptr && p < caml_young_end)
                        search_pointer(pointers, name, index, p, v, i);
                }
            }
        }
        v = (char *) &Field(v, size + 1);
    }
}
Example #19
CAMLexport value caml_promote(struct domain* domain, value root)
{
  value **r;
  value iter, f;
  mlsize_t i;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  float percent_to_scan;
  uintnat prev_alloc_words = domain_state->allocated_words;
  struct oldify_state st = {0};
  struct caml_ephe_ref_elt *re;

  /* Integers are already shared */
  if (Is_long(root))
    return root;

  /* Objects which are in the major heap are already shared. */
  if (!Is_minor(root))
    return root;

  st.oldest_promoted = (value)domain_state->young_start;
  st.promote_domain = domain;

  CAMLassert(caml_owner_of_young_block(root) == domain);
  oldify_one (&st, root, &root);
  oldify_mopup (&st);

  CAMLassert (!Is_minor(root));
  /* FIXME: surely a newly-allocated root is already darkened? */
  caml_darken(0, root, 0);

  percent_to_scan = st.oldest_promoted <= (value)young_ptr ? 0.0 :
    (((float)(st.oldest_promoted - (value)young_ptr)) * 100.0 /
     ((value)young_end - (value)domain_state->young_start));

  if (percent_to_scan > Percent_to_promote_with_GC) {
    caml_gc_log("caml_promote: forcing minor GC. %%_minor_to_scan=%f", percent_to_scan);
    // ???
    caml_empty_minor_heap_domain (domain);
  } else {
    caml_do_local_roots (&forward_pointer, st.promote_domain, domain, 1);
    caml_scan_stack (&forward_pointer, st.promote_domain, domain_state->current_stack);

    /* Scan major to young pointers. */
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value old_p = **r;
      if (Is_block(old_p) && is_in_interval(old_p,young_ptr,young_end)) {
        value new_p = old_p;
        forward_pointer (st.promote_domain, new_p, &new_p);
        if (old_p != new_p) {
          if (caml_domain_alone())
            **r = new_p;
          else
            atomic_compare_exchange_strong((atomic_value*)*r, &old_p, new_p);
        }
      }
    }

    /* Scan ephemeron ref table */
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      value* key = &Op_val(re->ephe)[re->offset];
      if (Is_block(*key) && is_in_interval(*key,young_ptr,young_end)) {
        forward_pointer (st.promote_domain, *key, key);
      }
    }

    /* Scan young to young pointers */
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      forward_pointer (st.promote_domain, **r, *r);
    }

    /* Scan newer objects */
    for (iter = (value)young_ptr;
         iter <= st.oldest_promoted;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      value curr = Val_hp(iter);
      if (hd != 0) {
        tag_t tag = Tag_hd (hd);
        if (tag == Cont_tag) {
          struct stack_info* stk = Ptr_val(Op_val(curr)[0]);
          if (stk != NULL)
            caml_scan_stack(&forward_pointer, st.promote_domain, stk);
        } else if (tag < No_scan_tag) {
          for (i = 0; i < Wosize_hd (hd); i++) {
            f = Op_val(curr)[i];
            if (Is_block(f)) {
              forward_pointer (st.promote_domain, f,((value*)curr) + i);
            }
          }
        }
      }
    }
  }
  domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;
  return root;
}
Example #20
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all young-young pointers
       from older to younger objects */
    {
    struct addrmap young_young_ptrs = ADDRMAP_INIT;
    mlsize_t i;
    value iter;
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
    }
    for (iter = (value)young_ptr;
         iter < (value)young_end;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      if (hd != 0) {
        value curr = Val_hp(iter);
        tag_t tag = Tag_hd (hd);

        if (tag < No_scan_tag && tag != Cont_tag) {
          // FIXME: should scan Cont_tag
          for (i = 0; i < Wosize_hd(hd); i++) {
            value* f = Op_val(curr) + i;
            if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end) &&
                *f < curr) {
              CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
            }
          }
        }
      }
    }
    caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    for (r = minor_tables->major_ref.base; r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key) &&
          is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) { /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          CAMLassert(!ephe_check_alive_data(re,young_ptr,young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) && is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base; elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");


    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024, rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
Example #21
File: compact.c  Project: MassD/ocaml
static void do_compaction_r (CAML_R)
{
  char *ch, *chend;
                                          Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check_r (ctx);
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{                                      Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }


  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots_r (ctx, invert_root_r);
    caml_final_do_weak_roots_r (ctx, invert_root_r);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;
      chend = ch + Chunk_size (ch);

      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++) invert_pointer_at_r (ctx, &(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }
    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p,i) != caml_weak_none){
            invert_pointer_at_r (ctx, (word *) &(Field (p,i)));
          }
        }
        invert_pointer_at_r (ctx, (word *) pp);
        pp = &Field (p, 0);
      }
    }
  }


  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;

        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;                             Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate_r (ctx, Bsize_wsize (sz));
          q = *p;
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;
                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }                    Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{                                        Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:       Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header:
             the object was free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }


  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate_r (ctx, sz);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap_r (ctx, ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset_r (ctx);
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks_r (ctx, (value *) (ch + Chunk_alloc (ch)),
                                 Wsize_bsize (Chunk_size(ch)-Chunk_alloc(ch)), 1,
                                 Caml_white);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
Example #22
CAMLexport value caml_atom(tag_t tag) {
  return Val_hp(&atoms[tag]);
}
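For context, a sketch of the table being indexed, assuming the multicore runtime's setup of one preallocated zero-length block per tag (the real initializer lives alongside this function in the runtime sources):

/* Val_hp (&atoms[tag]) is then the canonical empty block with that tag. */
static const header_t atoms[256] = {
  Make_header (0, 0, NOT_MARKABLE),
  Make_header (0, 1, NOT_MARKABLE),
  /* ... one entry per tag, up to 255 ... */
};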