/* [caml_fl_allocate] does not set the header of the newly allocated block.
   The calling function must do it before any GC function gets called.
   [caml_fl_allocate] returns a head pointer.
*/
char *caml_fl_allocate (mlsize_t wo_sz)
{
  char *cur, *prev;

  Assert (sizeof (char *) == sizeof (value));
  Assert (fl_prev != NULL);
  Assert (wo_sz >= 1);

  /* Search from [fl_prev] to the end of the list. */
  prev = fl_prev;
  cur = Next (prev);
  while (cur != NULL){
    Assert (Is_in_heap (cur));
    if (Wosize_bp (cur) >= wo_sz){
      return allocate_block (Whsize_wosize (wo_sz), prev, cur);
    }
    prev = cur;
    cur = Next (prev);
  }
  fl_last = prev;

  /* Search from the start of the list to [fl_prev]. */
  prev = Fl_head;
  cur = Next (prev);
  while (prev != fl_prev){
    if (Wosize_bp (cur) >= wo_sz){
      return allocate_block (Whsize_wosize (wo_sz), prev, cur);
    }
    prev = cur;
    cur = Next (prev);
  }

  /* No suitable block was found. */
  return NULL;
}
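/* A minimal caller sketch (hypothetical helper; compare caml_alloc_shr
   further below for the real color computation).  It illustrates the
   contract stated above: the caller must write the header before any GC
   function can run. */
static char *alloc_and_set_header (mlsize_t wosize, tag_t tag)
{
  char *hp = caml_fl_allocate (wosize);
  if (hp == NULL) return NULL;          /* caller must expand the heap */
  Hd_hp (hp) = Make_header (wosize, tag, Caml_white);   /* header first */
  return hp;                  /* Val_hp (hp) is now a well-formed block */
}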
/* Cut a block of memory into pieces of size at most Max_wosize,
   give them headers, and optionally merge them into the free list.
   arguments:
   p: pointer to the first word of the block
   size: size of the block (in words)
   do_merge: 1 -> do merge; 0 -> do not merge
   color: which color to give to the pieces; if [do_merge] is 1, this
          is overridden by the merge code, but we have historically used
          [Caml_white].
*/
void caml_make_free_blocks (value *p, mlsize_t size, int do_merge, int color)
{
  mlsize_t sz;

  while (size > 0){
    if (size > Whsize_wosize (Max_wosize)){
      sz = Whsize_wosize (Max_wosize);
    }else{
      sz = size;
    }
    *(header_t *)p = Make_header (Wosize_whsize (sz), 0, color);
    if (do_merge) caml_fl_merge_block (Bp_hp (p));
    size -= sz;
    p += sz;
  }
}
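/* Usage sketch, taken from the compactor below (do_compaction_r): rebuild
   the free list from the unallocated tail of a heap chunk [ch]. */
caml_make_free_blocks ((value *) (ch + Chunk_alloc (ch)),
                       Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch)),
                       1, Caml_white);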
static void realloc_minor (value t, value null)
{
  CAMLparam2(t, null);
  size_t size = t_minor_size(t);
  if (size == 0) size = 1;
  size = size * 2;
  if ((size * 2 > Max_young_wosize) ||
      (caml_young_ptr - Whsize_wosize (size * 2) < caml_young_start)) {
    // Not enough room, no need to allocate:
    // collect minor heap, flush minor table
    if (!t_major_has_capacity(t, t_major_fill(t) + t_minor_fill(t) + 1))
      realloc_major(t, null);
    caml_minor_collection();
    flush_minor(t, null);
  } else {
    rehash_minor(t, null, size);
  }
  CAMLreturn0;
}
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize,
                             tag_t tag, int pinned)
{
  mlsize_t whsize = Whsize_wosize(wosize);
  value* p;
  uintnat colour;

  Assert (wosize > 0);
  Assert (tag != Infix_tag);

  if (whsize <= SIZECLASS_MAX) {
    sizeclass sz = sizeclass_wsize[whsize];
    Assert(wsize_sizeclass[sz] >= whsize);
    p = pool_allocate(local, sz);
    if (!p) return 0;
    struct heap_stats* s = &local->stats;
    s->pool_live_blocks++;
    s->pool_live_words += whsize;
    s->pool_frag_words += wsize_sizeclass[sz] - whsize;
  } else {
    p = large_allocate(local, Bsize_wsize(whsize));
    if (!p) return 0;
  }
  colour = pinned ? NOT_MARKABLE : global.MARKED;
  Hd_hp (p) = Make_header(wosize, tag, colour);
#ifdef DEBUG
  {
    int i;
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(p))[i] = Debug_free_major;
    }
  }
#endif
  return p;
}
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   [request] is a number of words and must be less than or equal
   to [Max_wosize].
   Return NULL when out of memory.
*/
static value *expand_heap (mlsize_t request)
{
  /* these point to headers, but we do arithmetic on them, hence [value *]. */
  value *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  over_request = Whsize_wosize (request + request / 100 * caml_percent_free);
  malloc_request = caml_round_heap_chunk_wsz (over_request);
  mem = (value *) caml_alloc_for_heap (Bsize_wsize (malloc_request));
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* FIXME find a way to do this with a call to caml_make_free_blocks */
  while (Wosize_whsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    hp += Whsize_wosize (Max_wosize);
    remain -= Whsize_wosize (Max_wosize);
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    Hd_hp (hp) = Make_header (Wosize_whsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Val_hp (hp), 0, Debug_free_major);
#endif
    Field (Val_hp (mem), 1) = Field (Val_hp (prev), 0) = Val_hp (hp);
    Field (Val_hp (hp), 0) = (value) NULL;
  }else{
    Field (Val_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap ((char *) mem) != 0){
    caml_free_for_heap ((char *) mem);
    return NULL;
  }
  return Op_hp (mem);
}
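/* Sketch of how a consumer can walk the chain described above (this is
   roughly what the caller, e.g. caml_fl_add_blocks, has to do; the function
   below is a hypothetical illustration, not the runtime's implementation).
   Field 0 of each blue block links to the next one; field 1 of the first
   block points at the last. */
static mlsize_t count_chain_blocks (value *mem)
{
  value first = Val_hp (mem);
  value last  = Field (first, 1);   /* field 1 of the first block -> last */
  value b = first;
  mlsize_t n = 0;
  while (b != (value) NULL){
    n++;
    if (b == last) Assert (Field (b, 0) == (value) NULL);
    b = Field (b, 0);               /* field 0 -> next blue block, or NULL */
  }
  return n;
}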
static value alloc_shared(mlsize_t wosize, tag_t tag)
{
  void* mem = caml_shared_try_alloc(caml_domain_self()->shared_heap,
                                    wosize, tag, 0 /* not pinned */);
  caml_domain_state->allocated_words += Whsize_wosize(wosize);
  if (mem == NULL) {
    caml_fatal_error("allocation failure during minor GC");
  }
  return Val_hp(mem);
}
struct pool* caml_pool_of_shared_block(value v)
{
  Assert (Is_block(v) && !Is_minor(v));
  mlsize_t whsize = Whsize_wosize(Wosize_val(v));
  if (whsize > 0 && whsize <= SIZECLASS_MAX) {
    return (pool*)((uintnat)v & ~(POOL_WSIZE * sizeof(value) - 1));
  } else {
    return 0;
  }
}
struct domain* caml_owner_of_shared_block(value v)
{
  Assert (Is_block(v) && !Is_minor(v));
  mlsize_t whsize = Whsize_wosize(Wosize_val(v));
  Assert (whsize > 0); /* not an atom */
  if (whsize <= SIZECLASS_MAX) {
    /* FIXME: ORD: if we see the object, we must see the owner */
    pool* p = (pool*)((uintnat)v & ~(POOL_WSIZE * sizeof(value) - 1));
    return p->owner;
  } else {
    large_alloc* a = (large_alloc*)(Hp_val(v) - LARGE_ALLOC_HEADER_SZ);
    return a->owner;
  }
}
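/* Both lookups above recover the enclosing pool by masking the low bits of
   the block address: (uintnat)v & ~(POOL_WSIZE * sizeof(value) - 1).  This
   relies on pools being POOL_WSIZE words large and allocated at addresses
   aligned on that byte size, so any interior pointer maps to its pool by
   clearing the low bits.  A hypothetical debug check of that invariant
   (not from the original source): */
static void check_pool_alignment (pool* p)
{
  Assert (((uintnat) p & (POOL_WSIZE * sizeof(value) - 1)) == 0);
}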
CAMLexport value caml_alloc_shr (mlsize_t wosize, tag_t tag)
{
  header_t *hp;
  value *new_block;

  if (wosize > Max_wosize) caml_raise_out_of_memory ();
  hp = caml_fl_allocate (wosize);
  if (hp == NULL){
    new_block = expand_heap (wosize);
    if (new_block == NULL) {
      if (caml_in_minor_collection)
        caml_fatal_error ("Fatal error: out of memory.\n");
      else
        caml_raise_out_of_memory ();
    }
    caml_fl_add_blocks ((value) new_block);
    hp = caml_fl_allocate (wosize);
  }

  Assert (Is_in_heap (Val_hp (hp)));

  /* Inline expansion of caml_allocation_color. */
  if (caml_gc_phase == Phase_mark
      || (caml_gc_phase == Phase_sweep
          && (addr)hp >= (addr)caml_gc_sweep_hp)){
    Hd_hp (hp) = Make_header (wosize, tag, Caml_black);
  }else{
    Assert (caml_gc_phase == Phase_idle
            || (caml_gc_phase == Phase_sweep
                && (addr)hp < (addr)caml_gc_sweep_hp));
    Hd_hp (hp) = Make_header (wosize, tag, Caml_white);
  }
  Assert (Hd_hp (hp) == Make_header (wosize, tag, caml_allocation_color (hp)));
  caml_allocated_words += Whsize_wosize (wosize);
  if (caml_allocated_words > caml_minor_heap_wsz){
    caml_urge_major_slice ();
  }
#ifdef DEBUG
  {
    uintnat i;
    for (i = 0; i < wosize; i++){
      Field (Val_hp (hp), i) = Debug_uninit_major;
    }
  }
#endif
  return Val_hp (hp);
}
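/* Usage sketch (hypothetical helper): allocating a two-field block directly
   in the major heap.  caml_alloc_shr does not initialize the fields, so
   each field must be set with caml_initialize before the GC may scan the
   block; the CAMLparam/CAMLlocal macros protect the arguments across the
   allocation. */
value make_pair_shr (value a, value b)
{
  CAMLparam2 (a, b);
  CAMLlocal1 (v);
  v = caml_alloc_shr (2, 0);            /* wosize = 2, tag = 0 */
  caml_initialize (&Field (v, 0), a);
  caml_initialize (&Field (v, 1), b);
  CAMLreturn (v);
}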
static value next_minor_block(caml_domain_state* domain_state, value curr_hp)
{
  mlsize_t wsz;
  header_t hd;
  value curr_val;
  CAMLassert ((value)domain_state->young_ptr <= curr_hp);
  CAMLassert (curr_hp < (value)domain_state->young_end);
  hd = Hd_hp(curr_hp);
  curr_val = Val_hp(curr_hp);
  if (hd == 0) {
    /* Forwarded object, find the promoted version */
    curr_val = Op_val(curr_val)[0];
  }
  CAMLassert (Is_block(curr_val) && Hd_val(curr_val) != 0
              && Tag_val(curr_val) != Infix_tag);
  wsz = Wosize_val(curr_val);
  CAMLassert (wsz <= Max_young_wosize);
  return curr_hp + Bsize_wsize(Whsize_wosize(wsz));
}
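/* Sketch of the iteration pattern this helper supports (hypothetical loop,
   not the runtime's own code): walk every block in the live part of the
   minor heap, stepping over forwarded objects by reading the promoted
   copy's size. */
static void walk_minor_heap (caml_domain_state* domain_state)
{
  value hp = (value) domain_state->young_ptr;
  while (hp < (value) domain_state->young_end) {
    /* ... inspect the block whose header is at [hp] here ... */
    hp = next_minor_block (domain_state, hp);
  }
}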
value* caml_shared_try_alloc(struct caml_heap_state* local, mlsize_t wosize,
                             tag_t tag, int pinned)
{
  mlsize_t whsize = Whsize_wosize(wosize);
  value* p;
  Assert (wosize > 0);
  Assert (tag != Infix_tag);
  if (whsize <= SIZECLASS_MAX) {
    p = pool_allocate(local, sizeclass_wsize[whsize]);
  } else {
    p = large_allocate(local, Bsize_wsize(whsize));
  }
  if (!p) return 0;
  Hd_hp (p) = Make_header(wosize, tag,
                          pinned ? NOT_MARKABLE : global.UNMARKED);
#ifdef DEBUG
  {
    int i;
    for (i = 0; i < wosize; i++) {
      Op_val(Val_hp(p))[i] = Debug_free_major;
    }
  }
#endif
  return p;
}
EXTERN value alloc_shr (mlsize_t wosize, tag_t tag)
{
  char *hp, *new_block;

  hp = fl_allocate (wosize);
  if (hp == NULL){
    new_block = expand_heap (wosize);
    if (new_block == NULL) raise_out_of_memory ();
    fl_add_block (new_block);
    hp = fl_allocate (wosize);
    if (hp == NULL) fatal_error ("alloc_shr: expand heap failed\n");
  }

  Assert (Is_in_heap (Val_hp (hp)));
  if (gc_phase == Phase_mark || (addr)hp >= (addr)gc_sweep_hp){
    Hd_hp (hp) = Make_header (wosize, tag, Black);
  }else{
    Hd_hp (hp) = Make_header (wosize, tag, White);
  }
  allocated_words += Whsize_wosize (wosize);
  if (allocated_words > Wsize_bsize (minor_heap_size)) force_minor_gc ();
  return Val_hp (hp);
}
/* [caml_fl_merge_block] returns the head pointer of the next block after
   [bp], because merging blocks may change the size of [bp]. */
char *caml_fl_merge_block (char *bp)
{
  char *prev, *cur, *adj;
  header_t hd = Hd_bp (bp);
  mlsize_t prev_wosz;

  caml_fl_cur_size += Whsize_hd (hd);

#ifdef DEBUG
  caml_set_fields (bp, 0, Debug_free_major);
#endif
  prev = caml_fl_merge;
  cur = Next (prev);
  /* The sweep code makes sure that this is the right place to insert
     this block: */
  Assert (prev < bp || prev == Fl_head);
  Assert (cur > bp || cur == NULL);

  if (policy == Policy_first_fit) truncate_flp (prev);

  /* If [last_fragment] and [bp] are adjacent, merge them. */
  if (last_fragment == Hp_bp (bp)){
    mlsize_t bp_whsz = Whsize_bp (bp);
    if (bp_whsz <= Max_wosize){
      hd = Make_header (bp_whsz, 0, Caml_white);
      bp = last_fragment;
      Hd_bp (bp) = hd;
      caml_fl_cur_size += Whsize_wosize (0);
    }
  }

  /* If [bp] and [cur] are adjacent, remove [cur] from the free-list
     and merge them. */
  adj = bp + Bosize_hd (hd);
  if (adj == Hp_bp (cur)){
    char *next_cur = Next (cur);
    mlsize_t cur_whsz = Whsize_bp (cur);

    if (Wosize_hd (hd) + cur_whsz <= Max_wosize){
      Next (prev) = next_cur;
      if (policy == Policy_next_fit && fl_prev == cur) fl_prev = prev;
      hd = Make_header (Wosize_hd (hd) + cur_whsz, 0, Caml_blue);
      Hd_bp (bp) = hd;
      adj = bp + Bosize_hd (hd);
#ifdef DEBUG
      fl_last = NULL;
      Next (cur) = (char *) Debug_free_major;
      Hd_bp (cur) = Debug_free_major;
#endif
      cur = next_cur;
    }
  }

  /* If [prev] and [bp] are adjacent, merge them, else insert [bp] into
     the free-list if it is big enough. */
  prev_wosz = Wosize_bp (prev);
  if (prev + Bsize_wsize (prev_wosz) == Hp_bp (bp)
      && prev_wosz + Whsize_hd (hd) < Max_wosize){
    Hd_bp (prev) = Make_header (prev_wosz + Whsize_hd (hd), 0, Caml_blue);
#ifdef DEBUG
    Hd_bp (bp) = Debug_free_major;
#endif
    Assert (caml_fl_merge == prev);
  }else if (Wosize_hd (hd) != 0){
    Hd_bp (bp) = Bluehd_hd (hd);
    Next (bp) = cur;
    Next (prev) = bp;
    caml_fl_merge = bp;
  }else{
    /* This is a fragment.  Leave it in white but remember it for eventual
       merging with the next block. */
    last_fragment = bp;
    caml_fl_cur_size -= Whsize_wosize (0);
  }
  return adj;
}
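/* Sketch of how the sweep phase consumes the return value (a hypothetical
   fragment modeled on the sweep loop, not the runtime's exact code): when a
   white (dead) block is swept, merging returns the address just past the
   possibly-enlarged free block, which becomes the new sweep pointer. */
char *hp = caml_gc_sweep_hp;
header_t hd = Hd_hp (hp);
if (Color_hd (hd) == Caml_white){
  caml_gc_sweep_hp = caml_fl_merge_block (Bp_hp (hp));  /* free and skip */
}else{
  caml_gc_sweep_hp = hp + Bhsize_hd (hd);   /* live or blue: skip over */
}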
/* [caml_fl_allocate] does not set the header of the newly allocated block.
   The calling function must do it before any GC function gets called.
   [caml_fl_allocate] returns a head pointer.
*/
char *caml_fl_allocate (mlsize_t wo_sz)
{
  char *cur = NULL, *prev, *result;
  int i;
  mlsize_t sz, prevsz;
  Assert (sizeof (char *) == sizeof (value));
  Assert (wo_sz >= 1);

  switch (policy){
  case Policy_next_fit:
    Assert (fl_prev != NULL);
    /* Search from [fl_prev] to the end of the list. */
    prev = fl_prev;
    cur = Next (prev);
    while (cur != NULL){
      Assert (Is_in_heap (cur));
      if (Wosize_bp (cur) >= wo_sz){
        return allocate_block (Whsize_wosize (wo_sz), 0, prev, cur);
      }
      prev = cur;
      cur = Next (prev);
    }
    fl_last = prev;
    /* Search from the start of the list to [fl_prev]. */
    prev = Fl_head;
    cur = Next (prev);
    while (prev != fl_prev){
      if (Wosize_bp (cur) >= wo_sz){
        return allocate_block (Whsize_wosize (wo_sz), 0, prev, cur);
      }
      prev = cur;
      cur = Next (prev);
    }
    /* No suitable block was found. */
    return NULL;
    break;

  case Policy_first_fit: {
    /* Search in the flp array. */
    for (i = 0; i < flp_size; i++){
      sz = Wosize_bp (Next (flp[i]));
      if (sz >= wo_sz){
#if FREELIST_DEBUG
        if (i > 5) fprintf (stderr, "FLP: found at %d size=%d\n", i, wo_sz);
#endif
        result = allocate_block (Whsize_wosize (wo_sz), i, flp[i],
                                 Next (flp[i]));
        goto update_flp;
      }
    }
    /* Extend the flp array. */
    if (flp_size == 0){
      prev = Fl_head;
      prevsz = 0;
    }else{
      prev = Next (flp[flp_size - 1]);
      prevsz = Wosize_bp (prev);
      if (beyond != NULL) prev = beyond;
    }
    while (flp_size < FLP_MAX){
      cur = Next (prev);
      if (cur == NULL){
        fl_last = prev;
        beyond = (prev == Fl_head) ? NULL : prev;
        return NULL;
      }else{
        sz = Wosize_bp (cur);
        if (sz > prevsz){
          flp[flp_size] = prev;
          ++ flp_size;
          if (sz >= wo_sz){
            beyond = cur;
            i = flp_size - 1;
#if FREELIST_DEBUG
            if (flp_size > 5){
              fprintf (stderr, "FLP: extended to %d\n", flp_size);
            }
#endif
            result = allocate_block (Whsize_wosize (wo_sz), flp_size - 1,
                                     prev, cur);
            goto update_flp;
          }
          prevsz = sz;
        }
      }
      prev = cur;
    }
    beyond = cur;

    /* The flp table is full.  Do a slow first-fit search. */
#if FREELIST_DEBUG
    fprintf (stderr, "FLP: table is full -- slow first-fit\n");
#endif
    if (beyond != NULL){
      prev = beyond;
    }else{
      prev = flp[flp_size - 1];
    }
    prevsz = Wosize_bp (Next (flp[FLP_MAX-1]));
    Assert (prevsz < wo_sz);
    cur = Next (prev);
    while (cur != NULL){
      Assert (Is_in_heap (cur));
      sz = Wosize_bp (cur);
      if (sz < prevsz){
        beyond = cur;
      }else if (sz >= wo_sz){
        return allocate_block (Whsize_wosize (wo_sz), flp_size, prev, cur);
      }
      prev = cur;
      cur = Next (prev);
    }
    fl_last = prev;
    return NULL;

  update_flp: /* (i, sz) */
    /* The block at [i] was removed or reduced.  Update the table. */
    Assert (0 <= i && i < flp_size + 1);
    if (i < flp_size){
      if (i > 0){
        prevsz = Wosize_bp (Next (flp[i-1]));
      }else{
        prevsz = 0;
      }
      if (i == flp_size - 1){
        if (Wosize_bp (Next (flp[i])) <= prevsz){
          beyond = Next (flp[i]);
          -- flp_size;
        }else{
          beyond = NULL;
        }
      }else{
        char *buf [FLP_MAX];
        int j = 0;
        mlsize_t oldsz = sz;

        prev = flp[i];
        while (prev != flp[i+1]){
          cur = Next (prev);
          sz = Wosize_bp (cur);
          if (sz > prevsz){
            buf[j++] = prev;
            prevsz = sz;
            if (sz >= oldsz){
              Assert (sz == oldsz);
              break;
            }
          }
          prev = cur;
        }
#if FREELIST_DEBUG
        if (j > 2) fprintf (stderr, "FLP: update; buf size = %d\n", j);
#endif
        if (FLP_MAX >= flp_size + j - 1){
          if (j != 1){
            memmove (&flp[i+j], &flp[i+1], sizeof (block *) * (flp_size-i-1));
          }
          if (j > 0) memmove (&flp[i], &buf[0], sizeof (block *) * j);
          flp_size += j - 1;
        }else{
          if (FLP_MAX > i + j){
            if (j != 1){
              memmove (&flp[i+j], &flp[i+1], sizeof (block *) * (FLP_MAX-i-j));
            }
            if (j > 0) memmove (&flp[i], &buf[0], sizeof (block *) * j);
          }else{
            if (i != FLP_MAX){
              memmove (&flp[i], &buf[0], sizeof (block *) * (FLP_MAX - i));
            }
          }
          flp_size = FLP_MAX - 1;
          beyond = Next (flp[FLP_MAX - 1]);
        }
      }
    }
    return result;
  }
  break;

  default:
    Assert (0);   /* unknown policy */
    break;
  }
  return NULL;  /* NOT REACHED */
}
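/* Invariant sketch for the first-fit cache (a hypothetical debug helper,
   not from the original source): the blocks Next (flp[0]) ...
   Next (flp[flp_size-1]) occur in list order with strictly increasing
   sizes, as maintained by the extension loop above (an entry is added only
   when sz > prevsz).  This is why the linear scan over flp finds the
   leftmost block large enough for the request. */
static void check_flp_invariant (void)
{
  int i;
  for (i = 1; i < flp_size; i++){
    Assert (Wosize_bp (Next (flp[i-1])) < Wosize_bp (Next (flp[i])));
  }
}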
static void do_compaction_r (CAML_R)
{
  char *ch, *chend;
  Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check_r (ctx);
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{
          Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }

  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots_r (ctx, invert_root_r);
    caml_final_do_weak_roots_r (ctx, invert_root_r);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;
      chend = ch + Chunk_size (ch);

      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++) invert_pointer_at_r (ctx, &(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }
    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p,i) != caml_weak_none){
            invert_pointer_at_r (ctx, (word *) &(Field (p,i)));
          }
        }
        invert_pointer_at_r (ctx, (word *) pp);
        pp = &Field (p, 0);
      }
    }
  }

  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;

        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;
            Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate_r (ctx, Bsize_wsize (sz));
          q = *p;
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;
                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }
              Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{
          Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:
             Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header: the object was
             free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate_r (ctx, sz);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap_r (ctx, ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset_r (ctx);
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks_r (ctx, (value *) (ch + Chunk_alloc (ch)),
                                 Wsize_bsize (Chunk_size (ch)
                                              - Chunk_alloc (ch)),
                                 1, Caml_white);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
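/* As the passes above show, the low two bits of a word (Ecolor) encode its
   role during compaction: 3 marks an encoded header (written in pass 1),
   0 marks an inverted pointer whose word links to the next entry of the
   inverted list, and 1/2 appear on infix chains.  A hedged sketch of the
   header chase used throughout: follow the inverted list from a block's
   first word until the encoded header is reached. */
word q = *p;
while (Ecolor (q) == 0) q = * (word *) q;   /* q is now the encoded header */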