static intnat large_alloc_sweep(struct caml_heap_state* local) {
  /* Pop one block from the unswept list; return 0 when the list is empty. */
  large_alloc* a = local->unswept_large;
  if (!a) return 0;
  local->unswept_large = a->next;

  header_t hd = *(header_t*)((char*)a + LARGE_ALLOC_HEADER_SZ);
  if (Has_status_hd(hd, global.GARBAGE)) {
    /* Unreachable: release the malloc'd block. */
    local->large_bytes_allocated -= Bhsize_hd(hd);
    free(a);
  } else {
    /* Still live: move it to the swept list. */
    a->next = local->swept_large;
    local->swept_large = a;
  }

  /* Report the block's size in words (including header) as the amount of
     sweeping work done. */
  return Whsize_hd(hd);
}
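/* A minimal sketch (not part of the runtime) of how a sweeping slice might
   drive large_alloc_sweep until either a work budget is exhausted or the
   unswept list is empty.  The function name and the budget parameter are
   hypothetical; only large_alloc_sweep above is real. */
static intnat sweep_large_slice(struct caml_heap_state* local, intnat budget)
{
  intnat swept = 0;
  while (swept < budget) {
    intnat work = large_alloc_sweep(local);  /* words swept, 0 when done */
    if (work == 0) break;                    /* unswept_large list exhausted */
    swept += work;
  }
  return swept;  /* words of sweeping work performed in this slice */
}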
static void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_domain_state->young_ptr);
    hd = Hd_val (v);
    stat_live_bytes += Bhsize_hd(hd);
    if (Is_promoted_hd (hd)) {
      *p = caml_addrmap_lookup(&caml_domain_state->remembered_set->promotion, v);
    } else if (hd == 0){         /* If already forwarded */
      *p = Op_val(v)[0];         /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;
        sz = Wosize_hd (hd);
        result = alloc_shared (sz, tag);
        *p = result;
        if (tag == Stack_tag) {
          memcpy((void*)result, (void*)v, sizeof(value) * sz);
          Hd_val (v) = 0;
          Op_val(v)[0] = result;
          Op_val(v)[1] = oldify_todo_list;
          oldify_todo_list = v;
        } else {
          field0 = Op_val(v)[0];
          Hd_val (v) = 0;            /* Set forward flag */
          Op_val(v)[0] = result;     /*  and forward pointer. */
          if (sz > 1){
            Op_val (result)[0] = field0;
            Op_val (result)[1] = oldify_todo_list;    /* Add this block */
            oldify_todo_list = v;                     /*  to the "to do" list. */
          }else{
            Assert (sz == 1);
            p = Op_val(result);
            v = field0;
            goto tail_call;
          }
        }
      }else if (tag >= No_scan_tag){
        sz = Wosize_hd (hd);
        result = alloc_shared(sz, tag);
        for (i = 0; i < sz; i++) Op_val (result)[i] = Op_val(v)[i];
        Hd_val (v) = 0;            /* Set forward flag */
        Op_val (v)[0] = result;    /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      } else{
        value f = Forward_val (v);
        tag_t ft = 0;
        int vv = 1;

        Assert (tag == Forward_tag);
        if (Is_block (f)){
          if (Is_young (f)){
            vv = 1;
            ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
          }else{
            vv = 1;
            if (vv){
              ft = Tag_val (f);
            }
          }
        }
        if (!vv || ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = alloc_shared (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;             /* Set (GC) forward flag */
          Op_val (v)[0] = result;     /*  and forward pointer. */
          p = Op_val (result);
          v = f;
          goto tail_call;
        }else{
          v = f;                      /* Follow the forwarding */
          goto tail_call;             /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;
  }
}
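/* Sketch (using the globals above): how the "to do" list built by
   caml_oldify_one is typically drained.  This mirrors the shape of
   caml_oldify_mopup in the stock runtime, but it is a simplified
   illustration, not the exact runtime code; in particular the Stack_tag
   case above, which memcpy's the whole block, would need separate
   handling and is ignored here. */
static void oldify_mopup_sketch (void)
{
  value v, new_v, f;
  mlsize_t i;

  while (oldify_todo_list != 0){
    v = oldify_todo_list;                  /* a block in the minor heap */
    Assert (Hd_val (v) == 0);              /* it has been forwarded */
    new_v = Op_val (v)[0];                 /* the promoted copy */
    oldify_todo_list = Op_val (v)[1];      /* remove v from the list */

    /* The first field was already copied into the promoted block when v was
       forwarded, so it only needs to be oldified in place. */
    f = Op_val (new_v)[0];
    if (Is_block (f) && Is_young (f)) caml_oldify_one (f, Op_val (new_v));

    /* Copy and oldify the remaining fields. */
    for (i = 1; i < Wosize_val (new_v); i++){
      f = Op_val (v)[i];
      if (Is_block (f) && Is_young (f)){
        caml_oldify_one (f, Op_val (new_v) + i);
      }else{
        Op_val (new_v)[i] = f;
      }
    }
  }
}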
static void do_compaction_r (CAML_R)
{
  char *ch, *chend;
  Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check_r (ctx);
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{
          Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }

  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots_r (ctx, invert_root_r);
    caml_final_do_weak_roots_r (ctx, invert_root_r);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;
      chend = ch + Chunk_size (ch);

      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++) invert_pointer_at_r (ctx, &(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }
    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p,i) != caml_weak_none){
            invert_pointer_at_r (ctx, (word *) &(Field (p,i)));
          }
        }
        invert_pointer_at_r (ctx, (word *) pp);
        pp = &Field (p, 0);
      }
    }
  }

  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;

        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;
            Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate_r (ctx, Bsize_wsize (sz));
          q = *p;
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;
                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }
              Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{
          Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:  Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header: the object was
             free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate_r (ctx, sz);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap_r (ctx, ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset_r (ctx);
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks_r (ctx, (value *) (ch + Chunk_alloc (ch)),
                                 Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch)),
                                 1, Caml_white);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
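/* For reference: do_compaction_r above works on "encoded" headers whose two
   low bits (Ecolor) distinguish a real encoded header (3) from a link in an
   inverted pointer list (0); values 1 and 2 arise from infix headers and
   their inverted lists.  In stock compact.c these helpers are defined
   roughly as below; this is a sketch to aid reading the code above, not
   necessarily the exact definitions used by this variant. */
#define Make_ehd(s,t,c)  (((s) << 10) | (t) << 2 | (c))  /* size, tag, color */
#define Whsize_ehd(h)    Whsize_hd (h)
#define Wosize_ehd(h)    Wosize_hd (h)
#define Tag_ehd(h)       (((h) >> 2) & 0xFF)
#define Ecolor(w)        ((w) & 3)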
/* Note that the tests on the tag depend on the fact that Infix_tag,
   Forward_tag, and No_scan_tag are contiguous. */
static void oldify_one (void* st_v, value v, value *p)
{
  struct oldify_state* st = st_v;
  value result;
  header_t hd;
  mlsize_t sz, i;
  mlsize_t infix_offset;
  tag_t tag;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  CAMLassert (domain_state->young_start <= domain_state->young_ptr &&
              domain_state->young_ptr <= domain_state->young_end);

 tail_call:
  if (!(Is_block(v) && is_in_interval((value)Hp_val(v), young_ptr, young_end))) {
    /* not a minor block */
    *p = v;
    return;
  }

  infix_offset = 0;
  do {
    hd = Hd_val (v);
    if (hd == 0) {
      /* already forwarded, forward pointer is first field. */
      *p = Op_val(v)[0] + infix_offset;
      return;
    }
    tag = Tag_hd (hd);
    if (tag == Infix_tag) {
      /* Infix header, retry with the real block */
      CAMLassert (infix_offset == 0);
      infix_offset = Infix_offset_hd (hd);
      CAMLassert (infix_offset > 0);
      v -= infix_offset;
    }
  } while (tag == Infix_tag);

  if (((value)Hp_val(v)) > st->oldest_promoted) {
    st->oldest_promoted = (value)Hp_val(v);
  }

  if (tag == Cont_tag) {
    struct stack_info* stk = Ptr_val(Op_val(v)[0]);
    CAMLassert (Wosize_hd(hd) == 1 && infix_offset == 0);
    result = alloc_shared(1, Cont_tag);
    *p = result;
    Op_val(result)[0] = Val_ptr(stk);
    *Hp_val (v) = 0;
    Op_val(v)[0] = result;
    if (stk != NULL)
      caml_scan_stack(&oldify_one, st, stk);
  } else if (tag < Infix_tag) {
    value field0;
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared (sz, tag);
    *p = result + infix_offset;
    field0 = Op_val(v)[0];
    CAMLassert (!Is_debug_tag(field0));
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val(v)[0] = result;     /*  and forward pointer. */
    if (sz > 1){
      Op_val (result)[0] = field0;
      Op_val (result)[1] = st->todo_list;    /* Add this block */
      st->todo_list = v;                     /*  to the "to do" list. */
    }else{
      CAMLassert (sz == 1);
      p = Op_val(result);
      v = field0;
      goto tail_call;
    }
  } else if (tag >= No_scan_tag) {
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared(sz, tag);
    for (i = 0; i < sz; i++) {
      value curr = Op_val(v)[i];
      Op_val (result)[i] = curr;
    }
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val (v)[0] = result;    /*  and forward pointer. */
    CAMLassert (infix_offset == 0);
    *p = result;
  } else {
    CAMLassert (tag == Forward_tag);
    CAMLassert (infix_offset == 0);

    value f = Forward_val (v);
    tag_t ft = 0;

    if (Is_block (f)) {
      ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
    }

    if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag) {
      /* Do not short-circuit the pointer.  Copy as a normal block. */
      CAMLassert (Wosize_hd (hd) == 1);
      st->live_bytes += Bhsize_hd(hd);
      result = alloc_shared (1, Forward_tag);
      *p = result;
      *Hp_val (v) = 0;            /* Set (GC) forward flag */
      Op_val (v)[0] = result;     /*  and forward pointer. */
      p = Op_val (result);
      v = f;
      goto tail_call;
    } else {
      v = f;              /* Follow the forwarding */
      goto tail_call;     /*  then oldify. */
    }
  }
}
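/* The contiguity invariant mentioned in the comment above ("Infix_tag,
   Forward_tag, and No_scan_tag are contiguous") can be checked at compile
   time.  A minimal sketch, assuming a C11 compiler; the stock headers do not
   necessarily carry this exact check. */
#include <assert.h>   /* for static_assert */
static_assert (Infix_tag + 1 == Forward_tag && Forward_tag + 1 == No_scan_tag,
               "oldify_one's tag tests assume these three tags are contiguous");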