CAMLexport void caml_stat_free (void * p)
{
  if (p == NULL) return;
  Assert(Wosize_val((value)p) == STAT_ALLOC_MAGIC);
  Assert(Tag_val((value)p) == Abstract_tag);
  free (Hp_val((value)p));
}
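/* Context (illustration, not the runtime's exact source): the asserts above,
   and the Hp_val/Val_hp round trip in caml_stat_resize below, assume that the
   allocating side over-allocates by one word and writes a fake Abstract_tag
   header carrying STAT_ALLOC_MAGIC in front of the user pointer.  A minimal
   sketch of that allocator, assuming the runtime's usual header macros are in
   scope: */
CAMLexport void * caml_stat_alloc_sketch (asize_t sz)
{
  void * block = malloc (sizeof (value) + sz);   /* room for the fake header */
  if (block == NULL) caml_raise_out_of_memory ();
  /* Write the header that caml_stat_free / caml_stat_resize check later. */
  Hd_hp (block) = Make_header (STAT_ALLOC_MAGIC, Abstract_tag, Caml_black);
  return (void *) Val_hp (block);          /* user pointer skips the header */
}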
static void invert_pointer_at (word *p)
{
  word q = *p;
  Assert (Ecolor ((intnat) p) == 0);

  /* Use Ecolor (q) == 0 instead of Is_block (q) because q could be an
     inverted pointer for an infix header (with Ecolor == 2). */
  if (Ecolor (q) == 0 && (Classify_addr (q) & In_heap)){
    switch (Ecolor (Hd_val (q))){
    case 0:
    case 3: /* Pointer or header: insert in inverted list. */
      *p = Hd_val (q);
      Hd_val (q) = (header_t) p;
      break;
    case 1: /* Infix header: make inverted infix list. */
      /* Double inversion: the last of the inverted infix list points to
         the next infix header in this block.  The last of the last list
         contains the original block header. */
      {
        /* This block as a value. */
        value val = (value) q - Infix_offset_val (q);
        /* Get the block header. */
        word *hp = (word *) Hp_val (val);

        while (Ecolor (*hp) == 0) hp = (word *) *hp;
        Assert (Ecolor (*hp) == 3);
        if (Tag_ehd (*hp) == Closure_tag){
          /* This is the first infix found in this block. */
          /* Save original header. */
          *p = *hp;
          /* Link inverted infix list. */
          Hd_val (q) = (header_t) ((word) p | 2);
          /* Change block header's tag to Infix_tag, and change its size
             to point to the infix list. */
          *hp = Make_ehd (Wosize_bhsize (q - val), Infix_tag, 3);
        }else{
          Assert (Tag_ehd (*hp) == Infix_tag);
          /* Point the last of this infix list to the current first infix
             list of the block. */
          *p = (word) &Field (val, Wosize_ehd (*hp)) | 1;
          /* Point the head of this infix list to the above. */
          Hd_val (q) = (header_t) ((word) p | 2);
          /* Change block header's size to point to this infix list. */
          *hp = Make_ehd (Wosize_bhsize (q - val), Infix_tag, 3);
        }
      }
      break;
    case 2: /* Inverted infix list: insert. */
      *p = Hd_val (q);
      Hd_val (q) = (header_t) ((word) p | 2);
      break;
    }
  }
}
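/* For readability: invert_pointer_at relies on the compactor's encoded-header
   scheme, where the low two bits of a word (Ecolor) distinguish pointers (0),
   encoded headers (1 and 3) and inverted infix-list links (2).  The helpers it
   uses look roughly like the sketch below; this is an assumption for
   illustration only, the authoritative definitions live in compact.c. */
#define Ecolor(w)         ((w) & 3)
#define Tag_ehd(hd)       (((hd) >> 2) & 0xFF)
#define Wosize_ehd(hd)    Wosize_hd (hd)
#define Make_ehd(s, t, c) (((word) (s) << 10) | ((t) << 2) | (c))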
struct domain* caml_owner_of_shared_block(value v)
{
  Assert (Is_block(v) && !Is_minor(v));
  mlsize_t whsize = Whsize_wosize(Wosize_val(v));
  Assert (whsize > 0); /* not an atom */
  if (whsize <= SIZECLASS_MAX) {
    /* FIXME: ORD: if we see the object, we must see the owner */
    pool* p = (pool*)((uintnat)v & ~(POOL_WSIZE * sizeof(value) - 1));
    return p->owner;
  } else {
    large_alloc* a = (large_alloc*)(Hp_val(v) - LARGE_ALLOC_HEADER_SZ);
    return a->owner;
  }
}
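/* The pool lookup above works because pools are aligned on a
   POOL_WSIZE * sizeof(value) byte boundary, so clearing the low bits of any
   interior value address recovers the pool header.  A standalone illustration
   of that mask arithmetic (POOL_BYTES is an arbitrary example constant, not
   the runtime's value): */
#include <stdint.h>
#include <stdio.h>

#define POOL_BYTES (4096 * sizeof (void *)) /* power-of-two pool size (example) */

int main (void)
{
  uintptr_t pool_base = 20 * POOL_BYTES;          /* pretend pool start */
  uintptr_t interior  = pool_base + 1234 * 8;     /* some value inside it */
  uintptr_t recovered = interior & ~(uintptr_t) (POOL_BYTES - 1);
  printf ("%d\n", recovered == pool_base);        /* prints 1 */
  return 0;
}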
CAMLexport void * caml_stat_resize (void * p, asize_t sz)
{
  void * result;

  if (p == NULL) return caml_stat_alloc(sz);
  result = realloc (Hp_val((value)p), sizeof(value) + sz);
  if (result == NULL) {
    caml_stat_free(p);
    caml_raise_out_of_memory ();
  }
  return (void*)Val_hp(result);
}
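/* Hedged usage note: caml_stat_alloc / caml_stat_resize / caml_stat_free pair
   like malloc / realloc / free, except that the pointer handed to the caller
   sits one word past the hidden debug header, which is why resize and free
   pass Hp_val of the user pointer back to the C allocator.  For example: */
void stat_alloc_example (void)
{
  char *buf = caml_stat_alloc (64);   /* 64 usable bytes, header in front */
  buf = caml_stat_resize (buf, 128);  /* may move; the header moves with it */
  caml_stat_free (buf);               /* checks the header, then free()s it */
}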
static void oldify (value *p, value v)
{
  value result;
  mlsize_t i;

 tail_call:
  if (IS_BLOCK(v) && Is_young (v)){
    assert (Hp_val (v) < young_ptr);
    if (Is_blue_val (v)){    /* Already forwarded ? */
      *p = Field (v, 0);     /* Then the forward pointer is the first field. */
    }else if (Tag_val (v) >= No_scan_tag){
      result = alloc_shr (Wosize_val (v), Tag_val (v));
      bcopy (Bp_val (v), Bp_val (result), Bosize_val (v));
      Hd_val (v) = Bluehd_hd (Hd_val (v));    /* Put the forward flag. */
      Field (v, 0) = result;                  /* And the forward pointer. */
      *p = result;
    }else{
      /* We can do recursive calls before all the fields are filled, because
         we will not be calling the major GC. */
      value field0 = Field (v, 0);
      mlsize_t sz = Wosize_val (v);

      result = alloc_shr (sz, Tag_val (v));
      *p = result;
      Hd_val (v) = Bluehd_hd (Hd_val (v));    /* Put the forward flag. */
      Field (v, 0) = result;                  /* And the forward pointer. */
      if (sz == 1){
        p = &Field (result, 0);
        v = field0;
        goto tail_call;
      }else{
        oldify (&Field (result, 0), field0);
        for (i = 1; i < sz - 1; i++){
          oldify (&Field (result, i), Field (v, i));
        }
        p = &Field (result, i);
        v = Field (v, i);
        goto tail_call;
      }
    }
  }else{
    *p = v;
  }
}
void forward_pointer (void* state, value v, value *p)
{
  header_t hd;
  mlsize_t offset;
  value fwd;
  struct domain* promote_domain = state;
  caml_domain_state* domain_state =
    promote_domain ? promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;

  if (Is_block (v) &&
      is_in_interval((value)Hp_val(v), young_ptr, young_end)) {
    hd = Hd_val(v);
    if (hd == 0) {
      *p = Op_val(v)[0];
      CAMLassert (Is_block(*p) && !Is_minor(*p));
    } else if (Tag_hd(hd) == Infix_tag) {
      offset = Infix_offset_hd(hd);
      fwd = 0;
      forward_pointer (state, v - offset, &fwd);
      if (fwd) *p = fwd + offset;
    }
  }
}
void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    if (Hp_val(v) < caml_young_ptr)   /* debug trace before the assertion */
      printf("%lx, %lx\n", (unsigned long) Hp_val(v),
             (unsigned long) caml_young_ptr);
    Assert (Hp_val (v) >= caml_young_ptr);
    hd = Hd_val (v);
    if (hd == 0){         /* If already forwarded */
      *p = Field (v, 0);  /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;

        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        *p = result;
        field0 = Field (v, 0);
        Hd_val (v) = 0;           /* Set forward flag */
        Field (v, 0) = result;    /*  and forward pointer. */
        if (sz > 1){
          Field (result, 0) = field0;
          Field (result, 1) = oldify_todo_list;   /* Add this block */
          oldify_todo_list = v;                   /*  to the "to do" list. */
        }else{
          Assert (sz == 1);
          p = &Field (result, 0);
          v = field0;
          goto tail_call;
        }
      }else if (tag >= No_scan_tag){
        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        for (i = 0; i < sz; i++) Field (result, i) = Field (v, i);
        Hd_val (v) = 0;           /* Set forward flag */
        Field (v, 0) = result;    /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      }else{
        value f = Forward_val (v);
        tag_t ft = 0;
        int vv = 1;

        Assert (tag == Forward_tag);
        if (Is_block (f)){
          vv = Is_in_value_area(f);
          if (vv) {
            ft = Tag_val (Hd_val (f) == 0 ? Field (f, 0) : f);
          }
        }
        if (!vv || ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = caml_alloc_shr (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;          /* Set (GC) forward flag */
          Field (v, 0) = result;   /*  and forward pointer. */
          p = &Field (result, 0);
          v = f;
          goto tail_call;
        }else{
          v = f;            /* Follow the forwarding */
          goto tail_call;   /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;
  }
}
int netsys_init_value_1(struct htab *t,
                        struct nqueue *q,
                        char *dest,
                        char *dest_end,
                        value orig,
                        int enable_bigarrays,
                        int enable_customs,
                        int enable_atoms,
                        int simulation,
                        void *target_addr,
                        struct named_custom_ops *target_custom_ops,
                        int color,
                        intnat *start_offset,
                        intnat *bytelen)
{
  void *orig_addr;
  void *work_addr;
  value work;
  int work_tag;
  char *work_header;
  size_t work_bytes;
  size_t work_words;
  void *copy_addr;
  value copy;
  char *copy_header;
  header_t copy_header1;
  int copy_tag;
  size_t copy_words;
  void *fixup_addr;
  char *dest_cur;
  char *dest_ptr;
  int code, i;
  intnat addr_delta;
  struct named_custom_ops *ops_ptr;
  void *int32_target_ops;
  void *int64_target_ops;
  void *nativeint_target_ops;
  void *bigarray_target_ops;

  copy = 0;
  dest_cur = dest;
  addr_delta = ((char *) target_addr) - dest;

  if (dest_cur >= dest_end && !simulation) return (-4);   /* out of space */

  if (!Is_block(orig)) return (-2);

  orig_addr = (void *) orig;
  code = netsys_queue_add(q, orig_addr);
  if (code != 0) return code;

  /* initialize *_target_ops */
  bigarray_target_ops = NULL;
  int32_target_ops = NULL;
  int64_target_ops = NULL;
  nativeint_target_ops = NULL;
  ops_ptr = target_custom_ops;
  while (ops_ptr != NULL) {
    if (strcmp(ops_ptr->name, "_bigarray") == 0)
      bigarray_target_ops = ops_ptr->ops;
    else if (strcmp(ops_ptr->name, "_i") == 0)
      int32_target_ops = ops_ptr->ops;
    else if (strcmp(ops_ptr->name, "_j") == 0)
      int64_target_ops = ops_ptr->ops;
    else if (strcmp(ops_ptr->name, "_n") == 0)
      nativeint_target_ops = ops_ptr->ops;
    ops_ptr = ops_ptr->next;
  };

  /* First pass: Iterate over the addresses found in q.  Ignore addresses
     already seen in the past (which are in t).  For new addresses, make a
     copy, and add these copies to t. */

  /* fprintf(stderr, "first pass, orig_addr=%lx simulation=%d addr_delta=%lx\n",
     (unsigned long) orig_addr, simulation, addr_delta); */

  code = netsys_queue_take(q, &work_addr);
  while (code != (-3)) {
    if (code != 0) return code;

    /* fprintf(stderr, "work_addr=%lx\n", (unsigned long) work_addr); */

    code = netsys_htab_lookup(t, work_addr, &copy_addr);
    if (code != 0) return code;

    if (copy_addr == NULL) {
      /* The address is unknown, so copy the value */

      /* Body of first pass */
      work = (value) work_addr;
      work_tag = Tag_val(work);
      work_header = Hp_val(work);

      if (work_tag < No_scan_tag) {
        /* It is a scanned value (with subvalues) */

        switch (work_tag) {
        case Object_tag:
        case Closure_tag:
        case Lazy_tag:
        case Forward_tag:
          return (-2);   /* unsupported */
        }

        work_words = Wosize_hp(work_header);
        if (work_words == 0) {
          if (!enable_atoms) return (-2);
          if (enable_atoms == 1) goto next;
        };

        /* Do the copy. */
        work_bytes = Bhsize_hp(work_header);
        copy_header = dest_cur;
        dest_cur += work_bytes;
        if (dest_cur > dest_end && !simulation) return (-4);

        if (simulation)
          copy_addr = work_addr;
        else {
          memcpy(copy_header, work_header, work_bytes);
          copy = Val_hp(copy_header);
          copy_addr = (void *) copy;
          Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
        }

        /* Add the association (work_addr -> copy_addr) to t: */
        code = netsys_htab_add(t, work_addr, copy_addr);
        if (code < 0) return code;

        /* Add the sub values of work_addr to q: */
        for (i = 0; i < work_words; ++i) {
          value field = Field(work, i);
          if (Is_block (field)) {
            code = netsys_queue_add(q, (void *) field);
            if (code != 0) return code;
          }
        }
      }
      else {
        /* It is an opaque value */
        int do_copy = 0;
        int do_bigarray = 0;
        void *target_ops = NULL;
        char caml_id = ' ';   /* only b, i, j, n */

        /* Check for bigarrays and other custom blocks */
        switch (work_tag) {
        case Abstract_tag:
          return (-2);
        case String_tag:
          do_copy = 1; break;
        case Double_tag:
          do_copy = 1; break;
        case Double_array_tag:
          do_copy = 1; break;
        case Custom_tag: {
          struct custom_operations *custom_ops;
          char *id;

          custom_ops = Custom_ops_val(work);
          id = custom_ops->identifier;
          if (id[0] == '_') {
            switch (id[1]) {
            case 'b':
              if (!enable_bigarrays) return (-2);
              if (strcmp(id, "_bigarray") == 0) {
                caml_id = 'b';
                break;
              }
            case 'i':   /* int32 */
            case 'j':   /* int64 */
            case 'n':   /* nativeint */
              if (!enable_customs) return (-2);
              if (id[2] == 0) {
                caml_id = id[1];
                break;
              }
            default:
              return (-2);
            }
          }
          else return (-2);
        }
        };   /* switch */

        switch (caml_id) {   /* look closer at some cases */
        case 'b':
          target_ops = bigarray_target_ops;
          do_copy = 1;
          do_bigarray = 1;
          break;
        case 'i':
          target_ops = int32_target_ops; do_copy = 1; break;
        case 'j':
          target_ops = int64_target_ops; do_copy = 1; break;
        case 'n':
          target_ops = nativeint_target_ops; do_copy = 1; break;
        };

        if (do_copy) {
          /* Copy the value */
          work_bytes = Bhsize_hp(work_header);
          copy_header = dest_cur;
          dest_cur += work_bytes;

          if (simulation)
            copy_addr = work_addr;
          else {
            if (dest_cur > dest_end) return (-4);
            memcpy(copy_header, work_header, work_bytes);
            copy = Val_hp(copy_header);
            copy_addr = (void *) copy;
            Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
            if (target_ops != NULL)
              Custom_ops_val(copy) = target_ops;
          }

          code = netsys_htab_add(t, work_addr, copy_addr);
          if (code < 0) return code;
        }

        if (do_bigarray) {
          /* postprocessing for copying bigarrays */
          struct caml_ba_array *b_work, *b_copy;
          void * data_copy;
          char * data_header;
          header_t data_header1;
          size_t size = 1;
          size_t size_aligned;
          size_t size_words;

          b_work = Bigarray_val(work);
          b_copy = Bigarray_val(copy);
          for (i = 0; i < b_work->num_dims; i++) {
            size = size * b_work->dim[i];
          };
          size = size *
            caml_ba_element_size[b_work->flags & BIGARRAY_KIND_MASK];

          size_aligned = size;
          if (size % sizeof(void *) != 0)
            size_aligned += sizeof(void *) - (size % sizeof(void *));
          size_words = Wsize_bsize(size_aligned);

          /* If we put the copy of the bigarray into our own dest buffer,
             also generate an abstract header, so it can be skipped when
             iterating over it.  We use here a special representation, so we
             can encode any length in this header (with a normal OCaml header
             we are limited by Max_wosize, e.g. 16M on 32 bit systems).

             The special representation is an Abstract_tag with zero length,
             followed by the real length (in words). */

          if (enable_bigarrays == 2) {
            data_header = dest_cur;
            dest_cur += 2 * sizeof(void *);
            data_copy = dest_cur;
            dest_cur += size_aligned;
          } else if (!simulation) {
            data_header = NULL;
            data_copy = stat_alloc(size_aligned);
          };

          if (!simulation) {
            if (dest_cur > dest_end) return (-4);

            /* Initialize header: */
            if (data_header != NULL) {
              data_header1 = Abstract_tag;
              memcpy(data_header,
                     (char *) &data_header1, sizeof(header_t));
              memcpy(data_header + sizeof(header_t),
                     (size_t *) &size_words, sizeof(size_t));
            };

            /* Copy bigarray: */
            memcpy(data_copy, b_work->data, size);
            b_copy->data = data_copy;
            b_copy->proxy = NULL;

            /* If the copy is in our own buffer, it is now externally
               managed. */
            b_copy->flags =
              (b_copy->flags & ~CAML_BA_MANAGED_MASK) |
              (enable_bigarrays == 2 ? CAML_BA_EXTERNAL : CAML_BA_MANAGED);
          }
        }
      }   /* if (work_tag < No_scan_tag) */
    }   /* if (copy_addr == NULL) */

    /* Switch to next address in q: */
  next:
    code = netsys_queue_take(q, &work_addr);
  }   /* while */

  /* Second pass.  The copied blocks still have fields pointing to the
     original blocks.  We fix that now by iterating once over the copied
     memory block. */

  if (!simulation) {
    /* fprintf(stderr, "second pass\n"); */
    dest_ptr = dest;
    while (dest_ptr < dest_cur) {
      copy_header1 = *((header_t *) dest_ptr);
      copy_tag = Tag_hd(copy_header1);
      copy_words = Wosize_hd(copy_header1);
      copy = (value) (dest_ptr + sizeof(void *));

      if (copy_tag < No_scan_tag) {
        for (i = 0; i < copy_words; ++i) {
          value field = Field(copy, i);
          if (Is_block (field)) {
            /* It is a pointer. Try to fix it up. */
            code = netsys_htab_lookup(t, (void *) field, &fixup_addr);
            if (code != 0) return code;

            if (fixup_addr != NULL)
              Field(copy, i) =
                (value) (((char *) fixup_addr) + addr_delta);
          }
        }
      }
      else if (copy_tag == Abstract_tag && copy_words == 0) {
        /* our special representation for skipping data regions */
        copy_words = ((size_t *) dest_ptr)[1] + 1;
      };

      dest_ptr += (copy_words + 1) * sizeof(void *);
    }
  }

  /* hey, fine. Return result */
  *start_offset = sizeof(void *);
  *bytelen = dest_cur - dest;

  /* fprintf(stderr, "return regularly\n"); */
  return 0;
}
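/* Hedged usage sketch (not part of ocamlnet): after a successful
   non-simulation call, the copied graph occupies *bytelen bytes of dest and
   every inner pointer already assumes the buffer will live at target_addr
   (fields were rewritten with addr_delta = target_addr - dest).  Assuming the
   root block was the first one copied, its relocated address would be: */
static value netsys_relocated_root (void *target_addr, intnat start_offset)
{
  return (value) ((char *) target_addr + start_offset);
}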
CAMLprim value netsys_hdr_address(value objv)
{
  return caml_copy_nativeint((intnat) Hp_val(objv));
}
/* Make sure the minor heap is empty by performing a minor collection
 * if needed.
 */
void caml_empty_minor_heap (void)
{
  uintnat minor_allocated_bytes =
    caml_domain_state->young_end - caml_domain_state->young_ptr;
  unsigned rewritten = 0;
  struct caml_ref_entry *r;

  caml_save_stack_gc();

  stat_live_bytes = 0;

  if (minor_allocated_bytes != 0){
    caml_gc_log ("Minor collection starting");
    caml_do_local_roots(&caml_oldify_one, caml_domain_self());

    for (r = caml_domain_state->remembered_set->ref.base;
         r < caml_domain_state->remembered_set->ref.ptr; r++){
      value x;
      caml_oldify_one (Op_val(r->obj)[r->field], &x);
    }

    for (r = caml_domain_state->remembered_set->fiber_ref.base;
         r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
      caml_scan_dirty_stack(&caml_oldify_one, r->obj);
    }

    caml_oldify_mopup ();

    for (r = caml_domain_state->remembered_set->ref.base;
         r < caml_domain_state->remembered_set->ref.ptr; r++){
      value v = Op_val(r->obj)[r->field];
      if (Is_block(v) && Is_young(v)) {
        Assert (Hp_val (v) >= caml_domain_state->young_ptr);
        value vnew;
        header_t hd = Hd_val(v);
        // FIXME: call oldify_one here?
        if (Is_promoted_hd(hd)) {
          vnew = caml_addrmap_lookup(
            &caml_domain_state->remembered_set->promotion, v);
        } else {
          int offset = 0;
          if (Tag_hd(hd) == Infix_tag) {
            offset = Infix_offset_hd(hd);
            v -= offset;
          }
          Assert (Hd_val (v) == 0);
          vnew = Op_val(v)[0] + offset;
        }
        Assert(Is_block(vnew) && !Is_young(vnew));
        Assert(Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) { Assert(Tag_val(vnew) == Infix_tag); }
        rewritten += caml_atomic_cas_field(r->obj, r->field, v, vnew);
      }
    }

    caml_addrmap_iter(&caml_domain_state->remembered_set->promotion,
                      unpin_promoted_object);

    if (caml_domain_state->young_ptr < caml_domain_state->young_start)
      caml_domain_state->young_ptr = caml_domain_state->young_start;
    caml_stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    caml_domain_state->young_ptr = caml_domain_state->young_end;
    clear_table (&caml_domain_state->remembered_set->ref);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion_rev);
    caml_gc_log ("Minor collection completed: %u of %u kb live, "
                 "%u pointers rewritten",
                 (unsigned)stat_live_bytes/1024,
                 (unsigned)minor_allocated_bytes/1024, rewritten);
  }

  for (r = caml_domain_state->remembered_set->fiber_ref.base;
       r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
    caml_scan_dirty_stack(&caml_darken, r->obj);
    caml_clean_stack(r->obj);
  }
  clear_table (&caml_domain_state->remembered_set->fiber_ref);

  caml_restore_stack_gc();

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) caml_domain_state->young_start;
         p < (value *) caml_domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
    ++ minor_gc_counter;
  }
#endif
}
static void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_domain_state->young_ptr);
    hd = Hd_val (v);
    stat_live_bytes += Bhsize_hd(hd);
    if (Is_promoted_hd (hd)) {
      *p = caml_addrmap_lookup(
        &caml_domain_state->remembered_set->promotion, v);
    } else if (hd == 0){   /* If already forwarded */
      *p = Op_val(v)[0];   /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;
        sz = Wosize_hd (hd);
        result = alloc_shared (sz, tag);
        *p = result;
        if (tag == Stack_tag) {
          memcpy((void*)result, (void*)v, sizeof(value) * sz);
          Hd_val (v) = 0;
          Op_val(v)[0] = result;
          Op_val(v)[1] = oldify_todo_list;
          oldify_todo_list = v;
        } else {
          field0 = Op_val(v)[0];
          Hd_val (v) = 0;          /* Set forward flag */
          Op_val(v)[0] = result;   /*  and forward pointer. */
          if (sz > 1){
            Op_val (result)[0] = field0;
            Op_val (result)[1] = oldify_todo_list;   /* Add this block */
            oldify_todo_list = v;                    /*  to the "to do" list. */
          }else{
            Assert (sz == 1);
            p = Op_val(result);
            v = field0;
            goto tail_call;
          }
        }
      }else if (tag >= No_scan_tag){
        sz = Wosize_hd (hd);
        result = alloc_shared(sz, tag);
        for (i = 0; i < sz; i++) Op_val (result)[i] = Op_val(v)[i];
        Hd_val (v) = 0;           /* Set forward flag */
        Op_val (v)[0] = result;   /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      } else{
        value f = Forward_val (v);
        tag_t ft = 0;
        int vv = 1;

        Assert (tag == Forward_tag);
        if (Is_block (f)){
          if (Is_young (f)){
            vv = 1;
            ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
          }else{
            vv = 1;
            if (vv){
              ft = Tag_val (f);
            }
          }
        }
        if (!vv || ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = alloc_shared (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;           /* Set (GC) forward flag */
          Op_val (v)[0] = result;   /*  and forward pointer. */
          p = Op_val (result);
          v = f;
          goto tail_call;
        }else{
          v = f;            /* Follow the forwarding */
          goto tail_call;   /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;
  }
}
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all
       young-young pointers from older to younger objects */
    {
      struct addrmap young_young_ptrs = ADDRMAP_INIT;
      mlsize_t i;
      value iter;
      for (r = minor_tables->minor_ref.base;
           r < minor_tables->minor_ref.ptr; r++) {
        *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
      }
      for (iter = (value)young_ptr;
           iter < (value)young_end;
           iter = next_minor_block(domain_state, iter)) {
        value hd = Hd_hp(iter);
        if (hd != 0) {
          value curr = Val_hp(iter);
          tag_t tag = Tag_hd (hd);

          if (tag < No_scan_tag && tag != Cont_tag) {
            // FIXME: should scan Cont_tag
            for (i = 0; i < Wosize_hd(hd); i++) {
              value* f = Op_val(curr) + i;
              if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end)
                  && *f < curr) {
                CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
              }
            }
          }
        }
      }
      caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key) &&
          is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) {   /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          CAMLassert(!ephe_check_alive_data(re, young_ptr, young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) &&
          is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base;
         elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");

    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words +=
      domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB "
                 "live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024,
                 rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping",
                 domain->state->id);
  }

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
/* Finish the work that was put off by [oldify_one].
   Note that [oldify_one] itself is called by [oldify_mopup], so we
   have to be careful to remove the first entry from the list before
   oldifying its fields. */
static void oldify_mopup (struct oldify_state* st)
{
  value v, new_v, f;
  mlsize_t i;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  struct caml_ephe_ref_table ephe_ref_table =
    domain_state->minor_tables->ephe_ref;
  struct caml_ephe_ref_elt *re;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  int redo = 0;

  while (st->todo_list != 0) {
    v = st->todo_list;                 /* Get the head. */
    CAMLassert (Hd_val (v) == 0);      /* It must be forwarded. */
    new_v = Op_val (v)[0];             /* Follow forward pointer. */
    st->todo_list = Op_val (new_v)[1]; /* Remove from list. */

    f = Op_val (new_v)[0];
    CAMLassert (!Is_debug_tag(f));
    if (Is_block (f) &&
        is_in_interval((value)Hp_val(v), young_ptr, young_end)) {
      oldify_one (st, f, Op_val (new_v));
    }
    for (i = 1; i < Wosize_val (new_v); i++){
      f = Op_val (v)[i];
      CAMLassert (!Is_debug_tag(f));
      if (Is_block (f) &&
          is_in_interval((value)Hp_val(v), young_ptr, young_end)) {
        oldify_one (st, f, Op_val (new_v) + i);
      } else {
        Op_val (new_v)[i] = f;
      }
    }
    CAMLassert (Wosize_val(new_v));
  }

  /* Oldify the minor-heap data of live ephemerons.  During minor collection,
     keys outside the minor heap are considered alive. */
  for (re = ephe_ref_table.base; re < ephe_ref_table.ptr; re++) {
    /* look only at ephemerons with data in the minor heap */
    if (re->offset == CAML_EPHE_DATA_OFFSET) {
      value *data = &Ephe_data(re->ephe);
      if (*data != caml_ephe_none && Is_block(*data) &&
          is_in_interval(*data, young_ptr, young_end)) {
        resolve_infix_val(data);
        if (Hd_val(*data) == 0) {   /* Value copied to major heap */
          *data = Op_val(*data)[0];
        } else {
          if (ephe_check_alive_data(re, young_ptr, young_end)) {
            oldify_one(st, *data, data);
            redo = 1;   /* oldify_todo_list can still be 0 */
          }
        }
      }
    }
  }

  if (redo) oldify_mopup (st);
}
/* Note that the tests on the tag depend on the fact that Infix_tag,
   Forward_tag, and No_scan_tag are contiguous. */
static void oldify_one (void* st_v, value v, value *p)
{
  struct oldify_state* st = st_v;
  value result;
  header_t hd;
  mlsize_t sz, i;
  mlsize_t infix_offset;
  tag_t tag;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  CAMLassert (domain_state->young_start <= domain_state->young_ptr &&
              domain_state->young_ptr <= domain_state->young_end);

 tail_call:
  if (!(Is_block(v) &&
        is_in_interval((value)Hp_val(v), young_ptr, young_end))) {
    /* not a minor block */
    *p = v;
    return;
  }

  infix_offset = 0;
  do {
    hd = Hd_val (v);
    if (hd == 0) {
      /* already forwarded, forward pointer is first field. */
      *p = Op_val(v)[0] + infix_offset;
      return;
    }
    tag = Tag_hd (hd);
    if (tag == Infix_tag) {
      /* Infix header, retry with the real block */
      CAMLassert (infix_offset == 0);
      infix_offset = Infix_offset_hd (hd);
      CAMLassert(infix_offset > 0);
      v -= infix_offset;
    }
  } while (tag == Infix_tag);

  if (((value)Hp_val(v)) > st->oldest_promoted) {
    st->oldest_promoted = (value)Hp_val(v);
  }

  if (tag == Cont_tag) {
    struct stack_info* stk = Ptr_val(Op_val(v)[0]);
    CAMLassert(Wosize_hd(hd) == 1 && infix_offset == 0);
    result = alloc_shared(1, Cont_tag);
    *p = result;
    Op_val(result)[0] = Val_ptr(stk);
    *Hp_val (v) = 0;
    Op_val(v)[0] = result;
    if (stk != NULL)
      caml_scan_stack(&oldify_one, st, stk);
  }
  else if (tag < Infix_tag) {
    value field0;
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared (sz, tag);
    *p = result + infix_offset;
    field0 = Op_val(v)[0];
    CAMLassert (!Is_debug_tag(field0));
    *Hp_val (v) = 0;          /* Set forward flag */
    Op_val(v)[0] = result;    /*  and forward pointer. */
    if (sz > 1){
      Op_val (result)[0] = field0;
      Op_val (result)[1] = st->todo_list;   /* Add this block */
      st->todo_list = v;                    /*  to the "to do" list. */
    }else{
      CAMLassert (sz == 1);
      p = Op_val(result);
      v = field0;
      goto tail_call;
    }
  }
  else if (tag >= No_scan_tag) {
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared(sz, tag);
    for (i = 0; i < sz; i++) {
      value curr = Op_val(v)[i];
      Op_val (result)[i] = curr;
    }
    *Hp_val (v) = 0;          /* Set forward flag */
    Op_val (v)[0] = result;   /*  and forward pointer. */
    CAMLassert (infix_offset == 0);
    *p = result;
  }
  else {
    CAMLassert (tag == Forward_tag);
    CAMLassert (infix_offset == 0);

    value f = Forward_val (v);
    tag_t ft = 0;

    if (Is_block (f)) {
      ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
    }

    if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag) {
      /* Do not short-circuit the pointer.  Copy as a normal block. */
      CAMLassert (Wosize_hd (hd) == 1);
      st->live_bytes += Bhsize_hd(hd);
      result = alloc_shared (1, Forward_tag);
      *p = result;
      *Hp_val (v) = 0;          /* Set (GC) forward flag */
      Op_val (v)[0] = result;   /*  and forward pointer. */
      p = Op_val (result);
      v = f;
      goto tail_call;
    } else {
      v = f;            /* Follow the forwarding */
      goto tail_call;   /*  then oldify. */
    }
  }
}
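/* The forwarding protocol used by all of the oldify variants above is the
   same: a promoted minor block has its header overwritten with 0 and its
   first field redirected to the major-heap copy, with infix pointers resolved
   through the enclosing closure block.  A hypothetical helper (illustration,
   not part of the runtime) that reads a possibly-forwarded value back after
   promotion could look like this: */
static value resolve_forwarded (value v)
{
  mlsize_t offset = 0;
  if (Tag_val (v) == Infix_tag) {   /* pointer into the middle of a closure */
    offset = Infix_offset_val (v);
    v -= offset;
  }
  if (Hd_val (v) == 0)              /* header cleared: block was promoted */
    return Op_val (v)[0] + offset;  /* first field holds the new location */
  return v + offset;                /* not promoted; leave it where it is */
}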
/* [caml_fl_merge_block] returns the head pointer of the next block after
   [bp], because merging blocks may change the size of [bp]. */
header_t *caml_fl_merge_block (value bp)
{
  value prev, cur;
  header_t *adj;
  header_t hd = Hd_val (bp);
  mlsize_t prev_wosz;

  caml_fl_cur_wsz += Whsize_hd (hd);

#ifdef DEBUG
  caml_set_fields (bp, 0, Debug_free_major);
#endif
  prev = caml_fl_merge;
  cur = Next (prev);
  /* The sweep code makes sure that this is the right place to insert
     this block: */
  Assert (prev < bp || prev == Fl_head);
  Assert (cur > bp || cur == Val_NULL);

  if (policy == Policy_first_fit) truncate_flp (prev);

  /* If [last_fragment] and [bp] are adjacent, merge them. */
  if (last_fragment == Hp_bp (bp)){
    mlsize_t bp_whsz = Whsize_val (bp);
    if (bp_whsz <= Max_wosize){
      hd = Make_header (bp_whsz, 0, Caml_white);
      bp = (value) last_fragment;
      Hd_val (bp) = hd;
      caml_fl_cur_wsz += Whsize_wosize (0);
    }
  }

  /* If [bp] and [cur] are adjacent, remove [cur] from the free-list
     and merge them. */
  adj = (header_t *) &Field (bp, Wosize_hd (hd));
  if (adj == Hp_val (cur)){
    value next_cur = Next (cur);
    mlsize_t cur_whsz = Whsize_val (cur);

    if (Wosize_hd (hd) + cur_whsz <= Max_wosize){
      Next (prev) = next_cur;
      if (policy == Policy_next_fit && fl_prev == cur) fl_prev = prev;
      hd = Make_header (Wosize_hd (hd) + cur_whsz, 0, Caml_blue);
      Hd_val (bp) = hd;
      adj = (header_t *) &Field (bp, Wosize_hd (hd));
#ifdef DEBUG
      fl_last = Val_NULL;
      Next (cur) = (value) Debug_free_major;
      Hd_val (cur) = Debug_free_major;
#endif
      cur = next_cur;
    }
  }

  /* If [prev] and [bp] are adjacent merge them, else insert [bp] into
     the free-list if it is big enough. */
  prev_wosz = Wosize_val (prev);
  if ((header_t *) &Field (prev, prev_wosz) == Hp_val (bp)
      && prev_wosz + Whsize_hd (hd) < Max_wosize){
    Hd_val (prev) = Make_header (prev_wosz + Whsize_hd (hd), 0, Caml_blue);
#ifdef DEBUG
    Hd_val (bp) = Debug_free_major;
#endif
    Assert (caml_fl_merge == prev);
  }else if (Wosize_hd (hd) != 0){
    Hd_val (bp) = Bluehd_hd (hd);
    Next (bp) = cur;
    Next (prev) = bp;
    caml_fl_merge = bp;
  }else{
    /* This is a fragment.  Leave it in white but remember it for eventual
       merging with the next block. */
    last_fragment = (header_t *) bp;
    caml_fl_cur_wsz -= Whsize_wosize (0);
  }
  return adj;
}