static value caml_promote_one(struct promotion_stack* stk,
                              struct domain* domain, value curr)
{
  header_t curr_block_hd;
  int infix_offset = 0;
  if (Is_long(curr) || !Is_minor(curr))
    return curr; /* needs no promotion */
  Assert(caml_owner_of_young_block(curr) == domain);
  curr_block_hd = Hd_val(curr);
  if (Tag_hd(curr_block_hd) == Infix_tag) {
    infix_offset = Infix_offset_val(curr);
    curr -= infix_offset;
    curr_block_hd = Hd_val(curr);
  }
  if (Is_promoted_hd(curr_block_hd)) {
    /* already promoted */
    return caml_addrmap_lookup(&domain->state->remembered_set->promotion, curr)
           + infix_offset;
  } else if (curr_block_hd == 0) {
    /* promoted by minor GC */
    return Op_val(curr)[0] + infix_offset;
  }
  /* otherwise, must promote */
  void* mem = caml_shared_try_alloc(domain->shared_heap,
                                    Wosize_hd(curr_block_hd),
                                    Tag_hd(curr_block_hd), 1);
  if (!mem)
    caml_fatal_error("allocation failure during promotion");
  value promoted = Val_hp(mem);
  Hd_val(curr) = Promotedhd_hd(curr_block_hd);

  caml_addrmap_insert(&domain->state->remembered_set->promotion,
                      curr, promoted);
  caml_addrmap_insert(&domain->state->remembered_set->promotion_rev,
                      promoted, curr);

  if (Tag_hd(curr_block_hd) >= No_scan_tag) {
    int i;
    for (i = 0; i < Wosize_hd(curr_block_hd); i++)
      Op_val(promoted)[i] = Op_val(curr)[i];
  } else {
    /* push to stack */
    if (stk->sp == stk->stack_len) {
      stk->stack_len = 2 * (stk->stack_len + 10);
      stk->stack = caml_stat_resize(stk->stack,
          sizeof(struct promotion_stack_entry) * stk->stack_len);
    }
    stk->stack[stk->sp].local = curr;
    stk->stack[stk->sp].global = promoted;
    stk->stack[stk->sp].field = 0;
    stk->sp++;
  }
  return promoted + infix_offset;
}
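/* Illustrative sketch (not part of the runtime sources): the promotion
   stack above grows on demand with the policy new_len = 2 * (old_len + 10),
   so pushes stay amortized O(1) even starting from an empty stack. The toy
   below uses plain realloc in place of caml_stat_resize; all names are
   hypothetical. */
#include <stdlib.h>
#include <assert.h>

struct toy_entry { void *local; void *global; int field; };

struct toy_stack {
  struct toy_entry *stack;
  int sp;
  int stack_len;
};

static void toy_push(struct toy_stack *stk, void *local, void *global)
{
  if (stk->sp == stk->stack_len) {
    /* Same doubling rule as above: never zero, roughly doubles. */
    stk->stack_len = 2 * (stk->stack_len + 10);
    stk->stack = realloc(stk->stack,
                         sizeof(struct toy_entry) * stk->stack_len);
    assert(stk->stack != NULL);
  }
  stk->stack[stk->sp].local = local;
  stk->stack[stk->sp].global = global;
  stk->stack[stk->sp].field = 0;
  stk->sp++;
}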
static intnat mark(value initial, intnat budget)
{
  value next = initial;
  int found_next = 1;
  while (budget > 0 && found_next) {
    value v = next;
    header_t hd_v;
    found_next = 0;

    Assert(Is_markable(v));
    Assert(v == mark_normalise(v));

    stat_blocks_marked++;
    /* mark the current object */
    hd_v = Hd_val(v);
    // caml_gc_log ("mark: v=0x%lx hd=0x%lx tag=%d sz=%lu",
    //              v, hd_v, Tag_val(v), Wosize_val(v));
    if (Tag_hd (hd_v) == Stack_tag) {
      // caml_gc_log ("mark: stack=%p", (value*)v);
      caml_scan_stack(&caml_darken, v);
    } else if (Tag_hd (hd_v) < No_scan_tag) {
      int i;
      for (i = 0; i < Wosize_hd(hd_v); i++) {
        value child = Op_val(v)[i];
        // caml_gc_log ("mark: v=%p i=%u child=%p", (value*)v, i, (value*)child);
        /* FIXME: this is wrong, as Debug_tag(N) is a valid value.
           However, it's a useful debugging aid for now */
        Assert(!Is_debug_tag(child));
        if (Is_markable(child)) {
          child = mark_normalise(child);
          if (caml_mark_object(child)) {
            if (!found_next) {
              next = child;
              found_next = 1;
            } else {
              mark_stack_push(child);
            }
          }
        }
      }
    }
    budget -= Whsize_hd(hd_v);

    /* if we haven't found any markable children, pop an object to mark */
    if (!found_next) {
      found_next = mark_stack_pop(&next);
    }
  }
  if (found_next) {
    mark_stack_push(next);
  }
  return budget;
}
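/* Illustrative sketch (hypothetical names, not runtime code): the mark loop
   above is a budgeted depth-first traversal that keeps one "next" object in
   hand and spills extra children to an explicit mark stack, so a slice can
   stop when the budget runs out and resume later. The same control
   structure on a toy node graph; the caller is assumed to have marked the
   initial node, and each visit costs one budget unit instead of the block's
   size in words. */
#include <assert.h>
#include <stddef.h>

struct toy_node { int marked; int nchildren; struct toy_node **children; };

#define TOY_MARK_STACK_MAX 1024
static struct toy_node *toy_mark_stack[TOY_MARK_STACK_MAX];
static int toy_mark_sp = 0;

static void toy_mark_push(struct toy_node *n)
{
  assert(toy_mark_sp < TOY_MARK_STACK_MAX);
  toy_mark_stack[toy_mark_sp++] = n;
}

static int toy_mark_pop(struct toy_node **n)
{
  if (toy_mark_sp == 0) return 0;
  *n = toy_mark_stack[--toy_mark_sp];
  return 1;
}

/* Mark at most `budget` nodes; return the unused budget. If the budget ran
   out, the pending node is pushed back so the next slice can resume. */
static long toy_mark(struct toy_node *initial, long budget)
{
  struct toy_node *next = initial;
  int found_next = 1;
  while (budget > 0 && found_next) {
    struct toy_node *v = next;
    int i;
    found_next = 0;
    for (i = 0; i < v->nchildren; i++) {
      struct toy_node *child = v->children[i];
      if (child != NULL && !child->marked) {
        child->marked = 1;
        if (!found_next) { next = child; found_next = 1; }
        else toy_mark_push(child);
      }
    }
    budget--;
    if (!found_next) found_next = toy_mark_pop(&next);
  }
  if (found_next) toy_mark_push(next); /* resume here in the next slice */
  return budget;
}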
static mlsize_t size_after_expansion(value32 * p, mlsize_t len)
     /* len is the length in 32-bit words */
{
  mlsize_t res;
  value32 * q;
  header_t hd;
  mlsize_t n;

  for (q = p + len, res = 0; p < q; /*nothing*/) {
    hd = (header_t) *p++;
    res++;
    n = Wosize_hd(hd);
    switch(Tag_hd(hd)) {
    case String_tag:
      /* round to the next 64-bit word */
      res += (n * sizeof(value32) + sizeof(value) - 1) / sizeof(value);
      break;
    case Double_tag:
      res += sizeof(double) / sizeof(value);
      break;
    default:
      res += n;                 /* all fields will be extended 32 -> 64 */
      break;
    }
    p += n;
  }
  return res;
}
int rev_pointers_64(value64 * p, mlsize_t size)
     /* size is the size in 64-bit words */
{
  value64 * q;
  header_t hd;
  mlsize_t n;

  q = p + size;
  while (p < q) {
    Reverse_int64_t(p);
    hd = (header_t)(p->lsw);
    if (p->msw != 0) return -1;
    p++;
    n = Wosize_hd(hd);
    switch(Tag_hd(hd)) {
    case String_tag:
      p += n;
      break;
    case Double_tag:
      Reverse_double(p);
      p += n;
      break;
    default:
      for (/*nothing*/; n > 0; n--, p++) {
        Reverse_int64_t(p);
      }
    }
  }
  return 0;
}
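/* Illustrative sketch: Reverse_int64_t/Reverse_double above byte-swap one
   64-bit word in place when the marshalled image was produced on a machine
   of the other endianness. A minimal stand-alone equivalent (hypothetical
   name): */
#include <stdint.h>

static void toy_reverse64(void *p)
{
  uint8_t *b = p, tmp;
  int i;
  for (i = 0; i < 4; i++) {
    tmp = b[i];
    b[i] = b[7 - i];
    b[7 - i] = tmp;
  }
}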
static mlsize_t size_after_shrinkage(value64 * p, mlsize_t len)
     /* len is the length in 64-bit words */
{
  mlsize_t res;
  value64 * q;
  header_t hd;
  mlsize_t n;

  for (q = p + len, res = 0; p < q; /*nothing*/) {
    hd = (header_t)(p->lsw);
    if (p->msw != 0) return 0;
    p++;
    n = Wosize_hd(hd);
    res++;
    switch(Tag_hd(hd)) {
    case String_tag: {
      mlsize_t ofs_last_byte, len, new_sz;
      ofs_last_byte = n * sizeof(value64) - 1;
      len = ofs_last_byte - Byte(p, ofs_last_byte);
      new_sz = (len + sizeof(value)) / sizeof(value);
      res += new_sz;
      break;
    }
    case Double_tag:
      res += sizeof(double) / sizeof(value);
      break;
    default:
      res += n;                 /* all fields will be shrunk 64 -> 32 */
      break;
    }
    p += n;
  }
  return res;
}
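/* Illustrative sketch: both sizing passes above recover an OCaml string's
   byte length from its final padding byte. With ofs_last_byte =
   wosize * word_size - 1, the last byte of the block stores
   ofs_last_byte - length, so length = ofs_last_byte - data[ofs_last_byte].
   Stand-alone demonstration (hypothetical names): */
#include <assert.h>
#include <stddef.h>

static size_t toy_string_length(const unsigned char *data, size_t wosize,
                                size_t word_bytes)
{
  size_t ofs_last_byte = wosize * word_bytes - 1;
  return ofs_last_byte - data[ofs_last_byte];
}

static void toy_string_demo(void)
{
  /* "ab" in one 8-byte word: 'a' 'b' NUL 0 0 0 0 5.
     The final byte holds 5, so the length is 7 - 5 = 2. */
  unsigned char w[8] = { 'a', 'b', 0, 0, 0, 0, 0, 5 };
  assert(toy_string_length(w, 1, 8) == 2);
}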
void rev_pointers_32(value32 * p, mlsize_t size)
{
  value32 * q;
  header_t hd;
  mlsize_t n;

  q = p + size;
  while (p < q) {
    Reverse_int32_t(p);
    hd = (header_t) *p++;
    n = Wosize_hd(hd);
    switch(Tag_hd(hd)) {
    case String_tag:
      p += n;
      break;
    case Double_tag:
      Reverse_double(p);
      p += n;
      break;
    default:
      for (/*nothing*/; n > 0; n--, p++) {
        Reverse_int32_t(p);
      }
    }
  }
}
static intnat mark(value initial, intnat budget)
{
  value next = initial;
  int found_next = 1;
  while (budget > 0 && found_next) {
    value v = next;
    header_t hd_v;
    found_next = 0;

    Assert(Is_markable(v));
    Assert(v == mark_normalise(v));

    stat_blocks_marked++;
    /* mark the current object */
    hd_v = Hd_val(v);
    if (Tag_hd (hd_v) == Stack_tag) {
      caml_scan_stack(&caml_darken, v);
    } else if (Tag_hd (hd_v) < No_scan_tag) {
      int i;
      for (i = 0; i < Wosize_hd(hd_v); i++) {
        value child = Field(v, i);
        if (Is_markable(child)) {
          child = mark_normalise(child);
          if (caml_mark_object(child)) {
            if (!found_next) {
              next = child;
              found_next = 1;
            } else {
              mark_stack_push(child);
            }
          }
        }
      }
    }
    budget -= Whsize_hd(hd_v);

    /* if we haven't found any markable children, pop an object to mark */
    if (!found_next) {
      found_next = mark_stack_pop(&next);
    }
  }
  if (found_next) {
    mark_stack_push(next);
  }
  return budget;
}
void adjust_pointers(value * start, mlsize_t size, color_t color)
{
  value * p, * q;
  mlsize_t sz;
  header_t hd;
  tag_t tag;
  value v;
  mlsize_t bosize;

  p = start;
  q = p + size;
  bosize = Bsize_wsize(size);
  while (p < q) {
    hd = *p;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    *p++ = Make_header(sz, tag, color);
    if (tag >= No_scan_tag)
      p += sz;
    else
      for (/*nothing*/; sz > 0; sz--, p++) {
        v = *p;
        switch(v & 3) {
        case 0:                 /* 0 -> a block represented by its offset */
          assert(v >= 0 && v <= bosize && (v & 3) == 0);
          *p = (value) ((byteoffset_t) start + v);
          break;
        case 2:                 /* 2 -> an atom */
          v = v >> 2;
          assert(v >= 0 && v < 256);
          *p = Atom(v);
          break;
        default:                /* 1 or 3 -> an integer */
          break;
        }
      }
  }
}
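/* Illustrative sketch: adjust_pointers dispatches on the two low bits of
   each field. 00 means a block, encoded as a byte offset from the start of
   the image; 10 means an atom number; x1 means a tagged integer, left
   untouched. A stand-alone classifier over the same 2-bit scheme
   (hypothetical names): */
#include <stdint.h>

enum toy_kind { TOY_BLOCK, TOY_ATOM, TOY_INT };

static enum toy_kind toy_classify(uintptr_t field)
{
  switch (field & 3) {
  case 0:  return TOY_BLOCK;  /* relocate as image_base + field */
  case 2:  return TOY_ATOM;   /* atom index is field >> 2 */
  default: return TOY_INT;    /* 1 or 3: immediate integer, unchanged */
  }
}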
void forward_pointer (void* state, value v, value *p)
{
  header_t hd;
  mlsize_t offset;
  value fwd;
  struct domain* promote_domain = state;
  caml_domain_state* domain_state =
    promote_domain ? promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;

  if (Is_block (v) &&
      is_in_interval((value)Hp_val(v), young_ptr, young_end)) {
    hd = Hd_val(v);
    if (hd == 0) {
      *p = Op_val(v)[0];
      CAMLassert (Is_block(*p) && !Is_minor(*p));
    } else if (Tag_hd(hd) == Infix_tag) {
      offset = Infix_offset_hd(hd);
      fwd = 0;
      forward_pointer (state, v - offset, &fwd);
      if (fwd) *p = fwd + offset;
    }
  }
}
void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_young_ptr);
    hd = Hd_val (v);
    if (hd == 0){          /* If already forwarded */
      *p = Field (v, 0);   /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;
        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        *p = result;
        field0 = Field (v, 0);
        Hd_val (v) = 0;            /* Set forward flag */
        Field (v, 0) = result;     /*  and forward pointer. */
        if (sz > 1){
          Field (result, 0) = field0;
          Field (result, 1) = oldify_todo_list;    /* Add this block */
          oldify_todo_list = v;                    /*  to the "to do" list. */
        }else{
          Assert (sz == 1);
          p = &Field (result, 0);
          v = field0;
          goto tail_call;
        }
      }else if (tag >= No_scan_tag){
        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        for (i = 0; i < sz; i++) Field (result, i) = Field (v, i);
        Hd_val (v) = 0;            /* Set forward flag */
        Field (v, 0) = result;     /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      }else{
        value f = Forward_val (v);
        tag_t ft = 0;
        int vv = 1;

        Assert (tag == Forward_tag);
        if (Is_block (f)){
          vv = Is_in_value_area(f);
          if (vv) {
            ft = Tag_val (Hd_val (f) == 0 ? Field (f, 0) : f);
          }
        }
        if (!vv || ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = caml_alloc_shr (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;             /* Set (GC) forward flag */
          Field (v, 0) = result;      /*  and forward pointer. */
          p = &Field (result, 0);
          v = f;
          goto tail_call;
        }else{
          v = f;                      /* Follow the forwarding */
          goto tail_call;             /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;
  }
}
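/* Illustrative sketch (hypothetical layout, not runtime code): all the
   oldify variants here use the same forwarding protocol -- copy the block,
   overwrite the old header with 0 ("forwarded"), and store the new address
   in the old first field, so later visits to the same block resolve in
   O(1) and sharing is preserved. A toy version on a fake two-word object: */
#include <string.h>
#include <stdint.h>

#define TOY_WORDS 2

struct toy_obj { uintptr_t header; uintptr_t field[TOY_WORDS]; };

static struct toy_obj *toy_forward(struct toy_obj *old, struct toy_obj *copy)
{
  if (old->header == 0)                 /* already forwarded: */
    return (struct toy_obj *) old->field[0];  /* first field is the copy */
  memcpy(copy, old, sizeof *copy);      /* evacuate */
  old->header = 0;                      /* set forward flag */
  old->field[0] = (uintptr_t) copy;     /*  and forward pointer */
  return copy;
}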
int netsys_init_value_1(struct htab *t,
                        struct nqueue *q,
                        char *dest,
                        char *dest_end,
                        value orig,
                        int enable_bigarrays,
                        int enable_customs,
                        int enable_atoms,
                        int simulation,
                        void *target_addr,
                        struct named_custom_ops *target_custom_ops,
                        int color,
                        intnat *start_offset,
                        intnat *bytelen)
{
  void *orig_addr;
  void *work_addr;
  value work;
  int work_tag;
  char *work_header;
  size_t work_bytes;
  size_t work_words;
  void *copy_addr;
  value copy;
  char *copy_header;
  header_t copy_header1;
  int copy_tag;
  size_t copy_words;
  void *fixup_addr;
  char *dest_cur;
  char *dest_ptr;
  int code, i;
  intnat addr_delta;
  struct named_custom_ops *ops_ptr;
  void *int32_target_ops;
  void *int64_target_ops;
  void *nativeint_target_ops;
  void *bigarray_target_ops;

  copy = 0;
  dest_cur = dest;
  addr_delta = ((char *) target_addr) - dest;

  if (dest_cur >= dest_end && !simulation) return (-4);   /* out of space */
  if (!Is_block(orig)) return (-2);

  orig_addr = (void *) orig;
  code = netsys_queue_add(q, orig_addr);
  if (code != 0) return code;

  /* Initialize the *_target_ops pointers: */
  bigarray_target_ops = NULL;
  int32_target_ops = NULL;
  int64_target_ops = NULL;
  nativeint_target_ops = NULL;
  ops_ptr = target_custom_ops;
  while (ops_ptr != NULL) {
    if (strcmp(ops_ptr->name, "_bigarray") == 0)
      bigarray_target_ops = ops_ptr->ops;
    else if (strcmp(ops_ptr->name, "_i") == 0)
      int32_target_ops = ops_ptr->ops;
    else if (strcmp(ops_ptr->name, "_j") == 0)
      int64_target_ops = ops_ptr->ops;
    else if (strcmp(ops_ptr->name, "_n") == 0)
      nativeint_target_ops = ops_ptr->ops;
    ops_ptr = ops_ptr->next;
  };

  /* First pass: iterate over the addresses found in q. Ignore addresses
     already seen in the past (which are in t). For new addresses, make a
     copy, and add these copies to t. */

  code = netsys_queue_take(q, &work_addr);
  while (code != (-3)) {
    if (code != 0) return code;

    code = netsys_htab_lookup(t, work_addr, &copy_addr);
    if (code != 0) return code;

    if (copy_addr == NULL) {
      /* The address is unknown, so copy the value. */
      work = (value) work_addr;
      work_tag = Tag_val(work);
      work_header = Hp_val(work);

      if (work_tag < No_scan_tag) {
        /* It is a scanned value (with subvalues). */
        switch (work_tag) {
        case Object_tag:
        case Closure_tag:
        case Lazy_tag:
        case Forward_tag:
          return (-2);          /* unsupported */
        }

        work_words = Wosize_hp(work_header);
        if (work_words == 0) {
          if (!enable_atoms) return (-2);
          if (enable_atoms == 1) goto next;
        };

        /* Do the copy. */
        work_bytes = Bhsize_hp(work_header);
        copy_header = dest_cur;
        dest_cur += work_bytes;
        if (dest_cur > dest_end && !simulation) return (-4);

        if (simulation)
          copy_addr = work_addr;
        else {
          memcpy(copy_header, work_header, work_bytes);
          copy = Val_hp(copy_header);
          copy_addr = (void *) copy;
          Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
        }

        /* Add the association (work_addr -> copy_addr) to t: */
        code = netsys_htab_add(t, work_addr, copy_addr);
        if (code < 0) return code;

        /* Add the subvalues of work_addr to q: */
        for (i = 0; i < work_words; ++i) {
          value field = Field(work, i);
          if (Is_block(field)) {
            code = netsys_queue_add(q, (void *) field);
            if (code != 0) return code;
          }
        }
      }
      else {
        /* It is an opaque value. */
        int do_copy = 0;
        int do_bigarray = 0;
        void *target_ops = NULL;
        char caml_id = ' ';     /* only b, i, j, n */

        /* Check for bigarrays and other custom blocks: */
        switch (work_tag) {
        case Abstract_tag:
          return (-2);
        case String_tag:
          do_copy = 1; break;
        case Double_tag:
          do_copy = 1; break;
        case Double_array_tag:
          do_copy = 1; break;
        case Custom_tag: {
          struct custom_operations *custom_ops;
          char *id;

          custom_ops = Custom_ops_val(work);
          id = custom_ops->identifier;
          if (id[0] == '_') {
            switch (id[1]) {
            case 'b':
              if (!enable_bigarrays) return (-2);
              if (strcmp(id, "_bigarray") == 0) { caml_id = 'b'; break; }
              /* fall through */
            case 'i':           /* int32 */
            case 'j':           /* int64 */
            case 'n':           /* nativeint */
              if (!enable_customs) return (-2);
              if (id[2] == 0) { caml_id = id[1]; break; }
              /* fall through */
            default:
              return (-2);
            }
          }
          else return (-2);
        }
        };  /* switch */

        switch (caml_id) {      /* look closer at some cases */
        case 'b':
          target_ops = bigarray_target_ops;
          do_copy = 1;
          do_bigarray = 1;
          break;
        case 'i':
          target_ops = int32_target_ops; do_copy = 1; break;
        case 'j':
          target_ops = int64_target_ops; do_copy = 1; break;
        case 'n':
          target_ops = nativeint_target_ops; do_copy = 1; break;
        };

        if (do_copy) {
          /* Copy the value. */
          work_bytes = Bhsize_hp(work_header);
          copy_header = dest_cur;
          dest_cur += work_bytes;

          if (simulation)
            copy_addr = work_addr;
          else {
            if (dest_cur > dest_end) return (-4);
            memcpy(copy_header, work_header, work_bytes);
            copy = Val_hp(copy_header);
            copy_addr = (void *) copy;
            Hd_val(copy) = Whitehd_hd(Hd_val(copy)) | color;
            if (target_ops != NULL)
              Custom_ops_val(copy) = target_ops;
          }

          code = netsys_htab_add(t, work_addr, copy_addr);
          if (code < 0) return code;
        }

        if (do_bigarray) {
          /* Postprocessing for copying bigarrays. */
          struct caml_ba_array *b_work, *b_copy;
          void *data_copy;
          char *data_header;
          header_t data_header1;
          size_t size = 1;
          size_t size_aligned;
          size_t size_words;

          b_work = Bigarray_val(work);
          b_copy = Bigarray_val(copy);
          for (i = 0; i < b_work->num_dims; i++)
            size = size * b_work->dim[i];
          size = size *
            caml_ba_element_size[b_work->flags & BIGARRAY_KIND_MASK];

          size_aligned = size;
          if (size % sizeof(void *) != 0)
            size_aligned += sizeof(void *) - (size % sizeof(void *));
          size_words = Wsize_bsize(size_aligned);

          /* If we put the copy of the bigarray into our own dest buffer,
             also generate an abstract header, so the data region can be
             skipped when iterating over the buffer. We use a special
             representation here, so we can encode any length in this
             header (with a normal OCaml header we are limited by
             Max_wosize, e.g. 16M on 32-bit systems). The special
             representation is an Abstract_tag with zero length, followed
             by the real length (in words). */

          if (enable_bigarrays == 2) {
            data_header = dest_cur;
            dest_cur += 2 * sizeof(void *);
            data_copy = dest_cur;
            dest_cur += size_aligned;
          } else if (!simulation) {
            data_header = NULL;
            data_copy = stat_alloc(size_aligned);
          };

          if (!simulation) {
            if (dest_cur > dest_end) return (-4);

            /* Initialize the header: */
            if (data_header != NULL) {
              data_header1 = Abstract_tag;
              memcpy(data_header, (char *) &data_header1,
                     sizeof(header_t));
              memcpy(data_header + sizeof(header_t),
                     (size_t *) &size_words, sizeof(size_t));
            };

            /* Copy the bigarray: */
            memcpy(data_copy, b_work->data, size);
            b_copy->data = data_copy;
            b_copy->proxy = NULL;

            /* If the copy is in our own buffer, it is now externally
               managed. */
            b_copy->flags =
              (b_copy->flags & ~CAML_BA_MANAGED_MASK) |
              (enable_bigarrays == 2 ? CAML_BA_EXTERNAL : CAML_BA_MANAGED);
          }
        }
      } /* if (work_tag < No_scan_tag) */
    }   /* if (copy_addr == NULL) */

    /* Switch to the next address in q: */
  next:
    code = netsys_queue_take(q, &work_addr);
  } /* while */

  /* Second pass: the copied blocks still have fields pointing to the
     original blocks. We fix that now by iterating once over the copied
     memory block. */

  if (!simulation) {
    dest_ptr = dest;
    while (dest_ptr < dest_cur) {
      copy_header1 = *((header_t *) dest_ptr);
      copy_tag = Tag_hd(copy_header1);
      copy_words = Wosize_hd(copy_header1);
      copy = (value) (dest_ptr + sizeof(void *));

      if (copy_tag < No_scan_tag) {
        for (i = 0; i < copy_words; ++i) {
          value field = Field(copy, i);
          if (Is_block(field)) {
            /* It is a pointer. Try to fix it up. */
            code = netsys_htab_lookup(t, (void *) field, &fixup_addr);
            if (code != 0) return code;
            if (fixup_addr != NULL)
              Field(copy, i) = (value) (((char *) fixup_addr) + addr_delta);
          }
        }
      }
      else if (copy_tag == Abstract_tag && copy_words == 0) {
        /* Our special representation for skipping data regions. */
        copy_words = ((size_t *) dest_ptr)[1] + 1;
      };

      dest_ptr += (copy_words + 1) * sizeof(void *);
    }
  }

  /* Done. Return the result. */
  *start_offset = sizeof(void *);
  *bytelen = dest_cur - dest;
  return 0;
}
/* Make sure the minor heap is empty by performing a minor collection
   if needed. */
void caml_empty_minor_heap (void)
{
  uintnat minor_allocated_bytes =
    caml_domain_state->young_end - caml_domain_state->young_ptr;
  unsigned rewritten = 0;
  struct caml_ref_entry *r;

  caml_save_stack_gc();
  stat_live_bytes = 0;

  if (minor_allocated_bytes != 0){
    caml_gc_log ("Minor collection starting");
    caml_do_local_roots(&caml_oldify_one, caml_domain_self());

    for (r = caml_domain_state->remembered_set->ref.base;
         r < caml_domain_state->remembered_set->ref.ptr; r++){
      value x;
      caml_oldify_one (Op_val(r->obj)[r->field], &x);
    }

    for (r = caml_domain_state->remembered_set->fiber_ref.base;
         r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
      caml_scan_dirty_stack(&caml_oldify_one, r->obj);
    }

    caml_oldify_mopup ();

    for (r = caml_domain_state->remembered_set->ref.base;
         r < caml_domain_state->remembered_set->ref.ptr; r++){
      value v = Op_val(r->obj)[r->field];
      if (Is_block(v) && Is_young(v)) {
        Assert (Hp_val (v) >= caml_domain_state->young_ptr);
        value vnew;
        header_t hd = Hd_val(v);
        // FIXME: call oldify_one here?
        if (Is_promoted_hd(hd)) {
          vnew = caml_addrmap_lookup(
                   &caml_domain_state->remembered_set->promotion, v);
        } else {
          int offset = 0;
          if (Tag_hd(hd) == Infix_tag) {
            offset = Infix_offset_hd(hd);
            v -= offset;
          }
          Assert (Hd_val (v) == 0);
          vnew = Op_val(v)[0] + offset;
        }
        Assert(Is_block(vnew) && !Is_young(vnew));
        Assert(Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) { Assert(Tag_val(vnew) == Infix_tag); }
        rewritten += caml_atomic_cas_field(r->obj, r->field, v, vnew);
      }
    }

    caml_addrmap_iter(&caml_domain_state->remembered_set->promotion,
                      unpin_promoted_object);

    if (caml_domain_state->young_ptr < caml_domain_state->young_start)
      caml_domain_state->young_ptr = caml_domain_state->young_start;
    caml_stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    caml_domain_state->young_ptr = caml_domain_state->young_end;
    clear_table (&caml_domain_state->remembered_set->ref);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion_rev);
    caml_gc_log ("Minor collection completed: %u of %u kb live, "
                 "%u pointers rewritten",
                 (unsigned)stat_live_bytes/1024,
                 (unsigned)minor_allocated_bytes/1024, rewritten);
  }

  for (r = caml_domain_state->remembered_set->fiber_ref.base;
       r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
    caml_scan_dirty_stack(&caml_darken, r->obj);
    caml_clean_stack(r->obj);
  }
  clear_table (&caml_domain_state->remembered_set->fiber_ref);

  caml_restore_stack_gc();

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) caml_domain_state->young_start;
         p < (value *) caml_domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
    ++ minor_gc_counter;
  }
#endif
}
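/* Illustrative sketch (hypothetical types, not the runtime's API): the
   loops above walk a remembered set, a table of (object, field) entries
   recording pointers from the major heap into the minor heap, so only
   those fields need to be revisited at minor collection time. A minimal
   walk over a table of the same shape: */
#include <stddef.h>

struct toy_ref { void *obj; int field; };

struct toy_ref_table { struct toy_ref *base; struct toy_ref *ptr; };

static void toy_scan_remembered_set(struct toy_ref_table *t,
                                    void (*visit)(void *obj, int field))
{
  struct toy_ref *r;
  /* base..ptr is the occupied prefix, exactly as in the loops above. */
  for (r = t->base; r < t->ptr; r++)
    visit(r->obj, r->field);
}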
static void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_domain_state->young_ptr);
    hd = Hd_val (v);
    stat_live_bytes += Bhsize_hd(hd);
    if (Is_promoted_hd (hd)) {
      *p = caml_addrmap_lookup(&caml_domain_state->remembered_set->promotion,
                               v);
    } else if (hd == 0){   /* If already forwarded */
      *p = Op_val(v)[0];   /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;
        sz = Wosize_hd (hd);
        result = alloc_shared (sz, tag);
        *p = result;
        if (tag == Stack_tag) {
          memcpy((void*)result, (void*)v, sizeof(value) * sz);
          Hd_val (v) = 0;
          Op_val(v)[0] = result;
          Op_val(v)[1] = oldify_todo_list;
          oldify_todo_list = v;
        } else {
          field0 = Op_val(v)[0];
          Hd_val (v) = 0;          /* Set forward flag */
          Op_val(v)[0] = result;   /*  and forward pointer. */
          if (sz > 1){
            Op_val (result)[0] = field0;
            Op_val (result)[1] = oldify_todo_list;   /* Add this block */
            oldify_todo_list = v;                    /*  to the "to do" list. */
          }else{
            Assert (sz == 1);
            p = Op_val(result);
            v = field0;
            goto tail_call;
          }
        }
      }else if (tag >= No_scan_tag){
        sz = Wosize_hd (hd);
        result = alloc_shared(sz, tag);
        for (i = 0; i < sz; i++) Op_val (result)[i] = Op_val(v)[i];
        Hd_val (v) = 0;            /* Set forward flag */
        Op_val (v)[0] = result;    /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      } else{
        value f = Forward_val (v);
        tag_t ft = 0;

        Assert (tag == Forward_tag);
        if (Is_block (f)){
          /* ft is the tag of the forwarded value, following the minor
             forward pointer if f has itself already been oldified. */
          if (Is_young (f)){
            ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
          }else{
            ft = Tag_val (f);
          }
        }
        if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = alloc_shared (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;             /* Set (GC) forward flag */
          Op_val (v)[0] = result;     /*  and forward pointer. */
          p = Op_val (result);
          v = f;
          goto tail_call;
        }else{
          v = f;                      /* Follow the forwarding */
          goto tail_call;             /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;
  }
}
static void lm_heap_check_aux1(char *name)
{
  char *start, *ptr, *end;
  char *v;
  value p, *next;
  mlsize_t size;
  unsigned i, index, found;
  char *pointers[1 << 16];

  start = caml_young_start;
  ptr = caml_young_ptr;
  end = caml_young_end;
  fprintf(stderr,
          "AAA: %s: [0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx] (%ld/%ld/%ld bytes)\n",
          name,
          (unsigned long) caml_young_start,
          (unsigned long) caml_young_ptr,
          (unsigned long) caml_young_limit,
          (unsigned long) caml_young_end,
          ((unsigned long) caml_young_end) - (unsigned long) caml_young_ptr,
          ((unsigned long) caml_young_end) - (unsigned long) caml_young_limit,
          ((unsigned long) caml_young_end) - (unsigned long) caml_young_start);
  fflush(stderr);

  /*
   * Phase 1: check that the headers have the right sizes.
   */
  v = (char *) Val_hp(caml_young_ptr);
  index = 0;
  while (v < caml_young_end) {
    pointers[index++] = (char *) v;
    size = Wosize_val(v);
    fprintf(stderr, "%s: 0x%08lx: size %lu, tag = %d\n",
            name, (unsigned long) v, (unsigned long) size, Tag_val(v));
    found = 0;
    for (i = 0; i != 10; i++) {
      next = &Field(v, size + i);
      if (next < (value *) caml_young_end) {
        p = *next;
#define Debug_free_minor 0xD700D6D7ul
        if (p == Debug_free_minor) {
          fprintf(stderr, "\tnext[%u]:0x%08lx = 0x%08lx\n",
                  i, (unsigned long) next, (unsigned long) p);
          found = 1;
        }
        else if (found)
          fprintf(stderr, "\tnext[%u]:0x%08lx = 0x%08lx, size = %lu, tag = %d\n",
                  i, (unsigned long) next, (unsigned long) p,
                  (unsigned long) Wosize_hd(p), Tag_hd(p));
      }
    }
    fflush(stderr);
    v = (char *) &Field(v, size + 1);
  }
  if (v > (char *) Val_hp(caml_young_end)) {
    fprintf(stderr, "%s: heap is bogus\n", name);
    fflush(stderr);
    return;
  }

  /*
   * Phase 2: check that all the fields point to actual values.
   */
  v = (char *) Val_hp(caml_young_ptr);
  while (v < caml_young_end) {
    size = Wosize_val(v);
    if (Tag_val(v) < No_scan_tag) {
      fprintf(stderr, "%s: scanning 0x%08lx: size %lu, tag = %d\n",
              name, (unsigned long) v, (unsigned long) size, Tag_val(v));
      fflush(stderr);
      for (i = 0; i != size; i++) {
        char *p = (char *) Field(v, i);
        if (Is_block((value) p)) {
          if (p >= caml_young_limit && p < caml_young_ptr) {
            fprintf(stderr, "%s: pointer refers to empty young space\n", name);
            fflush(stderr);
            return;
          }
          if (p >= caml_young_ptr && p < caml_young_end)
            search_pointer(pointers, name, index, p, v, i);
        }
      }
    }
    v = (char *) &Field(v, size + 1);
  }
}
static void extern_rec(value v)
{
 tailcall:
  if (Is_long(v)) {
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8(CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16(CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64(CODE_INT64, n);
#endif
    } else
      writecode32(CODE_INT32, n);
    return;
  }
  if (Is_in_value_area(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f)
          && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        goto tailcall;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32(CODE_BLOCK32, hd);
      }
      return;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8(CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16(CODE_SHARED16, d);
      } else {
        writecode32(CODE_SHARED32, d);
      }
      return;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8(CODE_STRING8, len);
      } else {
        writecode32(CODE_STRING32, len);
      }
      writeblock(String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location(v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location(v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location(v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument("output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec(v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64)
        = Custom_ops_val(v)->serialize;
      if (serialize == NULL)
        extern_invalid_argument("output_value: abstract value (Custom)");
      Write(CODE_CUSTOM);
      writeblock(ident, strlen(ident) + 1);
      Custom_ops_val(v)->serialize(v, &sz_32, &sz_64);
      size_32 += 2 + ((sz_32 + 3) >> 2);   /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      extern_record_location(v);
      break;
    }
    default: {
      value field0;
      mlsize_t i;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64(CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32(CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      field0 = Field(v, 0);
      extern_record_location(v);
      if (sz == 1) {
        v = field0;
      } else {
        extern_rec(field0);
        for (i = 1; i < sz - 1; i++) extern_rec(Field(v, i));
        v = Field(v, i);
      }
      goto tailcall;
    }
    }
  }
  else if ((char *) v >= caml_code_area_start &&
           (char *) v < caml_code_area_end) {
    if (!extern_closures)
      extern_invalid_argument("output_value: functional value");
    writecode32(CODE_CODEPOINTER, (char *) v - caml_code_area_start);
    writeblock((char *) caml_code_checksum(), 16);
  }
  else {
    extern_invalid_argument("output_value: abstract value (outside heap)");
  }
}
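/* Illustrative sketch (hypothetical opcodes and names, not the real
   marshalling format): extern_rec picks the shortest opcode that can hold
   the integer being written -- small non-negative ints fold into the opcode
   itself, then 8-, 16-, 32- and 64-bit forms. A stand-alone encoder with
   the same range tests, writing a big-endian payload to a byte buffer: */
#include <stdint.h>
#include <stddef.h>

enum { TOY_SMALL_INT = 0x40, TOY_INT8 = 0x00, TOY_INT16 = 0x01,
       TOY_INT32 = 0x02, TOY_INT64 = 0x03 };

static size_t toy_encode_int(unsigned char *out, int64_t n)
{
  size_t k = 0;
  int i, nbytes;
  if (n >= 0 && n < 0x40) {              /* fits in the opcode itself */
    out[k++] = TOY_SMALL_INT + (unsigned char) n;
    return k;
  }
  if (n >= -(1 << 7) && n < (1 << 7))        { out[k++] = TOY_INT8;  nbytes = 1; }
  else if (n >= -(1 << 15) && n < (1 << 15)) { out[k++] = TOY_INT16; nbytes = 2; }
  else if (n >= -((int64_t)1 << 31) && n < ((int64_t)1 << 31))
                                             { out[k++] = TOY_INT32; nbytes = 4; }
  else                                       { out[k++] = TOY_INT64; nbytes = 8; }
  for (i = nbytes - 1; i >= 0; i--)          /* big-endian payload */
    out[k++] = (unsigned char) ((uint64_t) n >> (8 * i));
  return k;
}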
static void extern_rec(value v)
{
 tailcall:
  if (Is_long(v)) {
    intnat n = Long_val(v);
    return;
  } else {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f)
          && (Tag_val (f) == Closure_tag || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        goto tailcall;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      return;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      return;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      extern_record_location(v);
      break;
    }
    case Double_tag: {
      // if (sizeof(double) != 8)
      //   extern_invalid_argument("output_value: non-standard floats");
      extern_record_location(v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      // if (sizeof(double) != 8)
      //   extern_invalid_argument("output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      extern_record_location(v);
      break;
    }
    case Abstract_tag:
      // extern_invalid_argument("output_value: abstract value (Abstract)");
      break;
    // case Infix_tag:
    //   writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd));
    //   extern_rec(v - Infix_offset_hd(hd));
    //   break;
    case Custom_tag: {
      extern_record_location(v);
      break;
    }
    default: {
      value field0;
      mlsize_t i;
      field0 = Field(v, 0);
      extern_record_location(v);
      if (sz == 1) {
        v = field0;
      } else {
        extern_rec(field0);
        for (i = 1; i < sz - 1; i++) extern_rec(Field(v, i));
        v = Field(v, i);
      }
      goto tailcall;
    }
    }
  }
}
/* Note that the tests on the tag depend on the fact that Infix_tag,
   Forward_tag, and No_scan_tag are contiguous. */
static void oldify_one (void* st_v, value v, value *p)
{
  struct oldify_state* st = st_v;
  value result;
  header_t hd;
  mlsize_t sz, i;
  mlsize_t infix_offset;
  tag_t tag;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  CAMLassert (domain_state->young_start <= domain_state->young_ptr &&
              domain_state->young_ptr <= domain_state->young_end);

 tail_call:
  if (!(Is_block(v) &&
        is_in_interval((value)Hp_val(v), young_ptr, young_end))) {
    /* not a minor block */
    *p = v;
    return;
  }

  infix_offset = 0;
  do {
    hd = Hd_val (v);
    if (hd == 0) {
      /* already forwarded, forward pointer is first field */
      *p = Op_val(v)[0] + infix_offset;
      return;
    }
    tag = Tag_hd (hd);
    if (tag == Infix_tag) {
      /* Infix header, retry with the real block */
      CAMLassert (infix_offset == 0);
      infix_offset = Infix_offset_hd (hd);
      CAMLassert(infix_offset > 0);
      v -= infix_offset;
    }
  } while (tag == Infix_tag);

  if (((value)Hp_val(v)) > st->oldest_promoted) {
    st->oldest_promoted = (value)Hp_val(v);
  }

  if (tag == Cont_tag) {
    struct stack_info* stk = Ptr_val(Op_val(v)[0]);
    CAMLassert(Wosize_hd(hd) == 1 && infix_offset == 0);
    result = alloc_shared(1, Cont_tag);
    *p = result;
    Op_val(result)[0] = Val_ptr(stk);
    *Hp_val (v) = 0;
    Op_val(v)[0] = result;
    if (stk != NULL)
      caml_scan_stack(&oldify_one, st, stk);
  } else if (tag < Infix_tag) {
    value field0;
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared (sz, tag);
    *p = result + infix_offset;
    field0 = Op_val(v)[0];
    CAMLassert (!Is_debug_tag(field0));
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val(v)[0] = result;     /*  and forward pointer. */
    if (sz > 1){
      Op_val (result)[0] = field0;
      Op_val (result)[1] = st->todo_list;   /* Add this block */
      st->todo_list = v;                    /*  to the "to do" list. */
    }else{
      CAMLassert (sz == 1);
      p = Op_val(result);
      v = field0;
      goto tail_call;
    }
  } else if (tag >= No_scan_tag) {
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared(sz, tag);
    for (i = 0; i < sz; i++) {
      value curr = Op_val(v)[i];
      Op_val (result)[i] = curr;
    }
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val (v)[0] = result;    /*  and forward pointer. */
    CAMLassert (infix_offset == 0);
    *p = result;
  } else {
    value f;
    tag_t ft;
    CAMLassert (tag == Forward_tag);
    CAMLassert (infix_offset == 0);

    f = Forward_val (v);
    ft = 0;
    if (Is_block (f)) {
      ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
    }

    if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag) {
      /* Do not short-circuit the pointer.  Copy as a normal block. */
      CAMLassert (Wosize_hd (hd) == 1);
      st->live_bytes += Bhsize_hd(hd);
      result = alloc_shared (1, Forward_tag);
      *p = result;
      *Hp_val (v) = 0;            /* Set (GC) forward flag */
      Op_val (v)[0] = result;     /*  and forward pointer. */
      p = Op_val (result);
      v = f;
      goto tail_call;
    } else {
      v = f;            /* Follow the forwarding */
      goto tail_call;   /*  then oldify. */
    }
  }
}
static int shrink_block(value64 * source, value * dest, mlsize_t source_len,
                        mlsize_t dest_len, color_t color)
{
  value64 * p, * q;
  value * d, * e;
  header_t hd;
  mlsize_t sz;
  tag_t tag;
  byteoffset_t * forward_addr;
  byteoffset_t dest_ofs;
  value v;

  /* First pass: copy the objects and set up forwarding pointers.
     The pointers contained inside blocks are not resolved. */
  for (p = source, q = source + source_len, d = dest; p < q; /*nothing*/) {
    hd = (header_t)(p->lsw);
    p++;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    forward_addr = (byteoffset_t *) p;
    dest_ofs = d + 1 - dest;
    switch(tag) {
    case String_tag: {
      mlsize_t ofs_last_byte, len, new_sz;
      ofs_last_byte = sz * sizeof(value64) - 1;
      len = ofs_last_byte - Byte(p, ofs_last_byte);
      new_sz = (len + sizeof(value)) / sizeof(value);
      *d++ = Make_header(new_sz, String_tag, color);
      Field(d, new_sz - 1) = 0;
      bcopy(p, d, len);
      ofs_last_byte = new_sz * sizeof(value) - 1;
      Byte(d, ofs_last_byte) = ofs_last_byte - len;
      p += sz;
      d += new_sz;
      break;
    }
    case Double_tag:
      *d++ = Make_header(Double_wosize, Double_tag, color);
      Store_double_val((value)d, Double_val((value)p));
      p += sizeof(double) / sizeof(value64);
      d += sizeof(double) / sizeof(value);
      break;
    default:
      *d++ = Make_header(sz, tag, color);
      for (/*nothing*/; sz > 0; sz--, p++, d++) {
        value lsw = p->lsw;
        value msw = p->msw;
        if ((lsw & 1) == 0) {
          /* A relative displacement: check that the unsigned
             displacement fits in 32 bits. */
          if (msw != 0) return -1;
        } else {
          /* Otherwise, it's a signed integer. */
          if ((lsw >= 0 && msw != 0) || (lsw < 0 && msw != -1)) return -1;
        }
        *d = lsw;
      }
    }
    *forward_addr = dest_ofs;   /* store the forwarding pointer */
  }
  assert(d == dest + dest_len);

  /* Second pass: resolve pointers contained inside blocks,
     replacing them by the corresponding forwarding pointer. */
  for (d = dest, e = dest + dest_len; d < e; /*nothing*/) {
    hd = (header_t) *d++;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    if (tag >= No_scan_tag) {
      d += sz;
    } else {
      for (/*nothing*/; sz > 0; sz--, d++) {
        v = *d;
        switch(v & 3) {
        case 0:                 /* 0: a block represented by its offset */
          assert(v >= 0 && v < source_len * sizeof(value64) && (v & 7) == 0);
          *d = (value) (dest + *((byteoffset_t *)((char *) source + v)));
          break;
        case 2:                 /* 2: an atom */
          v = v >> 2;
          assert(v >= 0 && v < 256);
          *d = Atom(v);
          break;
        default:                /* 1 or 3: an integer */
          break;
        }
      }
    }
  }
  return 0;
}
CAMLexport value caml_promote(struct domain* domain, value root)
{
  value **r;
  value iter, f;
  mlsize_t i;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  float percent_to_scan;
  uintnat prev_alloc_words = domain_state->allocated_words;
  struct oldify_state st = {0};
  struct caml_ephe_ref_elt *re;

  /* Integers are already shared */
  if (Is_long(root))
    return root;

  /* Objects which are in the major heap are already shared. */
  if (!Is_minor(root))
    return root;

  st.oldest_promoted = (value)domain_state->young_start;
  st.promote_domain = domain;

  CAMLassert(caml_owner_of_young_block(root) == domain);
  oldify_one (&st, root, &root);
  oldify_mopup (&st);

  CAMLassert (!Is_minor(root));
  /* FIXME: surely a newly-allocated root is already darkened? */
  caml_darken(0, root, 0);

  percent_to_scan =
    st.oldest_promoted <= (value)young_ptr
    ? 0.0
    : (((float)(st.oldest_promoted - (value)young_ptr)) * 100.0 /
       ((value)young_end - (value)domain_state->young_start));

  if (percent_to_scan > Percent_to_promote_with_GC) {
    caml_gc_log("caml_promote: forcing minor GC. %%_minor_to_scan=%f",
                percent_to_scan);
    // ???
    caml_empty_minor_heap_domain (domain);
  } else {
    caml_do_local_roots (&forward_pointer, st.promote_domain, domain, 1);
    caml_scan_stack (&forward_pointer, st.promote_domain,
                     domain_state->current_stack);

    /* Scan major to young pointers. */
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value old_p = **r;
      if (Is_block(old_p) && is_in_interval(old_p, young_ptr, young_end)) {
        value new_p = old_p;
        forward_pointer (st.promote_domain, new_p, &new_p);
        if (old_p != new_p) {
          if (caml_domain_alone())
            **r = new_p;
          else
            atomic_compare_exchange_strong((atomic_value*)*r, &old_p, new_p);
        }
      }
    }

    /* Scan ephemeron ref table */
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      value* key = &Op_val(re->ephe)[re->offset];
      if (Is_block(*key) && is_in_interval(*key, young_ptr, young_end)) {
        forward_pointer (st.promote_domain, *key, key);
      }
    }

    /* Scan young to young pointers */
    for (r = minor_tables->minor_ref.base;
         r < minor_tables->minor_ref.ptr; r++) {
      forward_pointer (st.promote_domain, **r, *r);
    }

    /* Scan newer objects */
    for (iter = (value)young_ptr;
         iter <= st.oldest_promoted;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      value curr = Val_hp(iter);
      if (hd != 0) {
        tag_t tag = Tag_hd (hd);
        if (tag == Cont_tag) {
          struct stack_info* stk = Ptr_val(Op_val(curr)[0]);
          if (stk != NULL)
            caml_scan_stack(&forward_pointer, st.promote_domain, stk);
        } else if (tag < No_scan_tag) {
          for (i = 0; i < Wosize_hd (hd); i++) {
            f = Op_val(curr)[i];
            if (Is_block(f)) {
              forward_pointer (st.promote_domain, f, ((value*)curr) + i);
            }
          }
        }
      }
    }
  }

  domain_state->stat_promoted_words +=
    domain_state->allocated_words - prev_alloc_words;
  return root;
}
static void extern_rec_r(CAML_R, value v)
{
  struct code_fragment * cf;
  struct extern_item * sp;
  sp = extern_stack;

  while(1) {
  if (Is_long(v)) {
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8_r(ctx, CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16_r(ctx, CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64_r(ctx, CODE_INT64, n);
#endif
    } else
      writecode32_r(ctx, CODE_INT32, n);
    goto next_item;
  }
  if (Is_in_value_area(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f)
          && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        continue;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32_r(ctx, CODE_BLOCK32, hd);
      }
      goto next_item;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8_r(ctx, CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16_r(ctx, CODE_SHARED16, d);
      } else {
        writecode32_r(ctx, CODE_SHARED32, d);
      }
      goto next_item;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8_r(ctx, CODE_STRING8, len);
      } else {
        writecode32_r(ctx, CODE_STRING32, len);
      }
      writeblock_r(ctx, String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location_r(ctx, v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument_r(ctx, "output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location_r(ctx, v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument_r(ctx, "output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8_r(ctx, CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32_r(ctx, CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location_r(ctx, v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument_r(ctx, "output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      writecode32_r(ctx, CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec_r(ctx, v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64);
      if (extern_cross_context) {
        /* Marshalling for another context: use the cross-context
           serializer. */
        serialize = Custom_ops_val(v)->cross_context_serialize;
      } else {
        serialize = Custom_ops_val(v)->serialize;
      }
      if (serialize == NULL) {
        /* Deliberate crash (division by zero), left in as a debugging
           aid: */
        volatile int a = 1;
        a /= 0;
        extern_invalid_argument_r(ctx, "output_value: abstract value (Custom)");
      }
      Write(CODE_CUSTOM);
      writeblock_r(ctx, ident, strlen(ident) + 1);
      serialize(v, &sz_32, &sz_64);
      size_32 += 2 + ((sz_32 + 3) >> 2);   /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      /* Note: this temporarily breaks the object, by replacing it with a
         forwarding pointer. */
      extern_record_location_r(ctx, v);
      break;
    }
    default: {
      value field0;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64_r(ctx, CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32_r(ctx, CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      field0 = Field(v, 0);
      extern_record_location_r(ctx, v);
      /* Remember that we still have to serialize fields 1 ... sz - 1 */
      if (sz > 1) {
        sp++;
        if (sp >= extern_stack_limit) sp = extern_resize_stack_r(ctx, sp);
        sp->v = &Field(v, 1);
        sp->count = sz - 1;
      }
      /* Continue serialization with the first field */
      v = field0;
      continue;
    }
    }
  }
  else if ((cf = extern_find_code_r(ctx, (char *) v)) != NULL) {
    if (!extern_closures) {
      /* FIXME: raising the exception is the correct behaviour; the
         deliberate crash below is a debugging aid. */
      extern_invalid_argument_r(ctx, "output_value: functional value");
      { volatile int a = 1; a /= 0; }
    }
    writecode32_r(ctx, CODE_CODEPOINTER, (char *) v - cf->code_start);
    writeblock_r(ctx, (char *) cf->digest, 16);
  }
  else {
    if (extern_cross_context) {
      fprintf(stderr,
              "ZZZZ working on the external pointer: %p, which is to say %li [cf is %p]\n",
              (void*)v, (long)v, cf);
      DUMP("about to crash in the strange case I'm debugging");
      extern_invalid_argument_r(ctx,
        "output_value: abstract value (outside heap) [FIXME: implement]");
    } else
      extern_invalid_argument_r(ctx, "output_value: abstract value (outside heap)");
  }
 next_item:
  /* Pop one more item to marshal, if any */
  if (sp == extern_stack) {
    /* We are done.  Cleanup the stack and leave the function */
    extern_free_stack_r(ctx);
    return;
  }
  v = *((sp->v)++);
  if (--(sp->count) == 0) sp--;
  }
}
static void expand_block(value32 * source, value * dest, mlsize_t source_len,
                         mlsize_t dest_len, color_t color)
{
  value32 * p, * q;
  value * d, * e;
  header_t hd;
  mlsize_t sz;
  tag_t tag;
  uint32_t * forward_addr;
  uint32_t dest_ofs;
  value v;

  /* First pass: copy the objects and set up forwarding pointers.
     The pointers contained inside blocks are not resolved. */
  for (p = source, q = source + source_len, d = dest; p < q; /*nothing*/) {
    hd = (header_t) *p++;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    forward_addr = (uint32_t *) p;
    dest_ofs = d + 1 - dest;
    switch(tag) {
    case String_tag: {
      mlsize_t ofs_last_byte, len, new_sz;
      ofs_last_byte = sz * sizeof(value32) - 1;
      len = ofs_last_byte - Byte(p, ofs_last_byte);
      new_sz = (sz * sizeof(value32) + sizeof(value) - 1) / sizeof(value);
      *d++ = Make_header(new_sz, String_tag, color);
      Field(d, new_sz - 1) = 0;
      bcopy((char *)p, (char *)d, len);
      ofs_last_byte = new_sz * sizeof(value) - 1;
      Byte(d, ofs_last_byte) = ofs_last_byte - len;
      p += sz;
      d += new_sz;
      break;
    }
    case Double_tag:
      *d++ = Make_header(Double_wosize, Double_tag, color);
      /* Cannot do *((double *) d) = *((double *) p) directly
         because p might not be 64-aligned. */
      assert(sizeof(double) == sizeof(value));
      ((value32 *) d)[0] = p[0];
      ((value32 *) d)[1] = p[1];
      p += sizeof(double) / sizeof(value32);
      d += 1;
      break;
    default:
      *d++ = Make_header(sz, tag, color);
      for (/*nothing*/; sz > 0; sz--, p++, d++) {
        if ((*p & 1) == 0) {
          *d = *((uint32_t *) p);   /* copy, zero expansion */
        } else {
          *d = *((int32_t *) p);    /* copy, sign expansion */
        }
      }
      break;
    }
    *forward_addr = dest_ofs;       /* store the forwarding pointer */
  }
  assert(d == dest + dest_len);

  /* Second pass: resolve pointers contained inside blocks,
     replacing them by the corresponding forwarding pointer. */
  for (d = dest, e = dest + dest_len; d < e; /*nothing*/) {
    hd = (header_t) *d++;
    sz = Wosize_hd(hd);
    tag = Tag_hd(hd);
    if (tag >= No_scan_tag) {
      d += sz;
    } else {
      for (/*nothing*/; sz > 0; sz--, d++) {
        v = *d;
        switch(v & 3) {
        case 0:                 /* 0: a block represented by its offset */
          assert(v >= 0 && v < source_len * sizeof(value32) && (v & 3) == 0);
          *d = (value) (dest + *((uint32_t *)((char *) source + v)));
          break;
        case 2:                 /* 2: an atom */
          v = v >> 2;
          assert(v >= 0 && v < 256);
          *d = Atom(v);
          break;
        default:                /* 1 or 3: an integer */
          break;
        }
      }
    }
  }
}
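/* Illustrative sketch (hypothetical layout, not the runtime's image
   format): expand_block and shrink_block both relocate in two passes --
   pass 1 copies each object and plants forwarding information in the old
   image, pass 2 rewrites every pointer field through that forwarding
   table. The toy below relocates a flat image of blocks, each a header
   word (size in words) followed by that many fields; even fields are byte
   offsets of another block's header within the image, odd fields are
   tagged integers. */
#include <stddef.h>
#include <stdint.h>

static void toy_relocate(uintptr_t *src, size_t len_words, uintptr_t *dst)
{
  size_t i = 0, j = 0, f;
  /* Pass 1: copy each block; plant the copy's byte offset in the old
     header slot (the real code uses the word after the header instead). */
  while (i < len_words) {
    uintptr_t size = src[i];
    dst[j] = size;
    for (f = 0; f < size; f++) dst[j + 1 + f] = src[i + 1 + f];
    src[i] = j * sizeof(uintptr_t);   /* forwarding information */
    i += 1 + size;
    j += 1 + size;
  }
  /* Pass 2: rewrite pointer fields through the forwarding slots. */
  for (i = 0; i < j; i += 1 + dst[i]) {
    for (f = 0; f < dst[i]; f++) {
      uintptr_t v = dst[i + 1 + f];
      if ((v & 1) == 0)                        /* a pointer field */
        dst[i + 1 + f] = src[v / sizeof(uintptr_t)];
    }
  }
}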
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all
       young-young pointers from older to younger objects */
    {
      struct addrmap young_young_ptrs = ADDRMAP_INIT;
      mlsize_t i;
      value iter;
      for (r = minor_tables->minor_ref.base;
           r < minor_tables->minor_ref.ptr; r++) {
        *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
      }
      for (iter = (value)young_ptr;
           iter < (value)young_end;
           iter = next_minor_block(domain_state, iter)) {
        value hd = Hd_hp(iter);
        if (hd != 0) {
          value curr = Val_hp(iter);
          tag_t tag = Tag_hd (hd);
          if (tag < No_scan_tag && tag != Cont_tag) {
            // FIXME: should scan Cont_tag
            for (i = 0; i < Wosize_hd(hd); i++) {
              value* f = Op_val(curr) + i;
              if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end) &&
                  *f < curr) {
                CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
              }
            }
          }
        }
      }
      caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup.
           Handle only keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key) &&
          is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) {
          /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          CAMLassert(!ephe_check_alive_data(re, young_ptr, young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) &&
          is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base;
         elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");

    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words +=
      domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, "
                 "rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024,
                 rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
static void do_compaction_r (CAML_R)
{
  char *ch, *chend;
  Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check_r (ctx);
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{
          Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }

  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots_r (ctx, invert_root_r);
    caml_final_do_weak_roots_r (ctx, invert_root_r);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++) invert_pointer_at_r (ctx, &(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }
    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p,i) != caml_weak_none){
            invert_pointer_at_r (ctx, (word *) &(Field (p,i)));
          }
        }
        invert_pointer_at_r (ctx, (word *) pp);
        pp = &Field (p, 0);
      }
    }
  }

  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;

        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;
            Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate_r (ctx, Bsize_wsize (sz));
          q = *p;
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;
                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }
              Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{
          Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:
             Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header: the object was
             free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate_r (ctx);
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate_r (ctx, sz);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap_r (ctx, ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset_r (ctx);
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks_r (ctx,
                                 (value *) (ch + Chunk_alloc (ch)),
                                 Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch)),
                                 1, Caml_white);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
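/* Illustrative sketch (hypothetical layout, not the runtime's encoding):
   the compactor above uses pointer inversion ("threading"): every field
   that points at a block is linked into a chain rooted at that block's
   header, so that once the new address is known, one walk down the chain
   updates all referrers and restores the header. In the toy below, the low
   bit plays the role of the Ecolor bits: slots are word-aligned (low bit
   0), while a real header word is assumed to be tagged (low bit 1). */
#include <stdint.h>

/* Thread one referrer: make the target's header point back at the slot,
   remembering the previous header/chain value in the slot itself. */
static void toy_invert(uintptr_t *slot)
{
  uintptr_t *hdr = (uintptr_t *) *slot;
  *slot = *hdr;
  *hdr = (uintptr_t) slot;
}

/* Unthread: give the object its new address and restore the header. */
static void toy_unthread(uintptr_t *hdr, uintptr_t newadr)
{
  uintptr_t q = *hdr;
  while ((q & 1) == 0) {          /* still a referrer in the chain */
    uintptr_t next = *(uintptr_t *) q;
    *(uintptr_t *) q = newadr;    /* fix up this referrer */
    q = next;
  }
  *hdr = q;                       /* restore the original tagged header */
}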
static void extern_rec(value v)
{
  struct code_fragment * cf;
  struct extern_item * sp;
  sp = extern_stack;

  while(1) {
  if (Is_long(v)) {
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8(CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16(CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64(CODE_INT64, n);
#endif
    } else
      writecode32(CODE_INT32, n);
    goto next_item;
  }
  if (Is_in_value_area(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f)
          && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        continue;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32(CODE_BLOCK32, hd);
      }
      goto next_item;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8(CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16(CODE_SHARED16, d);
      } else {
        writecode32(CODE_SHARED32, d);
      }
      goto next_item;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8(CODE_STRING8, len);
      } else {
        writecode32(CODE_STRING32, len);
      }
      writeblock(String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location(v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location(v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location(v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument("output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec(v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64)
        = Custom_ops_val(v)->serialize;
      if (serialize == NULL)
        extern_invalid_argument("output_value: abstract value (Custom)");
      Write(CODE_CUSTOM);
      writeblock(ident, strlen(ident) + 1);
      Custom_ops_val(v)->serialize(v, &sz_32, &sz_64);
      size_32 += 2 + ((sz_32 + 3) >> 2);   /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      extern_record_location(v);
      break;
    }
    default: {
      value field0;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64(CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32(CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      field0 = Field(v, 0);
      extern_record_location(v);
      /* Remember that we still have to serialize fields 1 ... sz - 1 */
      if (sz > 1) {
        sp++;
        if (sp >= extern_stack_limit) sp = extern_resize_stack(sp);
        sp->v = &Field(v, 1);
        sp->count = sz - 1;
      }
      /* Continue serialization with the first field */
      v = field0;
      continue;
    }
    }
  }
  else if ((cf = extern_find_code((char *) v)) != NULL) {
    if (!extern_closures)
      extern_invalid_argument("output_value: functional value");
    writecode32(CODE_CODEPOINTER, (char *) v - cf->code_start);
    writeblock((char *) cf->digest, 16);
  }
  else {
    extern_invalid_argument("output_value: abstract value (outside heap)");
  }
 next_item:
  /* Pop one more item to marshal, if any */
  if (sp == extern_stack) {
    /* We are done.  Cleanup the stack and leave the function */
    extern_free_stack();
    return;
  }
  v = *((sp->v)++);
  if (--(sp->count) == 0) sp--;
  }
}