/* If [v] is a block in the minor heap of [state]'s domain (or the current
   domain when [state] is NULL) that has already been promoted, overwrite
   [*p] with the address of its major-heap copy; otherwise leave [*p]
   untouched.  An Infix_tag header is resolved by recursing on the
   enclosing closure block and re-adding the infix offset. */
void forward_pointer (void* state, value v, value *p) {
  struct domain* promoting = state;
  caml_domain_state* ds = promoting ? promoting->state : Caml_state;
  char* minor_start = ds->young_ptr;
  char* minor_end = ds->young_end;
  header_t header;
  mlsize_t infix_off;
  value target;

  /* Not a minor-heap block: nothing to forward. */
  if (!Is_block (v) ||
      !is_in_interval((value)Hp_val(v), minor_start, minor_end))
    return;

  header = Hd_val(v);
  if (header == 0) {
    /* Forwarded: the first field holds the major-heap copy. */
    *p = Op_val(v)[0];
    CAMLassert (Is_block(*p) && !Is_minor(*p));
  } else if (Tag_hd(header) == Infix_tag) {
    /* Retry on the enclosing block, then rebuild the infix pointer. */
    infix_off = Infix_offset_hd(header);
    target = 0;
    forward_pointer (state, v - infix_off, &target);
    if (target) *p = target + infix_off;
  }
}
/* Promote the minor-heap block [v] to the major heap and store the address
   of the copy in [*p].  An already-forwarded block (zero header) resolves
   through its first field.  Scannable blocks with more than one field are
   threaded onto [oldify_todo_list] so their contents are copied later by
   the mopup pass; single-field blocks are followed iteratively via
   [tail_call] to bound recursion depth.
   Fix: removed a leftover debug printf ("%lx, %lx\n" applied to pointer
   arguments — a printf format/argument mismatch, i.e. undefined behavior,
   and stray output from inside the GC); the Assert that follows already
   checks the same invariant. */
void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_young_ptr);
    hd = Hd_val (v);
    if (hd == 0){          /* If already forwarded */
      *p = Field (v, 0);   /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;
        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        *p = result;
        field0 = Field (v, 0);
        Hd_val (v) = 0;            /* Set forward flag */
        Field (v, 0) = result;     /*  and forward pointer. */
        if (sz > 1){
          Field (result, 0) = field0;
          Field (result, 1) = oldify_todo_list;    /* Add this block */
          oldify_todo_list = v;                    /*  to the "to do" list. */
        }else{
          Assert (sz == 1);
          /* Single scannable field: promote it now, iteratively. */
          p = &Field (result, 0);
          v = field0;
          goto tail_call;
        }
      }else if (tag >= No_scan_tag){
        /* No pointers inside: copy the payload wholesale. */
        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        for (i = 0; i < sz; i++) Field (result, i) = Field (v, i);
        Hd_val (v) = 0;            /* Set forward flag */
        Field (v, 0) = result;     /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      }else{
        value f = Forward_val (v);
        tag_t ft = 0;
        int vv = 1;
        Assert (tag == Forward_tag);
        if (Is_block (f)){
          vv = Is_in_value_area(f);
          if (vv) {
            /* If [f] is itself forwarded, read the tag of its copy. */
            ft = Tag_val (Hd_val (f) == 0 ? Field (f, 0) : f);
          }
        }
        if (!vv || ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = caml_alloc_shr (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;             /* Set (GC) forward flag */
          Field (v, 0) = result;      /*  and forward pointer. */
          p = &Field (result, 0);
          v = f;
          goto tail_call;
        }else{
          v = f;            /* Follow the forwarding */
          goto tail_call;   /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;  /* Immediate value or major-heap pointer: unchanged. */
  }
}
/* Make sure the minor heap is empty by performing a minor collection
 * if needed.
 */
void caml_empty_minor_heap (void)
{
  uintnat minor_allocated_bytes = caml_domain_state->young_end - caml_domain_state->young_ptr;
  unsigned rewritten = 0;
  struct caml_ref_entry *r;

  /* Stack scanning below must be bracketed by save/restore. */
  caml_save_stack_gc();
  stat_live_bytes = 0;

  if (minor_allocated_bytes != 0){
    caml_gc_log ("Minor collection starting");
    /* Pass 1: promote everything reachable from local roots, from the
       remembered set (major -> minor references), and from dirty fiber
       stacks.  The promoted copy [x] is discarded here; the remembered
       entries are rewritten in pass 2, after all promotion is complete. */
    caml_do_local_roots(&caml_oldify_one, caml_domain_self());
    for (r = caml_domain_state->remembered_set->ref.base; r < caml_domain_state->remembered_set->ref.ptr; r++){
      value x;
      caml_oldify_one (Op_val(r->obj)[r->field], &x);
    }
    for (r = caml_domain_state->remembered_set->fiber_ref.base; r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
      caml_scan_dirty_stack(&caml_oldify_one, r->obj);
    }
    /* Drain the todo list: copies the fields of everything promoted above. */
    caml_oldify_mopup ();
    /* Pass 2: rewrite each remembered-set entry so it points at the
       promoted (major-heap) copy of its target. */
    for (r = caml_domain_state->remembered_set->ref.base; r < caml_domain_state->remembered_set->ref.ptr; r++){
      value v = Op_val(r->obj)[r->field];
      if (Is_block(v) && Is_young(v)) {
        Assert (Hp_val (v) >= caml_domain_state->young_ptr);
        value vnew;
        header_t hd = Hd_val(v);
        // FIXME: call oldify_one here?
        if (Is_promoted_hd(hd)) {
          /* Pinned object: its copy is recorded in the promotion map. */
          vnew = caml_addrmap_lookup(&caml_domain_state->remembered_set->promotion, v);
        } else {
          int offset = 0;
          if (Tag_hd(hd) == Infix_tag) {
            /* Resolve infix pointers through the enclosing block. */
            offset = Infix_offset_hd(hd);
            v -= offset;
          }
          Assert (Hd_val (v) == 0);
          /* Zero header: first field is the forward pointer. */
          vnew = Op_val(v)[0] + offset;
        }
        Assert(Is_block(vnew) && !Is_young(vnew));
        Assert(Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          Assert(Tag_val(vnew) == Infix_tag);
        }
        /* CAS rather than a plain store: another domain may be mutating
           this field concurrently. */
        rewritten += caml_atomic_cas_field(r->obj, r->field, v, vnew);
      }
    }
    caml_addrmap_iter(&caml_domain_state->remembered_set->promotion, unpin_promoted_object);

    if (caml_domain_state->young_ptr < caml_domain_state->young_start)
      caml_domain_state->young_ptr = caml_domain_state->young_start;
    caml_stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    /* Reset the minor heap to empty. */
    caml_domain_state->young_ptr = caml_domain_state->young_end;
    clear_table (&caml_domain_state->remembered_set->ref);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion);
    caml_addrmap_clear(&caml_domain_state->remembered_set->promotion_rev);
    caml_gc_log ("Minor collection completed: %u of %u kb live, %u pointers rewritten",
                 (unsigned)stat_live_bytes/1024, (unsigned)minor_allocated_bytes/1024, rewritten);
  }

  /* Darken and clean fiber stacks even when the minor heap was empty. */
  for (r = caml_domain_state->remembered_set->fiber_ref.base; r < caml_domain_state->remembered_set->fiber_ref.ptr; r++) {
    caml_scan_dirty_stack(&caml_darken, r->obj);
    caml_clean_stack(r->obj);
  }
  clear_table (&caml_domain_state->remembered_set->fiber_ref);
  caml_restore_stack_gc();

#ifdef DEBUG
  {
    /* Poison the emptied minor heap so stale reads are caught early. */
    value *p;
    for (p = (value *) caml_domain_state->young_start;
         p < (value *) caml_domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
    ++ minor_gc_counter;
  }
#endif
}
/* Promote the minor-heap block [v] into the shared heap, storing the
   promoted value in [*p].  Zero-header blocks are already forwarded;
   pinned blocks (Is_promoted_hd) resolve through the promotion map.
   Multi-field scannable blocks go on [oldify_todo_list] for the mopup
   pass; single-field blocks are followed iteratively via [tail_call].
   Fix: the local [vv] was assigned 1 on every path (a vestige of an
   Is_in_value_area test), making the `!vv` guard dead code — [vv] and the
   redundant `vv = 1; if (vv)` branch have been removed.  Behavior is
   unchanged. */
static void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_domain_state->young_ptr);
    hd = Hd_val (v);
    stat_live_bytes += Bhsize_hd(hd);
    if (Is_promoted_hd (hd)) {
      /* Pinned object already promoted: look up its major-heap copy. */
      *p = caml_addrmap_lookup(&caml_domain_state->remembered_set->promotion, v);
    } else if (hd == 0){   /* If already forwarded */
      *p = Op_val(v)[0];   /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;
        sz = Wosize_hd (hd);
        result = alloc_shared (sz, tag);
        *p = result;
        if (tag == Stack_tag) {
          /* Stacks are copied wholesale up front, then still threaded
             onto the todo list so the mopup pass scans their contents. */
          memcpy((void*)result, (void*)v, sizeof(value) * sz);
          Hd_val (v) = 0;
          Op_val(v)[0] = result;
          Op_val(v)[1] = oldify_todo_list;
          oldify_todo_list = v;
        } else {
          field0 = Op_val(v)[0];
          Hd_val (v) = 0;          /* Set forward flag */
          Op_val(v)[0] = result;   /*  and forward pointer. */
          if (sz > 1){
            Op_val (result)[0] = field0;
            Op_val (result)[1] = oldify_todo_list;  /* Add this block */
            oldify_todo_list = v;                   /*  to the "to do" list. */
          }else{
            Assert (sz == 1);
            p = Op_val(result);
            v = field0;
            goto tail_call;
          }
        }
      }else if (tag >= No_scan_tag){
        /* No pointers inside: copy the payload wholesale. */
        sz = Wosize_hd (hd);
        result = alloc_shared(sz, tag);
        for (i = 0; i < sz; i++) Op_val (result)[i] = Op_val(v)[i];
        Hd_val (v) = 0;          /* Set forward flag */
        Op_val (v)[0] = result;  /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      } else{
        value f = Forward_val (v);
        tag_t ft = 0;
        Assert (tag == Forward_tag);
        if (Is_block (f)){
          if (Is_young (f)){
            /* If [f] is itself forwarded, read the tag of its copy. */
            ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
          }else{
            ft = Tag_val (f);
          }
        }
        if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = alloc_shared (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;          /* Set (GC) forward flag */
          Op_val (v)[0] = result;  /*  and forward pointer. */
          p = Op_val (result);
          v = f;
          goto tail_call;
        }else{
          v = f;            /* Follow the forwarding */
          goto tail_call;   /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;  /* Immediate value or major-heap pointer: unchanged. */
  }
}
static void extern_rec(value v) { tailcall: if (Is_long(v)) { intnat n = Long_val(v); if (n >= 0 && n < 0x40) { Write(PREFIX_SMALL_INT + n); } else if (n >= -(1 << 7) && n < (1 << 7)) { writecode8(CODE_INT8, n); } else if (n >= -(1 << 15) && n < (1 << 15)) { writecode16(CODE_INT16, n); #ifdef ARCH_SIXTYFOUR } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) { writecode64(CODE_INT64, n); #endif } else writecode32(CODE_INT32, n); return; } if (Is_in_value_area(v)) { header_t hd = Hd_val(v); tag_t tag = Tag_hd(hd); mlsize_t sz = Wosize_hd(hd); if (tag == Forward_tag) { value f = Forward_val (v); if (Is_block (f) && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){ /* Do not short-circuit the pointer. */ }else{ v = f; goto tailcall; } } /* Atoms are treated specially for two reasons: they are not allocated in the externed block, and they are automatically shared. */ if (sz == 0) { if (tag < 16) { Write(PREFIX_SMALL_BLOCK + tag); } else { writecode32(CODE_BLOCK32, hd); } return; } /* Check if already seen */ if (Color_hd(hd) == Caml_blue) { uintnat d = obj_counter - (uintnat) Field(v, 0); if (d < 0x100) { writecode8(CODE_SHARED8, d); } else if (d < 0x10000) { writecode16(CODE_SHARED16, d); } else { writecode32(CODE_SHARED32, d); } return; } /* Output the contents of the object */ switch(tag) { case String_tag: { mlsize_t len = caml_string_length(v); if (len < 0x20) { Write(PREFIX_SMALL_STRING + len); } else if (len < 0x100) { writecode8(CODE_STRING8, len); } else { writecode32(CODE_STRING32, len); } writeblock(String_val(v), len); size_32 += 1 + (len + 4) / 4; size_64 += 1 + (len + 8) / 8; extern_record_location(v); break; } case Double_tag: { if (sizeof(double) != 8) extern_invalid_argument("output_value: non-standard floats"); Write(CODE_DOUBLE_NATIVE); writeblock_float8((double *) v, 1); size_32 += 1 + 2; size_64 += 1 + 1; extern_record_location(v); break; } case Double_array_tag: { mlsize_t 
nfloats; if (sizeof(double) != 8) extern_invalid_argument("output_value: non-standard floats"); nfloats = Wosize_val(v) / Double_wosize; if (nfloats < 0x100) { writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats); } else { writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats); } writeblock_float8((double *) v, nfloats); size_32 += 1 + nfloats * 2; size_64 += 1 + nfloats; extern_record_location(v); break; } case Abstract_tag: extern_invalid_argument("output_value: abstract value (Abstract)"); break; case Infix_tag: writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd)); extern_rec(v - Infix_offset_hd(hd)); break; case Custom_tag: { uintnat sz_32, sz_64; char * ident = Custom_ops_val(v)->identifier; void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64) = Custom_ops_val(v)->serialize; if (serialize == NULL) extern_invalid_argument("output_value: abstract value (Custom)"); Write(CODE_CUSTOM); writeblock(ident, strlen(ident) + 1); Custom_ops_val(v)->serialize(v, &sz_32, &sz_64); size_32 += 2 + ((sz_32 + 3) >> 2); /* header + ops + data */ size_64 += 2 + ((sz_64 + 7) >> 3); extern_record_location(v); break; } default: { value field0; mlsize_t i; if (tag < 16 && sz < 8) { Write(PREFIX_SMALL_BLOCK + tag + (sz << 4)); #ifdef ARCH_SIXTYFOUR } else if (hd >= ((uintnat)1 << 32)) { writecode64(CODE_BLOCK64, Whitehd_hd (hd)); #endif } else { writecode32(CODE_BLOCK32, Whitehd_hd (hd)); } size_32 += 1 + sz; size_64 += 1 + sz; field0 = Field(v, 0); extern_record_location(v); if (sz == 1) { v = field0; } else { extern_rec(field0); for (i = 1; i < sz - 1; i++) extern_rec(Field(v, i)); v = Field(v, i); } goto tailcall; } } } else if ((char *) v >= caml_code_area_start &&
static void extern_rec(value v) { struct code_fragment * cf; struct extern_item * sp; sp = extern_stack; while(1) { if (Is_long(v)) { intnat n = Long_val(v); if (n >= 0 && n < 0x40) { Write(PREFIX_SMALL_INT + n); } else if (n >= -(1 << 7) && n < (1 << 7)) { writecode8(CODE_INT8, n); } else if (n >= -(1 << 15) && n < (1 << 15)) { writecode16(CODE_INT16, n); #ifdef ARCH_SIXTYFOUR } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) { writecode64(CODE_INT64, n); #endif } else writecode32(CODE_INT32, n); goto next_item; } if (Is_in_value_area(v)) { header_t hd = Hd_val(v); tag_t tag = Tag_hd(hd); mlsize_t sz = Wosize_hd(hd); if (tag == Forward_tag) { value f = Forward_val (v); if (Is_block (f) && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){ /* Do not short-circuit the pointer. */ }else{ v = f; continue; } } /* Atoms are treated specially for two reasons: they are not allocated in the externed block, and they are automatically shared. 
*/ if (sz == 0) { if (tag < 16) { Write(PREFIX_SMALL_BLOCK + tag); } else { writecode32(CODE_BLOCK32, hd); } goto next_item; } /* Check if already seen */ if (Color_hd(hd) == Caml_blue) { uintnat d = obj_counter - (uintnat) Field(v, 0); if (d < 0x100) { writecode8(CODE_SHARED8, d); } else if (d < 0x10000) { writecode16(CODE_SHARED16, d); } else { writecode32(CODE_SHARED32, d); } goto next_item; } /* Output the contents of the object */ switch(tag) { case String_tag: { mlsize_t len = caml_string_length(v); if (len < 0x20) { Write(PREFIX_SMALL_STRING + len); } else if (len < 0x100) { writecode8(CODE_STRING8, len); } else { writecode32(CODE_STRING32, len); } writeblock(String_val(v), len); size_32 += 1 + (len + 4) / 4; size_64 += 1 + (len + 8) / 8; extern_record_location(v); break; } case Double_tag: { if (sizeof(double) != 8) extern_invalid_argument("output_value: non-standard floats"); Write(CODE_DOUBLE_NATIVE); writeblock_float8((double *) v, 1); size_32 += 1 + 2; size_64 += 1 + 1; extern_record_location(v); break; } case Double_array_tag: { mlsize_t nfloats; if (sizeof(double) != 8) extern_invalid_argument("output_value: non-standard floats"); nfloats = Wosize_val(v) / Double_wosize; if (nfloats < 0x100) { writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats); } else { writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats); } writeblock_float8((double *) v, nfloats); size_32 += 1 + nfloats * 2; size_64 += 1 + nfloats; extern_record_location(v); break; } case Abstract_tag: extern_invalid_argument("output_value: abstract value (Abstract)"); break; case Infix_tag: writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd)); extern_rec(v - Infix_offset_hd(hd)); break; case Custom_tag: { uintnat sz_32, sz_64; char * ident = Custom_ops_val(v)->identifier; void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64) = Custom_ops_val(v)->serialize; if (serialize == NULL) extern_invalid_argument("output_value: abstract value (Custom)"); Write(CODE_CUSTOM); writeblock(ident, strlen(ident) 
+ 1); Custom_ops_val(v)->serialize(v, &sz_32, &sz_64); size_32 += 2 + ((sz_32 + 3) >> 2); /* header + ops + data */ size_64 += 2 + ((sz_64 + 7) >> 3); extern_record_location(v); break; } default: { value field0; if (tag < 16 && sz < 8) { Write(PREFIX_SMALL_BLOCK + tag + (sz << 4)); #ifdef ARCH_SIXTYFOUR } else if (hd >= ((uintnat)1 << 32)) { writecode64(CODE_BLOCK64, Whitehd_hd (hd)); #endif } else { writecode32(CODE_BLOCK32, Whitehd_hd (hd)); } size_32 += 1 + sz; size_64 += 1 + sz; field0 = Field(v, 0); extern_record_location(v); /* Remember that we still have to serialize fields 1 ... sz - 1 */ if (sz > 1) { sp++; if (sp >= extern_stack_limit) sp = extern_resize_stack(sp); sp->v = &Field(v,1); sp->count = sz-1; } /* Continue serialization with the first field */ v = field0; continue; } } } else if ((cf = extern_find_code((char *) v)) != NULL) { if (!extern_closures) extern_invalid_argument("output_value: functional value"); writecode32(CODE_CODEPOINTER, (char *) v - cf->code_start); writeblock((char *) cf->digest, 16); } else { extern_invalid_argument("output_value: abstract value (outside heap)"); } next_item: /* Pop one more item to marshal, if any */ if (sp == extern_stack) { /* We are done. Cleanup the stack and leave the function */ extern_free_stack(); return; } v = *((sp->v)++); if (--(sp->count) == 0) sp--; }
/* Perform a minor collection of [domain]'s minor heap: promote all live
   minor blocks to the shared heap, update ephemeron keys, rewrite
   major-to-minor references, run custom-block finalisers for dead minor
   values, and reset the minor heap and its remembered tables.
   CAMLnoalloc: no OCaml allocation may occur in this function. */
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all
       young-young pointers from older to younger objects */
    {
      struct addrmap young_young_ptrs = ADDRMAP_INIT;
      mlsize_t i;
      value iter;
      for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
        *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
      }
      /* Walk every block of the minor heap and check each young-young
         back-pointer is registered in minor_ref. */
      for (iter = (value)young_ptr;
           iter < (value)young_end;
           iter = next_minor_block(domain_state, iter)) {
        value hd = Hd_hp(iter);
        if (hd != 0) {
          value curr = Val_hp(iter);
          tag_t tag = Tag_hd (hd);
          if (tag < No_scan_tag && tag != Cont_tag) {
            // FIXME: should scan Cont_tag
            for (i = 0; i < Wosize_hd(hd); i++) {
              value* f = Op_val(curr) + i;
              if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end) && *f < curr) {
                CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
              }
            }
          }
        }
      }
      caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    /* Promote from local roots, the current stack, and major-to-minor
       references.  The copy [x] is discarded here; the table entries are
       rewritten after oldify_mopup completes. */
    caml_do_local_roots(&oldify_one, &st, domain, 0);
    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);
    for (r = minor_tables->major_ref.base; r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");
    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base; re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key)
          && is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) {
          /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          /* Key died in this collection: clear key and data. */
          CAMLassert(!ephe_check_alive_data(re,young_ptr,young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    /* Rewrite every major-to-minor reference to the promoted copy. */
    for (r = minor_tables->major_ref.base; r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) && is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          /* Resolve infix pointers through the enclosing block. */
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          /* Single domain: plain store always succeeds. */
          **r = vnew;
          ++rewrite_successes;
        } else {
          /* Other domains may mutate this slot concurrently: CAS. */
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base; elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* Block was promoted (header zeroed): do not finalise. */
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");

    /* Reset all remembered tables and the minor heap itself. */
    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024,
                 rewrite_successes, rewrite_failures);
  }
  else {
    /* Nothing allocated since the last collection. */
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  {
    /* Poison the emptied minor heap so stale reads are caught early. */
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
/* Note that the tests on the tag depend on the fact that Infix_tag,
   Forward_tag, and No_scan_tag are contiguous. */

/* Promote the minor-heap block [v] (of the domain recorded in [st_v],
   or the current domain) into the shared heap and store the promoted
   value in [*p].  Non-minor values are stored back unchanged.  Blocks
   with several scannable fields go on [st->todo_list] for the mopup
   pass; single-field chains are followed iteratively via [tail_call]. */
static void oldify_one (void* st_v, value v, value *p)
{
  struct oldify_state* st = st_v;
  value result;
  header_t hd;
  mlsize_t sz, i;
  mlsize_t infix_offset;
  tag_t tag;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  CAMLassert (domain_state->young_start <= domain_state->young_ptr &&
              domain_state->young_ptr <= domain_state->young_end);

 tail_call:
  if (!(Is_block(v) && is_in_interval((value)Hp_val(v), young_ptr, young_end))) {
    /* not a minor block */
    *p = v;
    return;
  }

  infix_offset = 0;
  do {
    hd = Hd_val (v);
    if (hd == 0) {
      /* already forwarded, forward pointer is first field. */
      *p = Op_val(v)[0] + infix_offset;
      return;
    }
    tag = Tag_hd (hd);
    if (tag == Infix_tag) {
      /* Infix header, retry with the real block */
      CAMLassert (infix_offset == 0);
      infix_offset = Infix_offset_hd (hd);
      CAMLassert(infix_offset > 0);
      v -= infix_offset;
    }
  } while (tag == Infix_tag);

  /* Track the highest promoted address in the minor heap. */
  if (((value)Hp_val(v)) > st->oldest_promoted) {
    st->oldest_promoted = (value)Hp_val(v);
  }

  if (tag == Cont_tag) {
    /* Continuation: promote the one-word wrapper and scan its stack. */
    struct stack_info* stk = Ptr_val(Op_val(v)[0]);
    CAMLassert(Wosize_hd(hd) == 1 && infix_offset == 0);
    result = alloc_shared(1, Cont_tag);
    *p = result;
    Op_val(result)[0] = Val_ptr(stk);
    *Hp_val (v) = 0;
    Op_val(v)[0] = result;
    if (stk != NULL) caml_scan_stack(&oldify_one, st, stk);
  } else if (tag < Infix_tag) {
    value field0;
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared (sz, tag);
    *p = result + infix_offset;
    field0 = Op_val(v)[0];
    CAMLassert (!Is_debug_tag(field0));
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val(v)[0] = result;     /*  and forward pointer. */
    if (sz > 1){
      Op_val (result)[0] = field0;
      Op_val (result)[1] = st->todo_list;  /* Add this block */
      st->todo_list = v;                   /*  to the "to do" list. */
    }else{
      CAMLassert (sz == 1);
      /* Single scannable field: promote it now, iteratively. */
      p = Op_val(result);
      v = field0;
      goto tail_call;
    }
  } else if (tag >= No_scan_tag) {
    /* No pointers inside: copy the payload wholesale. */
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared(sz, tag);
    for (i = 0; i < sz; i++) {
      value curr = Op_val(v)[i];
      Op_val (result)[i] = curr;
    }
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val (v)[0] = result;    /*  and forward pointer. */
    CAMLassert (infix_offset == 0);
    *p = result;
  } else {
    CAMLassert (tag == Forward_tag);
    CAMLassert (infix_offset == 0);
    value f = Forward_val (v);
    tag_t ft = 0;
    if (Is_block (f)) {
      /* If [f] is itself forwarded, read the tag of its copy. */
      ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
    }
    if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag) {
      /* Do not short-circuit the pointer.  Copy as a normal block. */
      CAMLassert (Wosize_hd (hd) == 1);
      st->live_bytes += Bhsize_hd(hd);
      result = alloc_shared (1, Forward_tag);
      *p = result;
      *Hp_val (v) = 0;           /* Set (GC) forward flag */
      Op_val (v)[0] = result;    /*  and forward pointer. */
      p = Op_val (result);
      v = f;
      goto tail_call;
    } else {
      v = f;            /* Follow the forwarding */
      goto tail_call;   /*  then oldify. */
    }
  }
}
static void extern_rec_r(CAML_R, value v) { struct code_fragment * cf; struct extern_item * sp; sp = extern_stack; while(1) { //?????DUMP("QQQ 0x%lx, or %li ", v, v); if (Is_long(v)) { intnat n = Long_val(v); if (n >= 0 && n < 0x40) { Write(PREFIX_SMALL_INT + n); } else if (n >= -(1 << 7) && n < (1 << 7)) { writecode8_r(ctx, CODE_INT8, n); } else if (n >= -(1 << 15) && n < (1 << 15)) { writecode16_r(ctx, CODE_INT16, n); #ifdef ARCH_SIXTYFOUR } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) { writecode64_r(ctx, CODE_INT64, n); #endif } else writecode32_r(ctx, CODE_INT32, n); goto next_item; } if (Is_in_value_area(v)) { header_t hd = Hd_val(v); tag_t tag = Tag_hd(hd); mlsize_t sz = Wosize_hd(hd); //DUMP("dumping %p, tag %i, size %i", (void*)v, (int)tag, (int)sz); // !!!!!!!!!!!!!!! if (tag == Forward_tag) { value f = Forward_val (v); if (Is_block (f) && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){ /* Do not short-circuit the pointer. */ }else{ v = f; continue; } } /* Atoms are treated specially for two reasons: they are not allocated in the externed block, and they are automatically shared. 
*/ if (sz == 0) { if (tag < 16) { Write(PREFIX_SMALL_BLOCK + tag); } else { writecode32_r(ctx, CODE_BLOCK32, hd); } goto next_item; } /* Check if already seen */ if (Color_hd(hd) == Caml_blue) { uintnat d = obj_counter - (uintnat) Field(v, 0); if (d < 0x100) { writecode8_r(ctx, CODE_SHARED8, d); } else if (d < 0x10000) { writecode16_r(ctx, CODE_SHARED16, d); } else { writecode32_r(ctx, CODE_SHARED32, d); } goto next_item; } /* Output the contents of the object */ switch(tag) { case String_tag: { mlsize_t len = caml_string_length(v); if (len < 0x20) { Write(PREFIX_SMALL_STRING + len); } else if (len < 0x100) { writecode8_r(ctx, CODE_STRING8, len); } else { writecode32_r(ctx, CODE_STRING32, len); } writeblock_r(ctx, String_val(v), len); size_32 += 1 + (len + 4) / 4; size_64 += 1 + (len + 8) / 8; extern_record_location_r(ctx, v); break; } case Double_tag: { if (sizeof(double) != 8) extern_invalid_argument_r(ctx, "output_value: non-standard floats"); Write(CODE_DOUBLE_NATIVE); writeblock_float8((double *) v, 1); size_32 += 1 + 2; size_64 += 1 + 1; extern_record_location_r(ctx,v); break; } case Double_array_tag: { mlsize_t nfloats; if (sizeof(double) != 8) extern_invalid_argument_r(ctx, "output_value: non-standard floats"); nfloats = Wosize_val(v) / Double_wosize; if (nfloats < 0x100) { writecode8_r(ctx, CODE_DOUBLE_ARRAY8_NATIVE, nfloats); } else { writecode32_r(ctx, CODE_DOUBLE_ARRAY32_NATIVE, nfloats); } writeblock_float8((double *) v, nfloats); size_32 += 1 + nfloats * 2; size_64 += 1 + nfloats; extern_record_location_r(ctx, v); break; } case Abstract_tag: extern_invalid_argument_r(ctx, "output_value: abstract value (Abstract)"); break; case Infix_tag: writecode32_r(ctx,CODE_INFIXPOINTER, Infix_offset_hd(hd)); extern_rec_r(ctx, v - Infix_offset_hd(hd)); break; case Custom_tag: { uintnat sz_32, sz_64; char * ident = Custom_ops_val(v)->identifier; void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64); //printf("[object at %p, which is a %s custom: 
BEGIN\n", (void*)v, Custom_ops_val(v)->identifier); if(extern_cross_context){ //printf("About the object at %p, which is a %s custom: USING a cross-context serializer\n", (void*)v, Custom_ops_val(v)->identifier); serialize = Custom_ops_val(v)->cross_context_serialize; } else{ //printf("About the object at %p, which is a %s custom: NOT using a cross-context serializer\n", (void*)v, Custom_ops_val(v)->identifier); serialize = Custom_ops_val(v)->serialize; } //printf("Still alive 100\n"); if (serialize == NULL){ ////// //struct custom_operations *o = Custom_ops_val(v); //printf("About the object at %p, which is a %s custom\n", (void*)v, Custom_ops_val(v)->identifier); volatile int a = 1; a /= 0; /////////// extern_invalid_argument_r(ctx, "output_value: abstract value (Custom)"); } //printf("Still alive 200\n"); Write(CODE_CUSTOM); //printf("Still alive 300\n"); writeblock_r(ctx, ident, strlen(ident) + 1); //printf("Still alive 400\n"); serialize(v, &sz_32, &sz_64); //printf("Still alive 500\n"); size_32 += 2 + ((sz_32 + 3) >> 2); /* header + ops + data */ size_64 += 2 + ((sz_64 + 7) >> 3); //printf("Still alive 600\n"); extern_record_location_r(ctx,v); // This temporarily breaks the object, by replacing it with a forwarding pointer //printf("object at %p, which is a custom: END\n", (void*)v); break; } default: { value field0; if (tag < 16 && sz < 8) { Write(PREFIX_SMALL_BLOCK + tag + (sz << 4)); #ifdef ARCH_SIXTYFOUR } else if (hd >= ((uintnat)1 << 32)) { writecode64_r(ctx, CODE_BLOCK64, Whitehd_hd (hd)); #endif } else { writecode32_r(ctx, CODE_BLOCK32, Whitehd_hd (hd)); } size_32 += 1 + sz; size_64 += 1 + sz; field0 = Field(v, 0); extern_record_location_r(ctx, v); /* Remember that we still have to serialize fields 1 ... 
sz - 1 */ if (sz > 1) { sp++; if (sp >= extern_stack_limit) sp = extern_resize_stack_r(ctx, sp); sp->v = &Field(v,1); sp->count = sz-1; } /* Continue serialization with the first field */ v = field0; continue; } } } else if ((cf = extern_find_code_r(ctx, (char *) v)) != NULL) { if (!extern_closures){ extern_invalid_argument_r(ctx, "output_value: functional value"); // FIXME: this is the correct version. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! //DUMP("output_value: functional value"); {volatile int a = 1; a /= 0;} } //fprintf(stderr, "ZZZZ dumping a code pointer: BEGIN\n"); //DUMP("dumping a code pointer 0x%lx, or %li; code start is at %p", v, v, cf->code_start); writecode32_r(ctx, CODE_CODEPOINTER, (char *) v - cf->code_start); writeblock_r(ctx, (char *) cf->digest, 16); //dump_digest(cf->digest); //fprintf(stderr, "ZZZZ dumping a code pointer: END\n"); } else { if(extern_cross_context){ fprintf(stderr, "ZZZZ working on the external pointer: %p, which is to say %li [cf is %p]\n", (void*)v, (long)v, cf); //fprintf(stderr, "ZZZZ I'm doing a horrible, horrible thing: serializing the pointer as a tagged 0.\n"); DUMP("about to crash in the strange case I'm debugging"); /* DUMP("the object is 0x%lx, or %li ", v, v); */ /* DUMP("probably crashing now"); */ /* DUMP("tag is %i", (int)Tag_val(v)); */ /* DUMP("size is %i", (int)Wosize_val(v)); */ //volatile int a = 1; a /= 0; //extern_rec_r(ctx, Val_int(0)); /* fprintf(stderr, "ZZZZ [This is probably wrong: I'm marshalling an out-of-heap pointer as an int64]\n"); */ /* writecode64_r(ctx, CODE_INT64, (v << 1) | 1); */ extern_invalid_argument_r(ctx, "output_value: abstract value (outside heap) [FIXME: implement]"); } else extern_invalid_argument_r(ctx, "output_value: abstract value (outside heap)"); } next_item: /* Pop one more item to marshal, if any */ if (sp == extern_stack) { /* We are done. Cleanup the stack and leave the function */ extern_free_stack_r(ctx); return; } v = *((sp->v)++); if (--(sp->count) == 0) sp--; }