/* Promote the value [v] out of the minor heap, storing the promoted value
   in [*p].  Minor blocks are copied to the major heap with [caml_alloc_shr];
   the original minor block is then overwritten with a forwarding pointer
   (header set to 0, field 0 pointing at the copy).  Scannable blocks of
   size > 1 are chained onto [oldify_todo_list] so that their remaining
   fields are oldified later by the mop-up phase.  Non-minor values are
   stored in [*p] unchanged. */
void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_young_ptr);
    hd = Hd_val (v);
    if (hd == 0){         /* If already forwarded */
      *p = Field (v, 0);  /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;

        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        *p = result;
        field0 = Field (v, 0);
        Hd_val (v) = 0;            /* Set forward flag */
        Field (v, 0) = result;     /*  and forward pointer. */
        if (sz > 1){
          Field (result, 0) = field0;
          Field (result, 1) = oldify_todo_list;    /* Add this block */
          oldify_todo_list = v;                    /*  to the "to do" list. */
        }else{
          Assert (sz == 1);
          /* Single-field block: oldify the field via the tail call to
             bound C stack usage on long pointer chains. */
          p = &Field (result, 0);
          v = field0;
          goto tail_call;
        }
      }else if (tag >= No_scan_tag){
        /* No-scan block: raw copy of the fields, nothing to oldify. */
        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        for (i = 0; i < sz; i++) Field (result, i) = Field (v, i);
        Hd_val (v) = 0;            /* Set forward flag */
        Field (v, 0) = result;     /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        /* Interior pointer: oldify the enclosing closure block, then
           re-offset the result. */
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      }else{
        value f = Forward_val (v);
        tag_t ft = 0;
        int vv = 1;

        Assert (tag == Forward_tag);
        if (Is_block (f)){
          vv = Is_in_value_area(f);
          if (vv) {
            /* If [f] itself was already forwarded, inspect the tag of
               its copy rather than the forwarding word. */
            ft = Tag_val (Hd_val (f) == 0 ? Field (f, 0) : f);
          }
        }
        if (!vv || ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = caml_alloc_shr (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;             /* Set (GC) forward flag */
          Field (v, 0) = result;      /*  and forward pointer. */
          p = &Field (result, 0);
          v = f;
          goto tail_call;
        }else{
          v = f;                      /* Follow the forwarding */
          goto tail_call;             /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;   /* Immediate value or major-heap block: keep as is. */
  }
}
/* Compact the major heap in place using the classic pointer-inversion
   algorithm:
     pass 1 encodes every non-infix header;
     pass 2 inverts all pointers (each object header becomes the head of
            the list of fields that point to it);
     pass 3 virtually reallocates objects, reverts the inverted pointers
            to the objects' future addresses, and decodes headers;
     pass 4 moves the objects, using the exact same allocation order.
   Finally the heap is shrunk if possible and the free list rebuilt.
   Must only be called when no major GC cycle is in progress. */
void caml_compact_heap (void)
{
  char *ch, *chend;

  Assert (caml_gc_phase == Phase_idle);
  caml_gc_message (0x10, "Compacting heap...\n", 0);

#ifdef DEBUG
  caml_heap_check ();
#endif

  /* First pass: encode all noninfix headers. */
  {
    ch = caml_heap_start;
    while (ch != NULL){
      header_t *p = (header_t *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        header_t hd = Hd_hp (p);
        mlsize_t sz = Wosize_hd (hd);

        if (Is_blue_hd (hd)){
          /* Free object.  Give it a string tag. */
          Hd_hp (p) = Make_ehd (sz, String_tag, 3);
        }else{
          Assert (Is_white_hd (hd));
          /* Live object.  Keep its tag. */
          Hd_hp (p) = Make_ehd (sz, Tag_hd (hd), 3);
        }
        p += Whsize_wosize (sz);
      }
      ch = Chunk_next (ch);
    }
  }

  /* Second pass: invert pointers.
     Link infix headers in each block in an inverted list of inverted lists.
     Don't forget roots and weak pointers. */
  {
    /* Invert roots first because the threads library needs some heap
       data structures to find its roots.  Fortunately, it doesn't need
       the headers (see above). */
    caml_do_roots (invert_root);
    caml_final_do_weak_roots (invert_root);

    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        size_t sz, i;
        tag_t t;
        word *infixes;

        /* Follow the inverted-pointer chain down to the encoded header. */
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Whsize_ehd (q);
        t = Tag_ehd (q);

        if (t == Infix_tag){
          /* Get the original header of this block. */
          infixes = p + sz;
          q = *infixes;
          while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);
        }

        if (t < No_scan_tag){
          for (i = 1; i < sz; i++) invert_pointer_at (&(p[i]));
        }
        p += sz;
      }
      ch = Chunk_next (ch);
    }

    /* Invert weak pointers. */
    {
      value *pp = &caml_weak_list_head;
      value p;
      word q;
      size_t sz, i;

      while (1){
        p = *pp;
        if (p == (value) NULL) break;
        q = Hd_val (p);
        while (Ecolor (q) == 0) q = * (word *) q;
        sz = Wosize_ehd (q);
        for (i = 1; i < sz; i++){
          if (Field (p,i) != caml_weak_none){
            invert_pointer_at ((word *) &(Field (p,i)));
          }
        }
        /* Field 0 is the next-weak-block link: invert it via [pp]. */
        invert_pointer_at ((word *) pp);
        pp = &Field (p, 0);
      }
    }
  }

  /* Third pass: reallocate virtually; revert pointers; decode headers.
     Rebuild infix headers. */
  {
    init_compact_allocate ();
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Ecolor (q) == 0 || Tag_ehd (q) == Infix_tag){
          /* There were (normal or infix) pointers to this block. */
          size_t sz;
          tag_t t;
          char *newadr;
          word *infixes = NULL;

          while (Ecolor (q) == 0) q = * (word *) q;
          sz = Whsize_ehd (q);
          t = Tag_ehd (q);

          if (t == Infix_tag){
            /* Get the original header of this block. */
            infixes = p + sz;
            q = *infixes;
            Assert (Ecolor (q) == 2);
            while (Ecolor (q) != 3) q = * (word *) (q & ~(uintnat)3);
            sz = Whsize_ehd (q);
            t = Tag_ehd (q);
          }

          newadr = compact_allocate (Bsize_wsize (sz));
          q = *p;
          /* Revert the inverted list: each field that pointed here now
             receives the block's future address. */
          while (Ecolor (q) == 0){
            word next = * (word *) q;
            * (word *) q = (word) Val_hp (newadr);
            q = next;
          }
          *p = Make_header (Wosize_whsize (sz), t, Caml_white);

          if (infixes != NULL){
            /* Rebuild the infix headers and revert the infix pointers. */
            while (Ecolor ((word) infixes) != 3){
              infixes = (word *) ((word) infixes & ~(uintnat) 3);
              q = *infixes;
              while (Ecolor (q) == 2){
                word next;
                q = (word) q & ~(uintnat) 3;
                next = * (word *) q;
                * (word *) q = (word) Val_hp ((word *) newadr + (infixes - p));
                q = next;
              }
              Assert (Ecolor (q) == 1 || Ecolor (q) == 3);
              *infixes = Make_header (infixes - p, Infix_tag, Caml_white);
              infixes = (word *) q;
            }
          }
          p += sz;
        }else{
          Assert (Ecolor (q) == 3);
          /* This is guaranteed only if caml_compact_heap was called after a
             nonincremental major GC:
                Assert (Tag_ehd (q) == String_tag);
          */
          /* No pointers to the header and no infix header: the object was
             free. */
          *p = Make_header (Wosize_ehd (q), Tag_ehd (q), Caml_blue);
          p += Whsize_ehd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Fourth pass: reallocate and move objects.
     Use the exact same allocation algorithm as pass 3. */
  {
    init_compact_allocate ();
    ch = caml_heap_start;
    while (ch != NULL){
      word *p = (word *) ch;

      chend = ch + Chunk_size (ch);
      while ((char *) p < chend){
        word q = *p;
        if (Color_hd (q) == Caml_white){
          size_t sz = Bhsize_hd (q);
          char *newadr = compact_allocate (sz);

          /* Compaction only ever moves objects toward lower addresses,
             so the regions may overlap: use memmove. */
          Assert (newadr <= (char *)p);
          memmove (newadr, p, sz);
          p += Wsize_bsize (sz);
        }else{
          Assert (Color_hd (q) == Caml_blue);
          p += Whsize_hd (q);
        }
      }
      ch = Chunk_next (ch);
    }
  }

  /* Shrink the heap if needed. */
  {
    /* Find the amount of live data and the unshrinkable free space. */
    asize_t live = 0;
    asize_t free = 0;
    asize_t wanted;

    ch = caml_heap_start;
    while (ch != NULL){
      if (Chunk_alloc (ch) != 0){
        live += Wsize_bsize (Chunk_alloc (ch));
        free += Wsize_bsize (Chunk_size (ch) - Chunk_alloc (ch));
      }
      ch = Chunk_next (ch);
    }

    /* Add up the empty chunks until there are enough, then remove the
       other empty chunks. */
    wanted = caml_percent_free * (live / 100 + 1);
    ch = caml_heap_start;
    while (ch != NULL){
      char *next_chunk = Chunk_next (ch);  /* Chunk_next (ch) will be erased */

      if (Chunk_alloc (ch) == 0){
        if (free < wanted){
          free += Wsize_bsize (Chunk_size (ch));
        }else{
          caml_shrink_heap (ch);
        }
      }
      ch = next_chunk;
    }
  }

  /* Rebuild the free list. */
  {
    ch = caml_heap_start;
    caml_fl_reset ();
    while (ch != NULL){
      if (Chunk_size (ch) > Chunk_alloc (ch)){
        caml_make_free_blocks ((value *) (ch + Chunk_alloc (ch)),
                               Wsize_bsize (Chunk_size(ch)-Chunk_alloc(ch)),
                               1);
      }
      ch = Chunk_next (ch);
    }
  }
  ++ caml_stat_compactions;
  caml_gc_message (0x10, "done.\n", 0);
}
/* Marshal the value [v] to the current output channel/buffer.
   Iterative serializer: instead of recursing on every field, remaining
   fields of a block are pushed on [extern_stack] and popped at
   [next_item].  Sharing is detected via blue-colored headers whose first
   field holds the object counter at first visit (see
   [extern_record_location]).  [size_32]/[size_64] accumulate the size of
   the marshaled data on 32-bit and 64-bit platforms.
   Fix: the function's closing brace was missing in the previous revision
   (the final '}' only closed the while(1) loop). */
static void extern_rec(value v)
{
  struct code_fragment * cf;
  struct extern_item * sp;
  sp = extern_stack;

  while(1) {
  if (Is_long(v)) {
    /* Immediate integer: pick the shortest encoding that fits. */
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8(CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16(CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64(CODE_INT64, n);
#endif
    } else
      writecode32(CODE_INT32, n);
    goto next_item;
  }
  if (Is_in_value_area(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f)
          && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        continue;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32(CODE_BLOCK32, hd);
      }
      goto next_item;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      /* Emit a back-reference: distance from the current object counter. */
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8(CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16(CODE_SHARED16, d);
      } else {
        writecode32(CODE_SHARED32, d);
      }
      goto next_item;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8(CODE_STRING8, len);
      } else {
        writecode32(CODE_STRING32, len);
      }
      writeblock(String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location(v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location(v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument("output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location(v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument("output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      /* Interior pointer: emit the offset, then marshal the enclosing
         closure block. */
      writecode32(CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec(v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64)
        = Custom_ops_val(v)->serialize;
      if (serialize == NULL)
        extern_invalid_argument("output_value: abstract value (Custom)");
      Write(CODE_CUSTOM);
      writeblock(ident, strlen(ident) + 1);
      Custom_ops_val(v)->serialize(v, &sz_32, &sz_64);
      size_32 += 2 + ((sz_32 + 3) >> 2);  /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      extern_record_location(v);
      break;
    }
    default: {
      value field0;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64(CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32(CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      /* Read field 0 before extern_record_location overwrites it with
         the sharing counter. */
      field0 = Field(v, 0);
      extern_record_location(v);
      /* Remember that we still have to serialize fields 1 ... sz - 1 */
      if (sz > 1) {
        sp++;
        if (sp >= extern_stack_limit) sp = extern_resize_stack(sp);
        sp->v = &Field(v,1);
        sp->count = sz-1;
      }
      /* Continue serialization with the first field */
      v = field0;
      continue;
    }
    }
  }
  else if ((cf = extern_find_code((char *) v)) != NULL) {
    if (!extern_closures)
      extern_invalid_argument("output_value: functional value");
    writecode32(CODE_CODEPOINTER, (char *) v - cf->code_start);
    writeblock((char *) cf->digest, 16);
  } else {
    extern_invalid_argument("output_value: abstract value (outside heap)");
  }
  next_item:
    /* Pop one more item to marshal, if any */
    if (sp == extern_stack) {
      /* We are done.   Cleanup the stack and leave the function */
      extern_free_stack();
      return;
    }
    v = *((sp->v)++);
    if (--(sp->count) == 0) sp--;
  }
}
/* Report [event] to the attached debugger (if any) over the debugger
   socket, then enter the command loop: read requests from [dbg_in],
   execute them against the bytecode interpreter state, and reply on
   [dbg_out], until the debugger sends REQ_GO (resume execution) or
   REQ_STOP (exit the program). */
void caml_debugger(enum event_kind event)
{
  int frame_number;
  value * frame;
  intnat i, pos;
  value val;

  if (dbg_socket == -1) return;  /* Not connected to a debugger. */

  /* Reset current frame */
  frame_number = 0;
  frame = caml_extern_sp + 1;

  /* Report the event to the debugger */
  switch(event) {
  case PROGRAM_START:           /* Nothing to report */
    goto command_loop;
  case EVENT_COUNT:
    putch(dbg_out, REP_EVENT);
    break;
  case BREAKPOINT:
    putch(dbg_out, REP_BREAKPOINT);
    break;
  case PROGRAM_EXIT:
    putch(dbg_out, REP_EXITED);
    break;
  case TRAP_BARRIER:
    putch(dbg_out, REP_TRAP);
    break;
  case UNCAUGHT_EXC:
    putch(dbg_out, REP_UNCAUGHT_EXC);
    break;
  }
  caml_putword(dbg_out, caml_event_count);
  if (event == EVENT_COUNT || event == BREAKPOINT) {
    caml_putword(dbg_out, caml_stack_high - frame);
    caml_putword(dbg_out, (Pc(frame) - caml_start_code) * sizeof(opcode_t));
  } else {
    /* No PC and no stack frame associated with other events */
    caml_putword(dbg_out, 0);
    caml_putword(dbg_out, 0);
  }
  caml_flush(dbg_out);

 command_loop:

  /* Read and execute the commands sent by the debugger */
  while(1) {
    switch(getch(dbg_in)) {
    case REQ_SET_EVENT:
      pos = caml_getword(dbg_in);
      Assert (pos >= 0);
      Assert (pos < caml_code_size);
      caml_set_instruction(caml_start_code + pos / sizeof(opcode_t), EVENT);
      break;
    case REQ_SET_BREAKPOINT:
      pos = caml_getword(dbg_in);
      Assert (pos >= 0);
      Assert (pos < caml_code_size);
      caml_set_instruction(caml_start_code + pos / sizeof(opcode_t), BREAK);
      break;
    case REQ_RESET_INSTR:
      /* Restore the original opcode saved when the breakpoint/event was
         planted. */
      pos = caml_getword(dbg_in);
      Assert (pos >= 0);
      Assert (pos < caml_code_size);
      pos = pos / sizeof(opcode_t);
      caml_set_instruction(caml_start_code + pos, caml_saved_code[pos]);
      break;
    case REQ_CHECKPOINT:
#ifndef _WIN32
      /* Fork a checkpoint process; the child re-opens its own debugger
         connection, the parent reports the child's pid. */
      i = fork();
      if (i == 0) {
        close_connection();     /* Close parent connection. */
        open_connection();      /* Open new connection with debugger */
      } else {
        caml_putword(dbg_out, i);
        caml_flush(dbg_out);
      }
#else
      caml_fatal_error("error: REQ_CHECKPOINT command");
      exit(-1);
#endif
      break;
    case REQ_GO:
      caml_event_count = caml_getword(dbg_in);
      return;
    case REQ_STOP:
      exit(0);
      break;
    case REQ_WAIT:
#ifndef _WIN32
      wait(NULL);
#else
      caml_fatal_error("Fatal error: REQ_WAIT command");
      exit(-1);
#endif
      break;
    case REQ_INITIAL_FRAME:
      frame = caml_extern_sp + 1;
      /* Fall through */
    case REQ_GET_FRAME:
      caml_putword(dbg_out, caml_stack_high - frame);
      if (frame < caml_stack_high){
        caml_putword(dbg_out, (Pc(frame) - caml_start_code) * sizeof(opcode_t));
      }else{
        caml_putword (dbg_out, 0);
      }
      caml_flush(dbg_out);
      break;
    case REQ_SET_FRAME:
      i = caml_getword(dbg_in);
      frame = caml_stack_high - i;
      break;
    case REQ_UP_FRAME:
      /* Move one frame up the stack; reply -1 if already at the top. */
      i = caml_getword(dbg_in);
      if (frame + Extra_args(frame) + i + 3 >= caml_stack_high) {
        caml_putword(dbg_out, -1);
      } else {
        frame += Extra_args(frame) + i + 3;
        caml_putword(dbg_out, caml_stack_high - frame);
        caml_putword(dbg_out, (Pc(frame) - caml_start_code) * sizeof(opcode_t));
      }
      caml_flush(dbg_out);
      break;
    case REQ_SET_TRAP_BARRIER:
      i = caml_getword(dbg_in);
      caml_trap_barrier = caml_stack_high - i;
      break;
    case REQ_GET_LOCAL:
      i = caml_getword(dbg_in);
      putval(dbg_out, Locals(frame)[i]);
      caml_flush(dbg_out);
      break;
    case REQ_GET_ENVIRONMENT:
      i = caml_getword(dbg_in);
      putval(dbg_out, Field(Env(frame), i));
      caml_flush(dbg_out);
      break;
    case REQ_GET_GLOBAL:
      i = caml_getword(dbg_in);
      putval(dbg_out, Field(caml_global_data, i));
      caml_flush(dbg_out);
      break;
    case REQ_GET_ACCU:
      putval(dbg_out, *caml_extern_sp);
      caml_flush(dbg_out);
      break;
    case REQ_GET_HEADER:
      val = getval(dbg_in);
      caml_putword(dbg_out, Hd_val(val));
      caml_flush(dbg_out);
      break;
    case REQ_GET_FIELD:
      val = getval(dbg_in);
      i = caml_getword(dbg_in);
      if (Tag_val(val) != Double_array_tag) {
        putch(dbg_out, 0);
        putval(dbg_out, Field(val, i));
      } else {
        /* Float arrays are unboxed: send the raw 8-byte double,
           flagged with a leading 1 byte. */
        double d = Double_field(val, i);
        putch(dbg_out, 1);
        caml_really_putblock(dbg_out, (char *) &d, 8);
      }
      caml_flush(dbg_out);
      break;
    case REQ_MARSHAL_OBJ:
      val = getval(dbg_in);
      safe_output_value(dbg_out, val);
      caml_flush(dbg_out);
      break;
    case REQ_GET_CLOSURE_CODE:
      val = getval(dbg_in);
      caml_putword(dbg_out, (Code_val(val)-caml_start_code) * sizeof(opcode_t));
      caml_flush(dbg_out);
      break;
    case REQ_SET_FORK_MODE:
      caml_debugger_fork_mode = caml_getword(dbg_in);
      break;
    }
  }
}
/* Finish the work that was put off by [oldify_one].
   Note that [oldify_one] itself is called by oldify_mopup, so we have to
   be careful to remove the first entry from the list before oldifying its
   fields. */
static void oldify_mopup (struct oldify_state* st)
{
  value v, new_v, f;
  mlsize_t i;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  struct caml_ephe_ref_table ephe_ref_table =
    domain_state->minor_tables->ephe_ref;
  struct caml_ephe_ref_elt *re;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  int redo = 0;

  while (st->todo_list != 0) {
    v = st->todo_list;                /* Get the head. */
    CAMLassert (Hd_val (v) == 0);     /* It must be forwarded. */
    new_v = Op_val (v)[0];            /* Follow forward pointer. */
    st->todo_list = Op_val (new_v)[1];  /* Remove from list. */

    /* Field 0 of the copy temporarily held the original field 0
       (see oldify_one); oldify it now if it is still a minor block. */
    f = Op_val (new_v)[0];
    CAMLassert (!Is_debug_tag(f));
    if (Is_block (f) && is_in_interval((value)Hp_val(v), young_ptr, young_end)) {
      oldify_one (st, f, Op_val (new_v));
    }

    for (i = 1; i < Wosize_val (new_v); i++){
      f = Op_val (v)[i];
      CAMLassert (!Is_debug_tag(f));
      if (Is_block (f) && is_in_interval((value)Hp_val(v), young_ptr, young_end)) {
        oldify_one (st, f, Op_val (new_v) + i);
      } else {
        Op_val (new_v)[i] = f;
      }
    }
    CAMLassert (Wosize_val(new_v));
  }

  /* Oldify the data in the minor heap of alive ephemeron
     During minor collection keys outside the minor heap are considered
     alive */
  for (re = ephe_ref_table.base;
       re < ephe_ref_table.ptr; re++) {
    /* look only at ephemeron with data in the minor heap */
    if (re->offset == CAML_EPHE_DATA_OFFSET) {
      value *data = &Ephe_data(re->ephe);
      if (*data != caml_ephe_none && Is_block(*data)
          && is_in_interval(*data, young_ptr, young_end)) {
        resolve_infix_val(data);
        if (Hd_val(*data) == 0) { /* Value copied to major heap */
          *data = Op_val(*data)[0];
        } else {
          if (ephe_check_alive_data(re, young_ptr, young_end)) {
            oldify_one(st, *data, data);
            redo = 1; /* oldify_todo_list can still be 0 */
          }
        }
      }
    }
  }

  /* Promoting ephemeron data may have refilled the todo list: re-run. */
  if (redo) oldify_mopup (st);
}
/* Perform a minor collection for [domain]: promote all live minor-heap
   values to the major heap, update ephemeron keys, rewrite the entries of
   the remembered set (major_ref) to point at the promoted copies, run
   custom-block finalisers for dead minor values, clear the minor tables
   and reset the young pointer.  Must run without allocating (CAMLnoalloc). */
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all young-young
       pointers from older to younger objects */
    {
      struct addrmap young_young_ptrs = ADDRMAP_INIT;
      mlsize_t i;
      value iter;
      for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
        *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
      }
      for (iter = (value)young_ptr;
           iter < (value)young_end;
           iter = next_minor_block(domain_state, iter)) {
        value hd = Hd_hp(iter);
        if (hd != 0) {
          value curr = Val_hp(iter);
          tag_t tag = Tag_hd (hd);

          if (tag < No_scan_tag && tag != Cont_tag) {
            // FIXME: should scan Cont_tag
            for (i = 0; i < Wosize_hd(hd); i++) {
              value* f = Op_val(curr) + i;
              if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end) &&
                  *f < curr) {
                CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
              }
            }
          }
        }
      }
      caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      /* Promote via a local copy; the table entry itself is rewritten
         in the update_minor_tables phase below. */
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key)
          && is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) { /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          /* Key is dead: the ephemeron's key and data are both cleared. */
          CAMLassert(!ephe_check_alive_data(re,young_ptr,young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) && is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          /* Another domain may have raced on this slot: CAS and count
             the outcome. */
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base; elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* Block was copied to the major heap: adjust GC speed numbers. */
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");

    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024, rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  {
    /* Poison the now-empty minor heap to catch dangling references. */
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
/* Note that the tests on the tag depend on the fact that Infix_tag,
   Forward_tag, and No_scan_tag are contiguous. */
/* Promote the minor-heap value [v] into the shared (major) heap and store
   the promoted value in [*p].  Multicore variant of oldify: continuation
   (Cont_tag) blocks additionally trigger a scan of the attached stack,
   and infix pointers are resolved up front to the enclosing block. */
static void oldify_one (void* st_v, value v, value *p)
{
  struct oldify_state* st = st_v;
  value result;
  header_t hd;
  mlsize_t sz, i;
  mlsize_t infix_offset;
  tag_t tag;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  CAMLassert (domain_state->young_start <= domain_state->young_ptr &&
              domain_state->young_ptr <= domain_state->young_end);

 tail_call:
  if (!(Is_block(v) && is_in_interval((value)Hp_val(v), young_ptr, young_end))) {
    /* not a minor block */
    *p = v;
    return;
  }

  infix_offset = 0;
  do {
    hd = Hd_val (v);
    if (hd == 0) {
      /* already forwarded, forward pointer is first field. */
      *p = Op_val(v)[0] + infix_offset;
      return;
    }
    tag = Tag_hd (hd);
    if (tag == Infix_tag) {
      /* Infix header, retry with the real block */
      CAMLassert (infix_offset == 0);
      infix_offset = Infix_offset_hd (hd);
      CAMLassert(infix_offset > 0);
      v -= infix_offset;
    }
  } while (tag == Infix_tag);

  /* Track the highest minor address promoted so far. */
  if (((value)Hp_val(v)) > st->oldest_promoted) {
    st->oldest_promoted = (value)Hp_val(v);
  }

  if (tag == Cont_tag) {
    /* Continuation: copy the one-field block, then scan its stack. */
    struct stack_info* stk = Ptr_val(Op_val(v)[0]);
    CAMLassert(Wosize_hd(hd) == 1 && infix_offset == 0);
    result = alloc_shared(1, Cont_tag);
    *p = result;
    Op_val(result)[0] = Val_ptr(stk);
    *Hp_val (v) = 0;
    Op_val(v)[0] = result;
    if (stk != NULL)
      caml_scan_stack(&oldify_one, st, stk);
  } else if (tag < Infix_tag) {
    value field0;
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared (sz, tag);
    *p = result + infix_offset;
    field0 = Op_val(v)[0];
    CAMLassert (!Is_debug_tag(field0));
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val(v)[0] = result;     /*  and forward pointer. */
    if (sz > 1){
      Op_val (result)[0] = field0;
      Op_val (result)[1] = st->todo_list;    /* Add this block */
      st->todo_list = v;                     /*  to the "to do" list. */
    }else{
      CAMLassert (sz == 1);
      /* Single-field block: oldify the field via the tail call. */
      p = Op_val(result);
      v = field0;
      goto tail_call;
    }
  } else if (tag >= No_scan_tag) {
    /* No-scan block: raw copy. */
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared(sz, tag);
    for (i = 0; i < sz; i++) {
      value curr = Op_val(v)[i];
      Op_val (result)[i] = curr;
    }
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val (v)[0] = result;    /*  and forward pointer. */
    CAMLassert (infix_offset == 0);
    *p = result;
  } else {
    CAMLassert (tag == Forward_tag);
    CAMLassert (infix_offset == 0);

    value f = Forward_val (v);
    tag_t ft = 0;

    if (Is_block (f)) {
      /* If [f] was already forwarded, inspect the tag of its copy. */
      ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
    }

    if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag) {
      /* Do not short-circuit the pointer.  Copy as a normal block. */
      CAMLassert (Wosize_hd (hd) == 1);
      st->live_bytes += Bhsize_hd(hd);
      result = alloc_shared (1, Forward_tag);
      *p = result;
      *Hp_val (v) = 0;          /* Set (GC) forward flag */
      Op_val (v)[0] = result;   /*  and forward pointer. */
      p = Op_val (result);
      v = f;
      goto tail_call;
    } else {
      v = f;                    /* Follow the forwarding */
      goto tail_call;           /*  then oldify. */
    }
  }
}
/* [caml_fl_merge_block] returns the head pointer of the next block after
   [bp], because merging blocks may change the size of [bp]. */
/* Insert the freed block [bp] into the free list, coalescing it with the
   preceding fragment, the following free block ([cur]) and the preceding
   free block ([prev] == caml_fl_merge) when they are adjacent and the
   merged size stays within Max_wosize.  Zero-sized leftovers become
   fragments remembered in [last_fragment]. */
header_t *caml_fl_merge_block (value bp)
{
  value prev, cur;
  header_t *adj;
  header_t hd = Hd_val (bp);
  mlsize_t prev_wosz;

  caml_fl_cur_wsz += Whsize_hd (hd);

#ifdef DEBUG
  caml_set_fields (bp, 0, Debug_free_major);
#endif
  prev = caml_fl_merge;
  cur = Next (prev);
  /* The sweep code makes sure that this is the right place to insert
     this block: */
  Assert (prev < bp || prev == Fl_head);
  Assert (cur > bp || cur == Val_NULL);

  if (policy == Policy_first_fit) truncate_flp (prev);

  /* If [last_fragment] and [bp] are adjacent, merge them. */
  if (last_fragment == Hp_bp (bp)){
    mlsize_t bp_whsz = Whsize_val (bp);
    if (bp_whsz <= Max_wosize){
      hd = Make_header (bp_whsz, 0, Caml_white);
      bp = (value) last_fragment;
      Hd_val (bp) = hd;
      caml_fl_cur_wsz += Whsize_wosize (0);
    }
  }

  /* If [bp] and [cur] are adjacent, remove [cur] from the free-list
     and merge them. */
  adj = (header_t *) &Field (bp, Wosize_hd (hd));
  if (adj == Hp_val (cur)){
    value next_cur = Next (cur);
    mlsize_t cur_whsz = Whsize_val (cur);

    if (Wosize_hd (hd) + cur_whsz <= Max_wosize){
      Next (prev) = next_cur;
      if (policy == Policy_next_fit && fl_prev == cur) fl_prev = prev;
      hd = Make_header (Wosize_hd (hd) + cur_whsz, 0, Caml_blue);
      Hd_val (bp) = hd;
      adj = (header_t *) &Field (bp, Wosize_hd (hd));
#ifdef DEBUG
      fl_last = Val_NULL;
      Next (cur) = (value) Debug_free_major;
      Hd_val (cur) = Debug_free_major;
#endif
      cur = next_cur;
    }
  }
  /* If [prev] and [bp] are adjacent merge them, else insert [bp] into
     the free-list if it is big enough. */
  prev_wosz = Wosize_val (prev);
  if ((header_t *) &Field (prev, prev_wosz) == Hp_val (bp)
      && prev_wosz + Whsize_hd (hd) < Max_wosize){
    Hd_val (prev) = Make_header (prev_wosz + Whsize_hd (hd), 0,Caml_blue);
#ifdef DEBUG
    Hd_val (bp) = Debug_free_major;
#endif
    Assert (caml_fl_merge == prev);
  }else if (Wosize_hd (hd) != 0){
    Hd_val (bp) = Bluehd_hd (hd);
    Next (bp) = cur;
    Next (prev) = bp;
    caml_fl_merge = bp;
  }else{
    /* This is a fragment.  Leave it in white but remember it for eventual
       merging with the next block. */
    last_fragment = (header_t *) bp;
    caml_fl_cur_wsz -= Whsize_wosize (0);
  }
  return adj;
}
/* Return the header word of the block [v] as a plain header_t.
   Thin function wrapper over the Hd_val macro. */
header_t hd_val (value v)
{
  header_t h;

  h = (header_t) Hd_val (v);
  return h;
}
/* Context-aware marshaller: serialize the value [v] into the output
   buffer of context [ctx].  Same iterative algorithm as extern_rec
   (explicit [extern_stack], sharing via blue headers, shortest-fitting
   integer codes), with two context-specific extensions:
     - custom blocks use the cross-context serializer when
       [extern_cross_context] is set;
     - out-of-heap pointers in cross-context mode are (still) rejected,
       with extra diagnostics left in place by the original author.
   Fix: the function's closing brace was missing in the previous revision
   (the final '}' only closed the while(1) loop). */
static void extern_rec_r(CAML_R, value v)
{
  struct code_fragment * cf;
  struct extern_item * sp;
  sp = extern_stack;

  while(1) {
  if (Is_long(v)) {
    /* Immediate integer: pick the shortest encoding that fits. */
    intnat n = Long_val(v);
    if (n >= 0 && n < 0x40) {
      Write(PREFIX_SMALL_INT + n);
    } else if (n >= -(1 << 7) && n < (1 << 7)) {
      writecode8_r(ctx, CODE_INT8, n);
    } else if (n >= -(1 << 15) && n < (1 << 15)) {
      writecode16_r(ctx, CODE_INT16, n);
#ifdef ARCH_SIXTYFOUR
    } else if (n < -((intnat)1 << 31) || n >= ((intnat)1 << 31)) {
      writecode64_r(ctx, CODE_INT64, n);
#endif
    } else
      writecode32_r(ctx, CODE_INT32, n);
    goto next_item;
  }
  if (Is_in_value_area(v)) {
    header_t hd = Hd_val(v);
    tag_t tag = Tag_hd(hd);
    mlsize_t sz = Wosize_hd(hd);

    if (tag == Forward_tag) {
      value f = Forward_val (v);
      if (Is_block (f)
          && (!Is_in_value_area(f) || Tag_val (f) == Forward_tag
              || Tag_val (f) == Lazy_tag || Tag_val (f) == Double_tag)){
        /* Do not short-circuit the pointer. */
      }else{
        v = f;
        continue;
      }
    }
    /* Atoms are treated specially for two reasons: they are not allocated
       in the externed block, and they are automatically shared. */
    if (sz == 0) {
      if (tag < 16) {
        Write(PREFIX_SMALL_BLOCK + tag);
      } else {
        writecode32_r(ctx, CODE_BLOCK32, hd);
      }
      goto next_item;
    }
    /* Check if already seen */
    if (Color_hd(hd) == Caml_blue) {
      /* Emit a back-reference: distance from the current object counter. */
      uintnat d = obj_counter - (uintnat) Field(v, 0);
      if (d < 0x100) {
        writecode8_r(ctx, CODE_SHARED8, d);
      } else if (d < 0x10000) {
        writecode16_r(ctx, CODE_SHARED16, d);
      } else {
        writecode32_r(ctx, CODE_SHARED32, d);
      }
      goto next_item;
    }

    /* Output the contents of the object */
    switch(tag) {
    case String_tag: {
      mlsize_t len = caml_string_length(v);
      if (len < 0x20) {
        Write(PREFIX_SMALL_STRING + len);
      } else if (len < 0x100) {
        writecode8_r(ctx, CODE_STRING8, len);
      } else {
        writecode32_r(ctx, CODE_STRING32, len);
      }
      writeblock_r(ctx, String_val(v), len);
      size_32 += 1 + (len + 4) / 4;
      size_64 += 1 + (len + 8) / 8;
      extern_record_location_r(ctx, v);
      break;
    }
    case Double_tag: {
      if (sizeof(double) != 8)
        extern_invalid_argument_r(ctx, "output_value: non-standard floats");
      Write(CODE_DOUBLE_NATIVE);
      writeblock_float8((double *) v, 1);
      size_32 += 1 + 2;
      size_64 += 1 + 1;
      extern_record_location_r(ctx,v);
      break;
    }
    case Double_array_tag: {
      mlsize_t nfloats;
      if (sizeof(double) != 8)
        extern_invalid_argument_r(ctx, "output_value: non-standard floats");
      nfloats = Wosize_val(v) / Double_wosize;
      if (nfloats < 0x100) {
        writecode8_r(ctx, CODE_DOUBLE_ARRAY8_NATIVE, nfloats);
      } else {
        writecode32_r(ctx, CODE_DOUBLE_ARRAY32_NATIVE, nfloats);
      }
      writeblock_float8((double *) v, nfloats);
      size_32 += 1 + nfloats * 2;
      size_64 += 1 + nfloats;
      extern_record_location_r(ctx, v);
      break;
    }
    case Abstract_tag:
      extern_invalid_argument_r(ctx, "output_value: abstract value (Abstract)");
      break;
    case Infix_tag:
      /* Interior pointer: emit the offset, then marshal the enclosing
         closure block. */
      writecode32_r(ctx,CODE_INFIXPOINTER, Infix_offset_hd(hd));
      extern_rec_r(ctx, v - Infix_offset_hd(hd));
      break;
    case Custom_tag: {
      uintnat sz_32, sz_64;
      char * ident = Custom_ops_val(v)->identifier;
      void (*serialize)(value v, uintnat * wsize_32, uintnat * wsize_64);
      /* In cross-context mode use the dedicated serializer, which must
         not capture context-local state. */
      if(extern_cross_context){
        serialize = Custom_ops_val(v)->cross_context_serialize;
      }
      else{
        serialize = Custom_ops_val(v)->serialize;
      }
      if (serialize == NULL){
        /* NOTE(debug): deliberate crash left in by the original author to
           trap missing serializers under a debugger — remove once fixed. */
        volatile int a = 1;
        a /= 0;
        extern_invalid_argument_r(ctx, "output_value: abstract value (Custom)");
      }
      Write(CODE_CUSTOM);
      writeblock_r(ctx, ident, strlen(ident) + 1);
      serialize(v, &sz_32, &sz_64);
      size_32 += 2 + ((sz_32 + 3) >> 2);  /* header + ops + data */
      size_64 += 2 + ((sz_64 + 7) >> 3);
      /* This temporarily breaks the object, by replacing it with a
         forwarding pointer */
      extern_record_location_r(ctx,v);
      break;
    }
    default: {
      value field0;
      if (tag < 16 && sz < 8) {
        Write(PREFIX_SMALL_BLOCK + tag + (sz << 4));
#ifdef ARCH_SIXTYFOUR
      } else if (hd >= ((uintnat)1 << 32)) {
        writecode64_r(ctx, CODE_BLOCK64, Whitehd_hd (hd));
#endif
      } else {
        writecode32_r(ctx, CODE_BLOCK32, Whitehd_hd (hd));
      }
      size_32 += 1 + sz;
      size_64 += 1 + sz;
      /* Read field 0 before extern_record_location_r overwrites it with
         the sharing counter. */
      field0 = Field(v, 0);
      extern_record_location_r(ctx, v);
      /* Remember that we still have to serialize fields 1 ... sz - 1 */
      if (sz > 1) {
        sp++;
        if (sp >= extern_stack_limit) sp = extern_resize_stack_r(ctx, sp);
        sp->v = &Field(v,1);
        sp->count = sz-1;
      }
      /* Continue serialization with the first field */
      v = field0;
      continue;
    }
    }
  }
  else if ((cf = extern_find_code_r(ctx, (char *) v)) != NULL) {
    if (!extern_closures){
      extern_invalid_argument_r(ctx, "output_value: functional value");
      // FIXME: this is the correct version.
      /* NOTE(debug): deliberate crash left in by the original author. */
      {volatile int a = 1; a /= 0;}
    }
    writecode32_r(ctx, CODE_CODEPOINTER, (char *) v - cf->code_start);
    writeblock_r(ctx, (char *) cf->digest, 16);
  } else {
    if(extern_cross_context){
      /* NOTE(debug): diagnostics for an unresolved cross-context case
         kept from the original author. */
      fprintf(stderr, "ZZZZ working on the external pointer: %p, which is to say %li [cf is %p]\n", (void*)v, (long)v, cf);
      DUMP("about to crash in the strange case I'm debugging");
      extern_invalid_argument_r(ctx, "output_value: abstract value (outside heap) [FIXME: implement]");
    }
    else
      extern_invalid_argument_r(ctx, "output_value: abstract value (outside heap)");
  }
  next_item:
    /* Pop one more item to marshal, if any */
    if (sp == extern_stack) {
      /* We are done.   Cleanup the stack and leave the function */
      extern_free_stack_r(ctx);
      return;
    }
    v = *((sp->v)++);
    if (--(sp->count) == 0) sp--;
  }
}
/* Unpin the shared-heap block [v]: clear its NOT_MARKABLE status and
   return it to the UNMARKED state.  The caller must own the block and
   it must currently be pinned. */
void caml_shared_unpin(value v)
{
  header_t hd;

  Assert (Is_block(v) && !Is_minor(v));
  Assert (caml_owner_of_shared_block(v) == caml_domain_self());

  hd = Hd_val(v);
  Assert (Has_status_hd(hd, NOT_MARKABLE));
  Hd_val(v) = With_status_hd(hd, global.UNMARKED);
}