static void zero_weak_boxes(GCTYPE *gc, int is_late, int force_zero)
{
  GC_Weak_Box *wb;

  wb = gc->weak_boxes[is_late];
  while (wb) {
    if (force_zero || !is_marked(gc, wb->val)) {
      wb->val = NULL;
      if (wb->secondary_erase) {
        void **p;
        mpage *page;

        /* it's possible for the secondary to be in an old generation
           and therefore on an mprotected page: */
        page = pagemap_find_page(gc->page_maps, wb->secondary_erase);
        if (page->mprotected) {
          page->mprotected = 0;
          mmu_write_unprotect_page(gc->mmu, page->addr, APAGE_SIZE);
          GC_MP_CNT_INC(mp_mark_cnt);
        }
        p = (void **)GC_resolve2(wb->secondary_erase, gc);
        *(p + wb->soffset) = NULL;
        wb->secondary_erase = NULL;
      }
    }
    wb = wb->next;
  }

  /* reset, in case we have a second round */
  gc->weak_boxes[is_late] = NULL;
}
static int mark_ready_ephemerons(GCTYPE *gc, int inc_gen1)
{
  GC_Ephemeron *waiting, *next, *eph;
  int did_one = 0, j;

  GC_mark_no_recur(gc, 1);

  for (j = 0; j < (inc_gen1 ? 1 : 2); j++) {
    if (inc_gen1)
      eph = gc->inc_ephemerons;
    else if (j == 0)
      eph = gc->ephemerons;
    else
      eph = gc->bp_ephemerons;

    waiting = NULL;

    for (; eph; eph = next) {
      if (inc_gen1)
        next = eph->inc_next;
      else
        next = eph->next;
      if (is_marked(gc, eph->key)) {
        if (!inc_gen1)
          eph->key = GC_resolve2(eph->key, gc);
        gcMARK2(eph->val, gc);
        gc->num_last_seen_ephemerons++;
        did_one = 1;
        if (!inc_gen1 && (j == 0) && !gc->gc_full && gc->started_incremental) {
          /* Need to preserve the ephemeron in the incremental list,
             unless it's kept in generation 1/2 instead of promoted to
             generation 1. */
          if (!is_in_generation_half(gc, eph)) {
            eph->inc_next = gc->inc_ephemerons;
            gc->inc_ephemerons = eph;
          }
        }
      } else {
        if (inc_gen1) {
          /* Ensure that we can write to the page containing the ephemeron: */
          check_incremental_unprotect(gc, pagemap_find_page(gc->page_maps, eph));
          eph->inc_next = waiting;
        } else
          eph->next = waiting;
        waiting = eph;
      }
    }

    if (inc_gen1)
      gc->inc_ephemerons = waiting;
    else if (j == 0)
      gc->ephemerons = waiting;
    else
      gc->bp_ephemerons = waiting;
  }

  GC_mark_no_recur(gc, 0);

  return did_one;
}
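/* Hedged sketch, not verbatim from the collector: mark_ready_ephemerons is
   intended to be driven to a fixed point, because marking one ephemeron's
   value can make another ephemeron's key reachable. The call to
   propagate_marks below stands in for the collector's mark-propagation step;
   the wrapper's name is illustrative only. */
static void mark_ephemerons_to_fixed_point(GCTYPE *gc)
{
  /* flush pending marks first, then retry while any ephemeron fired */
  propagate_marks(gc);
  while (mark_ready_ephemerons(gc, 0)) {
    propagate_marks(gc);
  }
}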
int GC_dbg_dump_mpage_for_p(void *p)
{
  NewGC *gc = GC_get_GC();
  mpage *page = pagemap_find_page(gc->page_maps, p);

  if (page) {
    GC_dbg_dump_mpage(page);
    return 1;
  } else {
    printf("Not allocated by the GC\n");
    return 0;
  }
}
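/* Hedged usage sketch (not from the original source): GC_dbg_dump_mpage_for_p
   is a debugging aid, so it is typically called on a suspect pointer, either
   interactively, e.g.
     (gdb) call GC_dbg_dump_mpage_for_p((void *)addr)
   or from a small helper like the one below; the name debug_inspect and the
   variable suspect are illustrative only. */
static void debug_inspect(void *suspect)
{
  if (!GC_dbg_dump_mpage_for_p(suspect))
    printf("%p is not on any GC-managed page\n", suspect);
}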
void fault_handler(int sn, struct siginfo *si, void *ctx)
{
  void *p = si->si_addr;  /* quick access to SIGSEGV info in GDB */
  int c = si->si_code;
#ifdef MZ_USE_PLACES
  int m = 0;
#endif
  if (si->si_code != SEGV_ACCERR) { /*SEGV_MAPERR*/
    if (c == SEGV_MAPERR) {
      printf("SIGSEGV MAPERR si_code %i fault on addr %p\n", c, p);
    }
    if (c == 0) {
      /* I have no idea why this happens on linux */
      /* supposedly it's coming from the user via kill */
      /* so just ignore it. */
      printf("SIGSEGV SI_USER SI_CODE %i fault on addr %p\n", c, p);
      printf("pid %i uid %i thread %lx\n", si->si_pid, si->si_uid, mz_proc_thread_self());
      return;
    }
    if (c == 128) {
      printf("SIGSEGV SI_KERNEL SI_CODE %i fault on addr %p sent by kernel\n", c, p);
    }
#if WAIT_FOR_GDB
    launchgdb();
#endif
    abort();
  }

  if (!designate_modified(p)) {
    if (si->si_code == SEGV_ACCERR) {
#ifdef MZ_USE_PLACES
      if (pagemap_find_page(MASTERGC->page_maps, p)) {
        m = 1;
        printf("ADDR %p OWNED BY MASTER %i\n", p, m);
      }
#endif
      printf("SIGSEGV SEGV_ACCERR SI_CODE %i fault on %p\n", c, p);
    } else {
      printf("SIGSEGV ???? SI_CODE %i fault on %p\n", c, p);
    }
    abort();
  }
}
# define NEED_SIGSTACK
# define NEED_SIGACTION
# define USE_SIGACTON_SIGNAL_KIND SIGSEGV
int GC_is_live(void *p)
{
  NewGC *gc = GC_get_GC();
  mpage *page = pagemap_find_page(gc->page_maps, p);

  if (!page) {
    /* NOT GC ALLOCATED */
    printf("%p page: %p NOT GC ALLOCATED\n", p, page);
    fflush(stdout);
    return 0;
  } else if (page->generation == 0) {
    if (page == gc->gen0.curr_alloc_page) {
      if (p < (void *)GC_gen0_alloc_page_ptr) {
        printf("GEN0 object %p page: %p gen: %i class: %i ALIVE\n",
               p, page, page->generation, page->size_class);
        printf("%p BEGIN: %p ALLOCED_UPTO: %p END: %p\n",
               p, (void *)gc->gen0.curr_alloc_page->addr,
               (void *)GC_gen0_alloc_page_ptr, (void *)GC_gen0_alloc_page_end);
        fflush(stdout);
        return 1;
      } else {
        printf("GEN0 object %p page: %p gen: %i class: %i DEAD\n",
               p, page, page->generation, page->size_class);
        printf("%p BEGIN: %p ALLOCED_UPTO: %p END: %p\n",
               p, (void *)gc->gen0.curr_alloc_page->addr,
               (void *)GC_gen0_alloc_page_ptr, (void *)GC_gen0_alloc_page_end);
        fflush(stdout);
        return 0;
      }
    }
    return NUM(p) < (NUM(page->addr) + page->size);
  } else { /* page->generation > 0 */
    if (page->size_class == 1) {
      int dead = OBJPTR_TO_OBJHEAD(p)->dead;
      printf("MEDIUM object %p page: %p gen: %i class: %i dead: %i\n",
             p, page, page->generation, page->size_class, dead);
      fflush(stdout);
      return !dead;
    } else if ((NUM(page->addr) + page->size) > NUM(p)) {
      printf("%p page: %p gen: %i class: %i ALIVE\n",
             p, page, page->generation, page->size_class);
      printf("%p BEGIN: %p ALLOCED_UPTO: %p\n",
             p, (void *)page->addr, (void *)(NUM(page->addr) + page->size));
      fflush(stdout);
      return 1;
    } else {
      printf("%p page: %p gen: %i class: %i DEAD\n",
             p, page, page->generation, page->size_class);
      printf("%p BEGIN: %p ALLOCED_UPTO: %p\n",
             p, (void *)page->addr, (void *)(NUM(page->addr) + page->size));
      fflush(stdout);
      return 0;
    }
  }
}
static void *print_out_pointer(const char *prefix, void *p,
                               GC_get_type_name_proc get_type_name,
                               GC_print_tagged_value_proc print_tagged_value,
                               int *_kind)
{
  trace_page_t *page;
  const char *what;

  page = pagemap_find_page(GC_instance->page_maps, p);
  if (!page || (trace_page_type(page) == TRACE_PAGE_BAD)) {
    GCPRINT(GCOUTF, "%s%s %p\n", prefix, trace_source_kind(*_kind), p);
    return NULL;
  }
  p = trace_pointer_start(page, p);

  if ((trace_page_type(page) == TRACE_PAGE_TAGGED)
      || (trace_page_type(page) == TRACE_PAGE_PAIR)) {
    Type_Tag tag;
    tag = *(Type_Tag *)p;
    if ((tag >= 0) && get_type_name && get_type_name(tag)) {
      print_tagged_value(prefix, p, 0, 1000, "\n");
    } else {
      GCPRINT(GCOUTF, "%s<#%d> %p\n", prefix, tag, p);
    }
    what = NULL;
  } else if (trace_page_type(page) == TRACE_PAGE_ARRAY) {
    what = "ARRAY";
  } else if (trace_page_type(page) == TRACE_PAGE_ATOMIC) {
    what = "ATOMIC";
  } else if (trace_page_type(page) == TRACE_PAGE_MALLOCFREE) {
    what = "MALLOCED";
  } else {
    what = "?!?";
  }
  if (what) {
    GCPRINT(GCOUTF, "%s%s%s %p\n", prefix, what,
            (trace_page_is_big(page) ? "b" : ""), p);
  }

  return trace_backpointer(page, p, _kind);
}
inline static void clean_up_thread_list(NewGC *gc)
{
  GC_Thread_Info *work = gc->thread_infos;
  GC_Thread_Info *prev = NULL;

  while (work) {
    if (!pagemap_find_page(gc->page_maps, work->thread)
        || marked(gc, work->thread)) {
      work->thread = GC_resolve2(work->thread, gc);
      prev = work;
      work = work->next;
    } else {
      GC_Thread_Info *next = work->next;

      if (prev)
        prev->next = next;
      if (!prev)
        gc->thread_infos = next;
      ofm_free(work, sizeof(GC_Thread_Info));
      work = next;
    }
  }
}
static int zero_weak_boxes(GCTYPE *gc, int is_late, int force_zero, int from_inc, int fuel)
{
  GC_Weak_Box *wb;
  int num_gen0;

  if (from_inc) {
    wb = gc->inc_weak_boxes[is_late];
    num_gen0 = 0;
  } else {
    wb = append_weak_boxes(gc->weak_boxes[is_late],
                           gc->bp_weak_boxes[is_late],
                           &num_gen0);
    if (gc->gc_full || !gc->started_incremental)
      num_gen0 = 0;
  }

  while (wb) {
    GC_ASSERT(is_marked(gc, wb));
    if (!wb->val) {
      /* nothing to do */
    } else if (force_zero || !is_marked(gc, wb->val)) {
      wb->val = NULL;
      if (wb->secondary_erase) {
        void **p;
        mpage *page;

        /* it's possible for the secondary to be in an old generation
           and therefore on an mprotected page: */
        page = pagemap_find_page(gc->page_maps, wb->secondary_erase);
        if (page->mprotected) {
          page->mprotected = 0;
          mmu_write_unprotect_page(gc->mmu, page->addr, APAGE_SIZE,
                                   page_mmu_type(page), &page->mmu_src_block);
          page->reprotect_next = gc->reprotect_next;
          gc->reprotect_next = page;
          page->reprotect = 1;
        }
        p = (void **)GC_resolve2(wb->secondary_erase, gc);
        *(p + wb->soffset) = NULL;
        wb->secondary_erase = NULL;
      }
    } else {
      wb->val = GC_resolve2(wb->val, gc);
    }

    if (num_gen0 > 0) {
      if (!is_in_generation_half(gc, wb)) {
        /* For incremental mode, preserve this weak box
           in the incremental list for re-checking later. */
        check_weak_box_not_already_in_inc_chain(wb, gc->inc_weak_boxes[wb->is_late]);
        wb->inc_next = gc->inc_weak_boxes[is_late];
        gc->inc_weak_boxes[is_late] = wb;
      }
    }

    if (from_inc) {
      GC_Weak_Box *next;
      next = wb->inc_next;
      wb->inc_next = gc->weak_incremental_done;
      wb = next;
    } else
      wb = wb->next;

    num_gen0--;

    if (fuel >= 0) {
      if (fuel > 0)
        fuel--;
      else {
        GC_ASSERT(from_inc);
        gc->inc_weak_boxes[is_late] = wb;
        return 0;
      }
    }
  }

  /* reset, in case we have a second round */
  if (from_inc) {
    gc->inc_weak_boxes[is_late] = NULL;
  } else {
    gc->weak_boxes[is_late] = NULL;
    gc->bp_weak_boxes[is_late] = NULL;
  }

  return fuel;
}
void fault_handler(int sn, siginfo_t *si, void *ctx)
{
  void *p = si->si_addr;  /* quick access to SIGSEGV info in GDB */
  int c = si->si_code;
#ifdef MZ_USE_PLACES
  int m = 0;
#endif
  if (si->si_code != SEGV_ACCERR) { /*SEGV_MAPERR*/
    if (c == SEGV_MAPERR) {
      printf("SIGSEGV MAPERR si_code %i fault on addr %p\n", c, p);
      /* SIGSEGV MAPERRs are invalid addresses. Possible reasons:
           An object is prematurely freed because it isn't getting marked correctly
           An unsafe operation was used incorrectly
           The stack grew beyond its bounds. */
    }
    if (c == 0) {
      /* I have no idea why this happens on linux */
      /* supposedly it's coming from the user via kill */
      /* so just ignore it. It appears when */
      /* running w/ places in GDB */
      printf("SIGSEGV SI_USER SI_ERRNO %i fault on addr %p\n", si->si_errno, p);
#ifdef MZ_USE_PLACES
      printf("pid %i uid %i thread %lx\n", si->si_pid, si->si_uid, mz_proc_thread_self());
#else
      printf("pid %i uid %i\n", si->si_pid, si->si_uid);
#endif
      return;
    }
    if (c == 128) {
      printf("SIGSEGV SI_KERNEL SI_ERRNO %i fault on addr %p\n", si->si_errno, p);
    }
#if WAIT_FOR_GDB
    launchgdb();
#endif
    abort();
  }

  if (!designate_modified(p)) {
    if (si->si_code == SEGV_ACCERR) {
#ifdef MZ_USE_PLACES
      if (pagemap_find_page(MASTERGC->page_maps, p)) {
        m = 1;
        printf("ADDR %p OWNED BY MASTER %i\n", p, m);
      }
#endif
      printf("SIGSEGV SEGV_ACCERR SI_CODE %i fault on %p\n", c, p);
    } else {
      printf("SIGSEGV ???? SI_CODE %i fault on %p\n", c, p);
    }
#if WAIT_FOR_GDB
    launchgdb();
#endif
    abort();
  }
}
# define NEED_SIGSTACK
# define NEED_SIGACTION
# define USE_SIGACTON_SIGNAL_KIND SIGSEGV
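/* Hedged sketch, not the collector's actual setup routine: a handler with this
   signature is registered through sigaction with SA_SIGINFO so that si_addr
   and si_code are available, and (per NEED_SIGSTACK) with an alternate signal
   stack so a stack-overflow SIGSEGV can still be handled. The function name
   install_fault_handler is illustrative only. */
#include <signal.h>
#include <stdlib.h>
#include <string.h>

static void install_fault_handler(void)
{
  struct sigaction act;
  stack_t ss;

  /* dedicated stack for the handler */
  ss.ss_sp = malloc(SIGSTKSZ);
  ss.ss_size = SIGSTKSZ;
  ss.ss_flags = 0;
  sigaltstack(&ss, NULL);

  memset(&act, 0, sizeof(act));
  act.sa_sigaction = fault_handler;        /* three-argument siginfo handler */
  act.sa_flags = SA_SIGINFO | SA_ONSTACK;
  sigemptyset(&act.sa_mask);
  sigaction(USE_SIGACTON_SIGNAL_KIND, &act, NULL);  /* SIGSEGV */
}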