static void push_and_mark_object(void *p)
{
    GC_normal_finalize_mark_proc(p);
    while (!GC_mark_stack_empty()) {
        MARK_FROM_MARK_STACK();
    }
    GC_set_mark_bit(p);
    if (GC_mark_state != MS_NONE) {
        while (!GC_mark_some(0)) {
            /* Empty. */
        }
    }
}
static void finalize_eagers(int eager_level); /* PLTSCHEME: defined below */

/* and invoke finalizers. */
void GC_finalize()
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    size_t i;
    size_t dl_size = (log_dl_table_size == -1) ? 0 : (1 << log_dl_table_size);
    size_t fo_size = (log_fo_table_size == -1) ? 0 : (1 << log_fo_table_size);

    /* PLTSCHEME: for resetting the disappearing link */
    /* PLTSCHEME: it's important to "push roots again" before making    */
    /* disappearing links disappear, because this step includes marking */
    /* from ephemerons whose keys are reachable.  We want to mark       */
    /* before disappearing links are disappeared.                       */
    if (GC_push_last_roots_again) GC_push_last_roots_again();

    /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
        curr_dl = dl_head[i];
        prev_dl = 0;
        while (curr_dl != 0) {
            /* PLTSCHEME: skip late dls: */
            if (curr_dl -> dl_kind == LATE_DL) {
                prev_dl = curr_dl;
                curr_dl = dl_next(curr_dl);
                continue;
            }
            real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
            real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
            if (!GC_is_marked(real_ptr)) {
                *(word *)real_link = 0;
                next_dl = dl_next(curr_dl);
                if (prev_dl == 0) {
                    dl_head[i] = next_dl;
                } else {
                    dl_set_next(prev_dl, next_dl);
                }
                GC_clear_mark_bit((ptr_t)curr_dl);
                GC_dl_entries--;
                curr_dl = next_dl;
            } else {
                prev_dl = curr_dl;
                curr_dl = dl_next(curr_dl);
            }
        }
    }

    /* PLTSCHEME: All eagers first */
    /* Enqueue for finalization all EAGER objects that are still */
    /* unreachable.                                               */
    GC_bytes_finalized = 0;
    finalize_eagers(1);
    if (GC_push_last_roots_again) GC_push_last_roots_again();
    finalize_eagers(2);
    if (GC_push_last_roots_again) GC_push_last_roots_again();

    /* Mark all objects reachable via chains of 1 or more pointers     */
    /* from finalizable objects.                                        */
    /* PLTSCHEME: non-eager finalizations only (eagers already marked) */
#   ifdef PRINTSTATS
        GC_ASSERT(GC_mark_state == MS_NONE);
#   endif
    for (i = 0; i < fo_size; i++) {
        for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
            if (!(curr_fo -> eager_level)) { /* PLTSCHEME */
                real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
                if (!GC_is_marked(real_ptr)) {
                    (*(curr_fo -> fo_mark_proc))(real_ptr);
                    while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK();
                    if (GC_mark_state != MS_NONE) {
                        /* Mark stack overflowed. Very unlikely. */
#                       ifdef PRINTSTATS
                            if (GC_mark_state != MS_INVALID) ABORT("Bad mark state");
                            GC_printf("Mark stack overflowed in finalization!!\n");
#                       endif
                        /* Make mark bits consistent again.  Forget about */
                        /* finalizing this object for now.                */
                        GC_set_mark_bit(real_ptr);
                        while (!GC_mark_some((ptr_t)0)) {
                            /* Empty. */
                        }
                    }
#if 0
                    if (GC_is_marked(real_ptr)) {
                        /* PLTSCHEME: we have some ok cycles (below a parent) */
                        printf("Finalization cycle involving %lx\n",
                               (unsigned long)real_ptr);
                    }
#endif
                }
            }
        }
    }

    /* Enqueue for finalization all objects that are still */
    /* unreachable.                                          */
    /* GC_bytes_finalized = 0; */ /* PLTSCHEME: done above */
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
        while (curr_fo != 0) {
            real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
            if (!GC_is_marked(real_ptr)) {
                GC_set_mark_bit(real_ptr);
                /* Delete from hash table */
                next_fo = fo_next(curr_fo);
                if (prev_fo == 0) {
                    fo_head[i] = next_fo;
                } else {
                    fo_set_next(prev_fo, next_fo);
                }
                GC_fo_entries--;
                /* Add to list of objects awaiting finalization. */
                fo_set_next(curr_fo, GC_finalize_now);
                GC_finalize_now = curr_fo;
                /* Unhide object pointer so any future collections will */
                /* see it.                                               */
                curr_fo -> fo_hidden_base =
                    (word)REVEAL_POINTER(curr_fo -> fo_hidden_base);
                GC_bytes_finalized +=
                    curr_fo -> fo_object_size + sizeof(struct finalizable_object);
                curr_fo = next_fo;
            } else {
                prev_fo = curr_fo;
                curr_fo = fo_next(curr_fo);
            }
        }
    }

    /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
        curr_dl = dl_head[i];
        prev_dl = 0;
        while (curr_dl != 0) {
            real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
            if (real_link != 0 && !GC_is_marked(real_link)) {
                next_dl = dl_next(curr_dl);
                if (prev_dl == 0) {
                    dl_head[i] = next_dl;
                } else {
                    dl_set_next(prev_dl, next_dl);
                }
                GC_clear_mark_bit((ptr_t)curr_dl);
                GC_dl_entries--;
                curr_dl = next_dl;
            } else {
                prev_dl = curr_dl;
                curr_dl = dl_next(curr_dl);
            }
        }
    }

    /* PLTSCHEME: late disappearing links */
    for (i = 0; i < dl_size; i++) {
        curr_dl = dl_head[i];
        prev_dl = 0;
        while (curr_dl != 0) {
            /* PLTSCHEME: only late dls: */
            if (curr_dl -> dl_kind != LATE_DL) {
                prev_dl = curr_dl;
                curr_dl = dl_next(curr_dl);
                continue;
            }
            real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
            real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
            if (!GC_is_marked(real_ptr)) {
                *(word *)real_link = 0;
                next_dl = dl_next(curr_dl);
                if (prev_dl == 0) {
                    dl_head[i] = next_dl;
                } else {
                    dl_set_next(prev_dl, next_dl);
                }
                GC_clear_mark_bit((ptr_t)curr_dl);
                GC_dl_entries--;
                curr_dl = next_dl;
            } else {
                prev_dl = curr_dl;
                curr_dl = dl_next(curr_dl);
            }
        }
    }

    /* PLTSCHEME: */
    if (GC_custom_finalize) GC_custom_finalize();
}
/* PLTSCHEME: eager finalization: */
static void finalize_eagers(int eager_level)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    struct finalizable_object * end_eager_mark;
    ptr_t real_ptr;
    int i;
    int fo_size = (log_fo_table_size == -1) ? 0 : (1 << log_fo_table_size);

    end_eager_mark = GC_finalize_now; /* PLTSCHEME */
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
        while (curr_fo != 0) {
            if (curr_fo -> eager_level == eager_level) {
                real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
                if (!GC_is_marked(real_ptr)) {
                    /* We assume that (non-eager) finalization orders do not   */
                    /* need to take into account connections through memory    */
                    /* with eager finalizations.  Otherwise, this mark bit     */
                    /* could break the chain from one (non-eager) finalizable  */
                    /* object to another.                                       */
                    GC_set_mark_bit(real_ptr);
                    /* Delete from hash table */
                    next_fo = fo_next(curr_fo);
                    if (prev_fo == 0) {
                        fo_head[i] = next_fo;
                    } else {
                        fo_set_next(prev_fo, next_fo);
                    }
                    GC_fo_entries--;
                    /* Add to list of objects awaiting finalization. */
                    fo_set_next(curr_fo, GC_finalize_now);
                    GC_finalize_now = curr_fo;
                    /* Unhide object pointer so any future collections will */
                    /* see it.                                               */
                    curr_fo -> fo_hidden_base =
                        (word)REVEAL_POINTER(curr_fo -> fo_hidden_base);
                    GC_bytes_finalized +=
                        curr_fo -> fo_object_size + sizeof(struct finalizable_object);
#                   ifdef PRINTSTATS
                        if (!GC_is_marked((ptr_t)curr_fo)) {
                            ABORT("GC_finalize: found accessible unmarked object\n");
                        }
#                   endif
                    curr_fo = next_fo;
                } else {
                    prev_fo = curr_fo;
                    curr_fo = fo_next(curr_fo);
                }
            } else {
                prev_fo = curr_fo;
                curr_fo = fo_next(curr_fo);
            }
        }
    }

    /* PLTSCHEME: Mark from queued eagers: */
    for (curr_fo = GC_finalize_now;
         curr_fo != end_eager_mark;
         curr_fo = fo_next(curr_fo)) {
        /* PLTSCHEME: if this is an eager finalization, then objects */
        /* accessible from real_ptr need to be marked                */
        if (curr_fo -> eager_level == eager_level) {
            (*(curr_fo -> fo_mark_proc))(curr_fo -> fo_hidden_base);
            while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK();
            if (GC_mark_state != MS_NONE) {
                /* Mark stack overflowed.  Very unlikely.  Everything's */
                /* ok, though.  Just mark from scratch.                 */
                while (!GC_mark_some((ptr_t)0)) {
                    /* Empty. */
                }
            }
        }
    }
}
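/* Illustrative sketch (not part of the collector): how the tables that      */
/* GC_finalize() above walks get populated from client code, using only the  */
/* standard Boehm GC registration entry points.  The PLTSCHEME-specific      */
/* eager levels and LATE_DL links are set through separate PLTSCHEME         */
/* registration calls not shown here.  The names my_finalizer,               */
/* example_registration, and weak_ref are hypothetical.                      */
#if 0
#include "gc.h"

static void my_finalizer(GC_PTR obj, GC_PTR client_data)
{
    /* Runs after GC_finalize() has moved obj's entry onto GC_finalize_now */
    /* and the finalization queue is drained (e.g. via GC_invoke_finalizers). */
}

static void example_registration(void)
{
    GC_PTR my_object = GC_MALLOC(4 * sizeof(void *));
    static GC_PTR weak_ref;

    /* Adds an entry to fo_head[]; GC_finalize() enqueues it once */
    /* my_object becomes unreachable.                             */
    GC_register_finalizer(my_object, my_finalizer, 0, 0, 0);

    /* Adds an entry to dl_head[]; the non-late pass in GC_finalize() */
    /* clears weak_ref when my_object becomes unreachable.            */
    weak_ref = my_object;
    GC_general_register_disappearing_link(&weak_ref, my_object);
}
#endif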