/* Could p be a stack address? */
STATIC GC_bool GC_on_stack(ptr_t p)
{
    ptr_t sp = GC_approx_sp();  /* approximate current stack pointer */

    /* p lies on the stack iff it sits between the (approximate)      */
    /* current stack pointer and the recorded stack bottom, with the  */
    /* comparison direction depending on stack growth direction.      */
#   ifdef STACK_GROWS_DOWN
      if (p >= sp && p < GC_stackbottom) {
        return(TRUE);
      }
#   else
      if (p <= sp && p > GC_stackbottom) {
        return(TRUE);
      }
#   endif
    return(FALSE);
}
void * GC_mark_thread(void * id) { word my_mark_no = 0; marker_sp[(word)id] = GC_approx_sp(); # ifdef IA64 marker_bsp[(word)id] = GC_save_regs_in_stack(); # endif for (;; ++my_mark_no) { /* GC_mark_no is passed only to allow GC_help_marker to terminate */ /* promptly. This is important if it were called from the signal */ /* handler or from the GC lock acquisition code. Under Linux, it's */ /* not safe to call it from a signal handler, since it uses mutexes */ /* and condition variables. Since it is called only here, the */ /* argument is unnecessary. */ if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) { /* resynchronize if we get far off, e.g. because GC_mark_no */ /* wrapped. */ my_mark_no = GC_mark_no; } # ifdef DEBUG_THREADS GC_printf("Starting mark helper for mark number %lu\n", my_mark_no); # endif GC_help_marker(my_mark_no); } }
/*ARGSUSED*/
/* Push the current thread's stack so its contents are traced.  The   */
/* range from cold_gc_frame to the hot end (approximate SP) is pushed */
/* eagerly; context is unused here (hence ARGSUSED).  In the THREADS  */
/* build only the hot section is handled, with the rest presumably    */
/* covered by the thread-support code -- confirm against the caller.  */
STATIC void GC_push_current_stack(ptr_t cold_gc_frame, void * context)
{
#   if defined(THREADS)
        /* A zero cold frame means there is nothing for us to push here. */
        if (0 == cold_gc_frame) return;
#       ifdef STACK_GROWS_DOWN
          GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
          /* For IA64, the register stack backing store is handled */
          /* in the thread-specific code. */
#       else
          GC_push_all_eager(cold_gc_frame, GC_approx_sp());
#       endif
#   else
        /* Single-threaded: push the whole stack, eagerly up to */
        /* cold_gc_frame, honoring the traced-section list.     */
        GC_push_all_stack_part_eager_sections(GC_approx_sp(), GC_stackbottom,
                                        cold_gc_frame, GC_traced_stack_sect);
#       ifdef IA64
          /* We also need to push the register stack backing store. */
          /* This should really be done in the same way as the */
          /* regular stack.  For now we fudge it a bit. */
          /* Note that the backing store grows up, so we can't use */
          /* GC_push_all_stack_partially_eager. */
          {
            ptr_t bsp = GC_save_regs_ret_val;
            /* 2048 bytes below the hot end of the backing store are  */
            /* treated as "cold" -- an empirical fudge, per the note  */
            /* above.                                                 */
            ptr_t cold_gc_bs_pointer = bsp - 2048;
            if (GC_all_interior_pointers
                && cold_gc_bs_pointer > BACKING_STORE_BASE) {
              /* Adjust cold_gc_bs_pointer if below our innermost */
              /* "traced stack section" in backing store. */
              if (GC_traced_stack_sect != NULL
                  && cold_gc_bs_pointer
                     < GC_traced_stack_sect->backing_store_end)
                cold_gc_bs_pointer = GC_traced_stack_sect->backing_store_end;
              GC_push_all_register_sections(BACKING_STORE_BASE,
                       cold_gc_bs_pointer, FALSE, GC_traced_stack_sect);
              GC_push_all_eager(cold_gc_bs_pointer, bsp);
            } else {
              /* Without interior pointers (or with a tiny backing    */
              /* store) just push everything eagerly.                 */
              GC_push_all_register_sections(BACKING_STORE_BASE, bsp,
                       TRUE /* eager */, GC_traced_stack_sect);
            }
            /* All values should be sufficiently aligned that we */
            /* don't have to worry about the boundary. */
          }
#       endif
#   endif /* !THREADS */
}
/* collections to amortize the collection cost. Should be non-zero. */ static word min_bytes_allocd(void) { word result; word stack_size; word total_root_size; /* includes double stack size, */ /* since the stack is expensive */ /* to scan. */ word scan_size; /* Estimate of memory to be scanned */ /* during normal GC. */ # ifdef THREADS if (GC_need_to_lock) { /* We are multi-threaded... */ stack_size = GC_total_stacksize; /* For now, we just use the value computed during the latest GC. */ # ifdef DEBUG_THREADS GC_log_printf("Total stacks size: %lu\n", (unsigned long)stack_size); # endif } else # endif /* else*/ { # ifdef STACK_NOT_SCANNED stack_size = 0; # elif defined(STACK_GROWS_UP) stack_size = GC_approx_sp() - GC_stackbottom; # else stack_size = GC_stackbottom - GC_approx_sp(); # endif } total_root_size = 2 * stack_size + GC_root_size; scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4 + total_root_size; result = scan_size / GC_free_space_divisor; if (GC_incremental) { result /= 2; } return result > 0 ? result : 1; }