/*
 * Configures and initializes the Boehm GC for the VM.
 *
 * Disables scanning of dynamic library data segments, enables Java-style
 * finalization, applies the heap-size options, registers the custom GC
 * object kinds used by the runtime, and initializes the mutexes guarding
 * the referents table and GC roots list.
 *
 * Returns TRUE on success, FALSE if a mutex could not be initialized.
 */
jboolean initGC(Options* options) {
    /* Collector configuration knobs are applied before GC_INIT(). */
    GC_set_no_dls(1);
    GC_set_java_finalization(1);
    GC_INIT();

    if (options->maxHeapSize > 0) {
        GC_set_max_heap_size(options->maxHeapSize);
    }
    if (options->initialHeapSize > 0) {
        size_t currentSize = GC_get_heap_size();
        if (options->initialHeapSize > currentSize) {
            /* Grow the heap up to the requested initial size. */
            GC_expand_hp(options->initialHeapSize - currentSize);
        }
    }

    /* Kind for ordinary objects, marked via the markObject mark proc. */
    objectGCKind = GC_new_kind(GC_new_free_list(),
                               GC_MAKE_PROC(GC_new_proc(markObject), 0), 0, 1);
    /* Kind for large arrays, described by a length descriptor. */
    largeArrayGCKind = GC_new_kind(GC_new_free_list(), GC_DS_LENGTH, 1, 1);
    /* Kind for atomic objects, also length-described. */
    atomicObjectGCKind = GC_new_kind(GC_new_free_list(), GC_DS_LENGTH, 0, 1);
    /* Kind for referent entries, marked via a bitmap descriptor. */
    referentEntryGCKind = gcNewDirectBitmapKind(REFERENT_ENTRY_GC_BITMAP);

    if (rvmInitMutex(&referentsLock) != 0) {
        return FALSE;
    }
    if (rvmInitMutex(&gcRootsLock) != 0) {
        return FALSE;
    }

    GC_set_warn_proc(gcWarnProc);
    return TRUE;
}
/* Caller does not hold allocation lock. */ void GC_init_gcj_malloc(int mp_index, void * /* really GC_mark_proc */mp) { register int i; GC_bool ignore_gcj_info; DCL_LOCK_STATE; GC_init(); /* In case it's not already done. */ DISABLE_SIGNALS(); LOCK(); if (GC_gcj_malloc_initialized) { UNLOCK(); ENABLE_SIGNALS(); return; } GC_gcj_malloc_initialized = TRUE; ignore_gcj_info = (0 != GETENV("GC_IGNORE_GCJ_INFO")); # ifdef CONDPRINT if (GC_print_stats && ignore_gcj_info) { GC_printf0("Gcj-style type information is disabled!\n"); } # endif GC_ASSERT(GC_mark_procs[mp_index] == (GC_mark_proc)0); /* unused */ GC_mark_procs[mp_index] = (GC_mark_proc)mp; if (mp_index >= GC_n_mark_procs) ABORT("GC_init_gcj_malloc: bad index"); /* Set up object kind gcj-style indirect descriptor. */ GC_gcjobjfreelist = (ptr_t *)GC_new_free_list_inner(); if (ignore_gcj_info) { /* Use a simple length-based descriptor, thus forcing a fully */ /* conservative scan. */ GC_gcj_kind = GC_new_kind_inner((void **)GC_gcjobjfreelist, (0 | GC_DS_LENGTH), TRUE, TRUE); } else { GC_gcj_kind = GC_new_kind_inner( (void **)GC_gcjobjfreelist, (((word)(-MARK_DESCR_OFFSET - GC_INDIR_PER_OBJ_BIAS)) | GC_DS_PER_OBJECT), FALSE, TRUE); } /* Set up object kind for objects that require mark proc call. */ if (ignore_gcj_info) { GC_gcj_debug_kind = GC_gcj_kind; GC_gcjdebugobjfreelist = GC_gcjobjfreelist; } else { GC_gcjdebugobjfreelist = (ptr_t *)GC_new_free_list_inner(); GC_gcj_debug_kind = GC_new_kind_inner( (void **)GC_gcjdebugobjfreelist, GC_MAKE_PROC(mp_index, 1 /* allocated with debug info */), FALSE, TRUE); } UNLOCK(); ENABLE_SIGNALS(); }
/*
 * Configures and initializes the Boehm GC for the VM (reduced variant).
 *
 * Enables Java-style finalization, applies the heap-size options,
 * registers the object GC kind, and initializes the referents mutex.
 *
 * Fix: GC_set_java_finalization(1) is now invoked BEFORE GC_INIT(),
 * matching the other initGC variant in this codebase — collector
 * configuration knobs should be applied before the collector is
 * initialized.
 *
 * Returns TRUE on success, FALSE if the mutex could not be initialized.
 */
jboolean initGC(Options* options) {
    GC_set_java_finalization(1);
    GC_INIT();

    if (options->maxHeapSize > 0) {
        GC_set_max_heap_size(options->maxHeapSize);
    }
    if (options->initialHeapSize > 0) {
        size_t now = GC_get_heap_size();
        if (options->initialHeapSize > now) {
            /* Grow the heap up to the requested initial size. */
            GC_expand_hp(options->initialHeapSize - now);
        }
    }

    /* Kind for ordinary objects, marked via the markObject mark proc. */
    object_gc_kind = GC_new_kind(GC_new_free_list(),
                                 GC_MAKE_PROC(GC_new_proc(markObject), 0),
                                 0, 1);

    if (rvmInitMutex(&referentsLock) != 0) {
        return FALSE;
    }
    return TRUE;
}
STATIC void GC_init_explicit_typing(void) { unsigned i; GC_STATIC_ASSERT(sizeof(struct LeafDescriptor) % sizeof(word) == 0); /* Set up object kind with simple indirect descriptor. */ GC_eobjfreelist = (ptr_t *)GC_new_free_list_inner(); GC_explicit_kind = GC_new_kind_inner( (void **)GC_eobjfreelist, (WORDS_TO_BYTES((word)-1) | GC_DS_PER_OBJECT), TRUE, TRUE); /* Descriptors are in the last word of the object. */ GC_typed_mark_proc_index = GC_new_proc_inner(GC_typed_mark_proc); /* Set up object kind with array descriptor. */ GC_array_mark_proc_index = GC_new_proc_inner(GC_array_mark_proc); GC_array_kind = GC_new_kind_inner(GC_new_free_list_inner(), GC_MAKE_PROC(GC_array_mark_proc_index, 0), FALSE, TRUE); GC_bm_table[0] = GC_DS_BITMAP; for (i = 1; i < WORDSZ/2; i++) { GC_bm_table[i] = (((word)-1) << (WORDSZ - i)) | GC_DS_BITMAP; } }
/* Mark procedure for explicitly-typed objects.  The layout bitmap is  */
/* fetched from GC_ext_descriptors[env]; each set bit selects one word */
/* of the object at addr that may hold a pointer.  Plausible-looking   */
/* words are pushed onto the mark stack.  Returns the (possibly        */
/* adjusted) mark stack pointer.                                       */
STATIC mse * GC_typed_mark_proc(word * addr, mse * mark_stack_ptr,
                                mse * mark_stack_limit, word env)
{
    word bm = GC_ext_descriptors[env].ed_bitmap;
    word * current_p = addr;
    word current;
    ptr_t greatest_ha = (ptr_t)GC_greatest_plausible_heap_addr;
    ptr_t least_ha = (ptr_t)GC_least_plausible_heap_addr;
    DECLARE_HDR_CACHE;

    INIT_HDR_CACHE;
    /* Scan one word per bitmap bit; a set bit marks a pointer slot.   */
    for (; bm != 0; bm >>= 1, current_p++) {
        if (bm & 1) {
            current = *current_p;
            FIXUP_POINTER(current);
            /* Cheap address-range filter before the push machinery.   */
            if (current >= (word)least_ha && current <= (word)greatest_ha) {
                PUSH_CONTENTS((ptr_t)current, mark_stack_ptr,
                              mark_stack_limit, (ptr_t)current_p);
            }
        }
    }
    if (GC_ext_descriptors[env].ed_continued) {
        /* Push an entry with the rest of the descriptor back onto the */
        /* stack. Thus we never do too much work at once. Note that    */
        /* we also can't overflow the mark stack unless we actually    */
        /* mark something.                                             */
        mark_stack_ptr++;
        if ((word)mark_stack_ptr >= (word)mark_stack_limit) {
            mark_stack_ptr = GC_signal_mark_stack_overflow(mark_stack_ptr);
        }
        /* Resume at the next WORDSZ-word chunk with the follow-on     */
        /* descriptor (env + 1).                                       */
        mark_stack_ptr -> mse_start = (ptr_t)(addr + WORDSZ);
        mark_stack_ptr -> mse_descr.w =
                GC_MAKE_PROC(GC_typed_mark_proc_index, env + 1);
    }
    return(mark_stack_ptr);
}