/*
 * External API to register root set in dynamically loaded library.
 * Boehm GC doesn't do this automatically on some platforms.
 *
 * NB: The scheme we're using to find the bss area (via Scm__bss{start|end})
 * is getting less effective, since more platforms are adopting linkers
 * that rearrange bss variables.  Extensions should not keep GC_MALLOCED
 * pointers in bss variables.
 */
void Scm_RegisterDL(void *data_start, void *data_end,
                    void *bss_start, void *bss_end)
{
    if (data_start < data_end) {
        GC_add_roots(data_start, data_end);
    }
    if (bss_start < bss_end) {
        GC_add_roots(bss_start, bss_end);
    }
}
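/* A minimal usage sketch (not from the Gauche sources): a dynamically
 * loaded extension could register its own data and bss sections right
 * after it is loaded.  The section-boundary symbols below are
 * hypothetical; a real extension obtains them from the linker. */
extern char my_data_start[], my_data_end[];   /* hypothetical linker symbols */
extern char my_bss_start[], my_bss_end[];     /* hypothetical linker symbols */

static void my_extension_init(void)
{
    /* Let the collector scan this library's static areas for roots. */
    Scm_RegisterDL(my_data_start, my_data_end,
                   my_bss_start, my_bss_end);
}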
void scheme_register_static(void *ptr, long size)
{
#if defined(MZ_PRECISE_GC) || defined(USE_SENORA_GC)
  /* Always register for precise and Senora GC: */
  GC_add_roots((char *)ptr, (char *)ptr + size + 1);
#else
# ifdef GC_MIGHT_USE_REGISTERED_STATICS
  if (use_registered_statics) {
    GC_add_roots((char *)ptr, (char *)ptr + size + 1);
  }
# endif
#endif
}
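/* A hedged usage sketch (illustrative, not from the Racket sources):
 * register a static cell so the collector treats it as a root before a
 * heap reference is stored in it.  The variable name is made up. */
static Scheme_Object *my_cached_value;  /* hypothetical static root */

static void my_module_init(void)
{
  scheme_register_static(&my_cached_value, sizeof(my_cached_value));
  my_cached_value = scheme_make_integer(42);
}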
static void reset_object_traces()
{
  /* found_object_count starts out negative, so the found_objects array
     is registered as a GC root exactly once, on the first reset. */
  if (found_object_count < 0)
    GC_add_roots(found_objects, found_objects + MAX_FOUND_OBJECTS);
  found_object_count = 0;
}
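/* For context, a plausible shape for the declarations this function
 * assumes (reconstructed, not verbatim; the capacity is a guess): */
#define MAX_FOUND_OBJECTS 50                /* assumed capacity */
static void *found_objects[MAX_FOUND_OBJECTS];
static int found_object_count = -1;         /* negative until first reset */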
void InitBags(UInt initial_size, Bag * stack_bottom, UInt stack_align)
{
    UInt i; /* loop variable */

    /* install the marking functions */
    for (i = 0; i < NUM_TYPES; i++) {
        TabMarkTypeBags[i] = -1;
    }

#ifndef DISABLE_GC
#ifdef HPCGAP
    if (!getenv("GC_MARKERS")) {
        /* The Boehm GC does not have an API to set the number of
         * markers for the parallel mark and sweep implementation,
         * so we use the documented environment variable GC_MARKERS
         * instead. However, we do not override it if it's already
         * set.
         */
        static char marker_env_str[32];
        unsigned num_markers = 2;

        if (!SyNumGCThreads)
            SyNumGCThreads = SyNumProcessors;
        if (SyNumGCThreads) {
            if (SyNumGCThreads <= MAX_GC_THREADS)
                num_markers = (unsigned)SyNumGCThreads;
            else
                num_markers = MAX_GC_THREADS;
        }
        sprintf(marker_env_str, "GC_MARKERS=%u", num_markers);
        putenv(marker_env_str);
    }
#endif
    GC_set_all_interior_pointers(0);
    GC_init();
    GC_set_free_space_divisor(1);
    TLAllocatorInit();
    GC_register_displacement(0);
    GC_register_displacement(sizeof(BagHeader));
    initial_size *= 1024;
    if (GC_get_heap_size() < initial_size)
        GC_expand_hp(initial_size - GC_get_heap_size());
    if (SyStorKill)
        GC_set_max_heap_size(SyStorKill * 1024);
#ifdef HPCGAP
    AddGCRoots();
    CreateMainRegion();
#else
    void * p = ActiveGAPState();
    GC_add_roots(p, (char *)p + sizeof(GAPState));
#endif
    for (i = 0; i <= MAX_GC_PREFIX_DESC; i++) {
        BuildPrefixGCDescriptor(i);
        /* This is necessary to initialize some internal structures
         * in the garbage collector: */
        GC_generic_malloc(sizeof(BagHeader) + i * sizeof(Bag), GCMKind[i]);
    }
#endif /* DISABLE_GC */
}
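/* A standalone sketch of the GC_MARKERS trick used above: the collector
 * reads the documented GC_MARKERS environment variable at init time, so
 * setting it via putenv() before GC_init() is a portable way to pick the
 * parallel-marker count.  The buffer must have static storage because
 * putenv() keeps a reference to it.  Function name is made up. */
#include <stdio.h>
#include <stdlib.h>
#include <gc.h>

static void set_marker_count(unsigned n)
{
    static char buf[32];               /* putenv() retains this storage */
    if (!getenv("GC_MARKERS")) {       /* don't override the user's choice */
        snprintf(buf, sizeof(buf), "GC_MARKERS=%u", n);
        putenv(buf);
    }
}

int main(void)
{
    set_marker_count(4);               /* must happen before GC_init() */
    GC_INIT();
    return 0;
}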
int
mono_gc_register_root (char *start, size_t size, void *descr)
{
	/* for some strange reason, they want one extra byte on the end */
	GC_add_roots (start, start + size + 1);
	return TRUE;
}
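/* A hedged usage sketch (illustrative, not from the Mono sources):
 * register one static pointer slot.  Note the descriptor argument is
 * ignored above; this Boehm-based backend always scans conservatively. */
static MonoObject *cached_obj;          /* hypothetical static root */

static void
init_roots (void)
{
	mono_gc_register_root ((char *)&cached_obj, sizeof (cached_obj), NULL);
}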
GC *create_conservative_gc(pointer_iterator rm, void *roots,
                           unsigned int root_size,
                           unsigned int allocations_per_collection)
{
  GC_INIT();
  /* We need to tell the collector about the root set because it was
     not allocated by the garbage collector. */
  GC_add_roots(roots, ((char *)roots) + root_size);
  /* Inform the collector that we will be registering additional
     threads. */
  GC_allow_register_threads();

  GC *g = (GC *) GC_MALLOC(sizeof(GC));
  if (g == NULL)
    return NULL;
  g->protect_ptr_count = 0;
  g->thread_start = conservative_thread_start;
  g->thread_end = conservative_thread_end;
  g->enable_gc = conservative_enable_gc;
  g->disable_gc = conservative_disable_gc;
  g->free_gc = conservative_free_gc;
  g->gc_allocate = conservative_allocate;
  g->collect_garbage = conservative_collect_garbage;
  g->set_gc_default_value = conservative_set_gc_default_value;
  g->set_gc_post_collection_callback =
      conservative_set_gc_post_collection_callback;
  g->set_gc_mode = conservative_set_gc_mode;
  g->set_gc_work_per_alloc = conservative_set_gc_work_per_alloc;
  g->store = conservative_store;
  g->protect_ptr = conservative_protect_ptr;
  g->unprotect_ptr = conservative_unprotect_ptr;
  return g;
}
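/* A hedged usage sketch (assumptions flagged): create the collector for
 * a root buffer that was allocated outside the GC.  my_mark_roots is a
 * hypothetical pointer_iterator callback; its exact signature comes from
 * the surrounding project, not from this snippet. */
extern void my_mark_roots();            /* hypothetical; cast for illustration */

static GC *setup_gc(void)
{
  static void *static_roots[16];        /* not GC-allocated, hence registered */
  return create_conservative_gc((pointer_iterator)my_mark_roots,
                                static_roots, sizeof(static_roots),
                                /* allocations_per_collection = */ 1000);
}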
static char*
process_dynamic_segment (char *base, Elf_Phdr *dyn_phdr,
                         SCM *init_out, SCM *entry_out,
                         char **frame_maps_out)
{
  char *dyn_addr = base + dyn_phdr->p_vaddr;
  Elf_Dyn *dyn = (Elf_Dyn *) dyn_addr;
  size_t i, dyn_size = dyn_phdr->p_memsz / sizeof (Elf_Dyn);
  char *init = 0, *gc_root = 0, *entry = 0, *frame_maps = 0;
  scm_t_ptrdiff gc_root_size = 0;
  enum bytecode_kind bytecode_kind = BYTECODE_KIND_NONE;

  for (i = 0; i < dyn_size; i++)
    {
      if (dyn[i].d_tag == DT_NULL)
        break;

      switch (dyn[i].d_tag)
        {
        case DT_INIT:
          if (init)
            return "duplicate DT_INIT";
          init = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_GC_ROOT:
          if (gc_root)
            return "duplicate DT_GUILE_GC_ROOT";
          gc_root = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_GC_ROOT_SZ:
          if (gc_root_size)
            return "duplicate DT_GUILE_GC_ROOT_SZ";
          gc_root_size = dyn[i].d_un.d_val;
          break;
        case DT_GUILE_ENTRY:
          if (entry)
            return "duplicate DT_GUILE_ENTRY";
          entry = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_VM_VERSION:
          if (bytecode_kind != BYTECODE_KIND_NONE)
            return "duplicate DT_GUILE_VM_VERSION";
          {
            scm_t_uint16 major = dyn[i].d_un.d_val >> 16;
            scm_t_uint16 minor = dyn[i].d_un.d_val & 0xffff;
            switch (major)
              {
              case 0x0202:
                bytecode_kind = BYTECODE_KIND_GUILE_2_2;
                /* As we get closer to 2.2, we will allow for backwards
                   compatibility and we can change this test to ">"
                   instead of "!=".  However until then, to deal with VM
                   churn it's best to keep these things in lock-step.  */
                if (minor != SCM_OBJCODE_MINOR_VERSION)
                  return "incompatible bytecode version";
                break;
              default:
                return "incompatible bytecode kind";
              }
            break;
          }
        case DT_GUILE_FRAME_MAPS:
          if (frame_maps)
            return "duplicate DT_GUILE_FRAME_MAPS";
          frame_maps = base + dyn[i].d_un.d_val;
          break;
        }
    }

  if (!entry)
    return "missing DT_GUILE_ENTRY";

  switch (bytecode_kind)
    {
    case BYTECODE_KIND_GUILE_2_2:
      if ((scm_t_uintptr) init % 4)
        return "unaligned DT_INIT";
      if ((scm_t_uintptr) entry % 4)
        return "unaligned DT_GUILE_ENTRY";
      break;
    case BYTECODE_KIND_NONE:
    default:
      return "missing DT_GUILE_VM_VERSION";
    }

  if (gc_root)
    GC_add_roots (gc_root, gc_root + gc_root_size);

  *init_out = init ? pointer_to_procedure (bytecode_kind, init) : SCM_BOOL_F;
  *entry_out = pointer_to_procedure (bytecode_kind, entry);
  *frame_maps_out = frame_maps;

  return NULL;
}
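/* For orientation, a hedged sketch of how a caller might locate the
 * dynamic segment passed in above: walk the ELF program headers and pick
 * the PT_DYNAMIC entry.  Names follow common ELF conventions; the exact
 * typedefs depend on the surrounding loader code, and the function name
 * here is made up. */
static char*
find_dynamic_segment (char *base, SCM *init_out, SCM *entry_out,
                      char **frame_maps_out)
{
  Elf_Ehdr *header = (Elf_Ehdr *) base;
  Elf_Phdr *ph = (Elf_Phdr *) (base + header->e_phoff);
  size_t i;

  for (i = 0; i < header->e_phnum; i++)
    if (ph[i].p_type == PT_DYNAMIC)
      return process_dynamic_segment (base, &ph[i], init_out, entry_out,
                                      frame_maps_out);

  return "no PT_DYNAMIC segment";
}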
static char*
process_dynamic_segment (char *base, Elf_Phdr *dyn_phdr,
                         SCM *init_out, SCM *entry_out)
{
  char *dyn_addr = base + dyn_phdr->p_vaddr;
  Elf_Dyn *dyn = (Elf_Dyn *) dyn_addr;
  size_t i, dyn_size = dyn_phdr->p_memsz / sizeof (Elf_Dyn);
  char *init = 0, *gc_root = 0, *entry = 0;
  scm_t_ptrdiff gc_root_size = 0;
  enum bytecode_kind bytecode_kind = BYTECODE_KIND_NONE;

  for (i = 0; i < dyn_size; i++)
    {
      if (dyn[i].d_tag == DT_NULL)
        break;

      switch (dyn[i].d_tag)
        {
        case DT_INIT:
          if (init)
            return "duplicate DT_INIT";
          init = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_GC_ROOT:
          if (gc_root)
            return "duplicate DT_GUILE_GC_ROOT";
          gc_root = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_GC_ROOT_SZ:
          if (gc_root_size)
            return "duplicate DT_GUILE_GC_ROOT_SZ";
          gc_root_size = dyn[i].d_un.d_val;
          break;
        case DT_GUILE_ENTRY:
          if (entry)
            return "duplicate DT_GUILE_ENTRY";
          entry = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_RTL_VERSION:
          if (bytecode_kind != BYTECODE_KIND_NONE)
            return "duplicate DT_GUILE_RTL_VERSION";
          {
            scm_t_uint16 major = dyn[i].d_un.d_val >> 16;
            scm_t_uint16 minor = dyn[i].d_un.d_val & 0xffff;
            if (major != 0x0200)
              return "incompatible bytecode kind";
            if (minor > SCM_OBJCODE_MINOR_VERSION)
              return "incompatible bytecode version";
            bytecode_kind = BYTECODE_KIND_GUILE_2_0;
            break;
          }
        }
    }

  if (bytecode_kind != BYTECODE_KIND_GUILE_2_0)
    return "missing DT_GUILE_RTL_VERSION";
  if (init)
    return "unexpected DT_INIT";
  if (!entry)
    return "missing DT_GUILE_ENTRY";
  if ((scm_t_uintptr) entry % 8)
    return "unaligned DT_GUILE_ENTRY";

  if (gc_root)
    GC_add_roots (gc_root, gc_root + gc_root_size);

  *init_out = SCM_BOOL_F;
  *entry_out = pointer_to_procedure (bytecode_kind, entry);

  return NULL;
}
/* Register a single pointer-sized slot as a GC root.  Note that
   arithmetic on void* is a GNU extension, so cast to char* first. */
void gcAddRoot(void *ptr)
{
    GC_add_roots((char *)ptr, (char *)ptr + sizeof(void *));
}
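/* A minimal usage sketch: pin one static pointer slot as a root so the
 * object it refers to survives collection.  Names are illustrative. */
static void *cached_node;               /* hypothetical static root */

static void setup(void)
{
    gcAddRoot(&cached_node);            /* register the slot itself */
    cached_node = GC_MALLOC(128);       /* now reachable via the root */
}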