/* And a debugging version of the above:                        */
void * GC_debug_gcj_fast_malloc(size_t lw,
                                void * ptr_to_struct_containing_descr,
                                GC_EXTRA_PARAMS)
{
    GC_PTR result;
    size_t lb = WORDS_TO_BYTES(lw);

    /* We clone the code from GC_debug_gcj_malloc, so that we   */
    /* don't end up with extra frames on the stack, which could */
    /* confuse the backtrace.                                   */
    LOCK();
    maybe_finalize();
    result = GC_generic_malloc_inner(lb + DEBUG_BYTES, GC_gcj_debug_kind);
    if (result == 0) {
        UNLOCK();
        GC_err_printf2("GC_debug_gcj_fast_malloc(%ld, 0x%lx) returning NIL (",
                       (unsigned long) lw,
                       (unsigned long) ptr_to_struct_containing_descr);
        GC_err_puts(s);
        GC_err_printf1(":%ld)\n", (unsigned long)i);
        return GC_oom_fn(WORDS_TO_BYTES(lw));
    }
    *((void **)((ptr_t)result + sizeof(oh))) = ptr_to_struct_containing_descr;
    UNLOCK();
    if (!GC_debugging_started) {
        GC_start_debugging();
    }
    ADD_CALL_CHAIN(result, ra);
    return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
/* Similar to GC_gcj_malloc, but add debug info.  This is allocated    */
/* with GC_gcj_debug_kind.                                             */
GC_PTR GC_debug_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr,
                           GC_EXTRA_PARAMS)
{
    GC_PTR result;

    /* We're careful to avoid extra calls, which could  */
    /* confuse the backtrace.                           */
    LOCK();
    maybe_finalize();
    result = GC_generic_malloc_inner(lb + DEBUG_BYTES, GC_gcj_debug_kind);
    if (result == 0) {
        UNLOCK();
        GC_err_printf2("GC_debug_gcj_malloc(%ld, 0x%lx) returning NIL (",
                       (unsigned long) lb,
                       (unsigned long) ptr_to_struct_containing_descr);
        GC_err_puts(s);
        GC_err_printf1(":%ld)\n", (unsigned long)i);
        return(GC_oom_fn(lb));
    }
    *((void **)((ptr_t)result + sizeof(oh))) = ptr_to_struct_containing_descr;
    UNLOCK();
    if (!GC_debugging_started) {
        GC_start_debugging();
    }
    ADD_CALL_CHAIN(result, ra);
    return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
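/* Illustrative sketch, not collector code: callers normally reach the  */
/* debug allocator through a wrapper macro that appends GC_EXTRAS       */
/* (i.e. the s and i arguments above, typically __FILE__ and __LINE__). */
/* The MY_GCJ_MALLOC_DBG macro and my_obj_vt vtable are hypothetical.   */
#if 0
#include "gc_gcj.h"

#define MY_GCJ_MALLOC_DBG(lb, vt) GC_debug_gcj_malloc(lb, vt, GC_EXTRAS)

struct my_vtable;                   /* hypothetical gcj-style vtable    */
extern struct my_vtable my_obj_vt;  /* must carry a valid GC descriptor */

void * alloc_my_obj_debug(size_t lb)
{
    /* Expands to GC_debug_gcj_malloc(lb, &my_obj_vt, __FILE__, __LINE__) */
    return MY_GCJ_MALLOC_DBG(lb, &my_obj_vt);
}
#endif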
/* Similar to GC_gcj_malloc, but the size is in words, and we don't    */
/* adjust it.  The size is assumed to be such that it can be           */
/* allocated as a small object.                                        */
void * GC_gcj_fast_malloc(size_t lw, void * ptr_to_struct_containing_descr)
{
    ptr_t op;
    ptr_t * opp;
    DCL_LOCK_STATE;

    opp = &(GC_gcjobjfreelist[lw]);
    LOCK();
    op = *opp;
    if (EXPECT(op == 0, 0)) {
        maybe_finalize();
        op = (ptr_t)GC_clear_stack(
                GC_generic_malloc_words_small_inner(lw, GC_gcj_kind));
        if (0 == op) {
            UNLOCK();
            return GC_oom_fn(WORDS_TO_BYTES(lw));
        }
    } else {
        *opp = obj_link(op);
        GC_words_allocd += lw;
    }
    *(void **)op = ptr_to_struct_containing_descr;
    UNLOCK();
    return((GC_PTR) op);
}
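/* Caller-side sketch (hypothetical helper): lw above is a word count   */
/* that the function does not adjust, so the caller converts bytes to   */
/* words itself and must know the result fits a small-object free list. */
#if 0
void * alloc_three_words(void * vt)
{
    /* ALIGNED_WORDS rounds a byte count up to a whole number of words. */
    return GC_gcj_fast_malloc(ALIGNED_WORDS(3 * sizeof(void *)), vt);
}
#endif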
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;

        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;

            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded;   */
            /* the free list is always visible to the collector as  */
            /* such.                                                */
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}
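/* Usage sketch (hypothetical helper): PTRFREE objects are never        */
/* scanned for pointers, so GC_local_malloc_atomic suits raw byte       */
/* buffers.  The calling thread must be registered with the collector   */
/* so that GC_getspecific(GC_thread_key) yields its free lists.         */
#if 0
#include <string.h>

char * gc_strdup_local(const char * s)
{
    size_t len = strlen(s) + 1;
    char * result = (char *)GC_local_malloc_atomic(len);

    /* The contents are invisible to the marker; do not store the only */
    /* reference to a heap object inside this buffer.                  */
    if (result != 0) memcpy(result, s, len);
    return result;
}
#endif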
int GC_general_register_disappearing_link(void * * link, void * obj)
{
    struct disappearing_link *curr_dl;
    size_t index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;

    if ((word)link & (ALIGNMENT-1))
        ABORT("Bad arg to GC_general_register_disappearing_link");
#   ifdef THREADS
        LOCK();
#   endif
    if (log_dl_table_size == -1
        || GC_dl_entries > ((word)1 << log_dl_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)(&dl_head),
                      &log_dl_table_size);
        if (GC_print_stats) {
            GC_log_printf("Grew dl table to %u entries\n",
                          (1 << log_dl_table_size));
        }
    }
    index = HASH2(link, log_dl_table_size);
    for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
#           ifdef THREADS
                UNLOCK();
#           endif
            return(1);
        }
    }
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link), NORMAL);
    if (0 == new_dl) {
#       ifdef THREADS
            UNLOCK();
#       endif
        new_dl = (struct disappearing_link *)
                      GC_oom_fn(sizeof(struct disappearing_link));
        if (0 == new_dl) {
            GC_finalization_failures++;
            return(2);
        }
        /* It's not likely we'll make it here, but ... */
#       ifdef THREADS
            LOCK();
#       endif
    }
    new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = HIDE_POINTER(link);
    dl_set_next(new_dl, dl_head[index]);
    dl_head[index] = new_dl;
    GC_dl_entries++;
#   ifdef THREADS
        UNLOCK();
#   endif
    return(0);
}
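/* Usage sketch (illustrative names): a disappearing link is a weak     */
/* pointer; the collector clears *link when obj becomes unreachable.    */
/* The return values match the code above: 1 if the link was already    */
/* registered (the object is simply updated), 2 if even GC_oom_fn       */
/* could not supply an entry, 0 on a fresh registration.                */
#if 0
static void * weak_cache;   /* zeroed by the collector when obj dies */

int cache_weakly(void * obj)
{
    weak_cache = obj;
    return GC_general_register_disappearing_link(&weak_cache, obj);
}
#endif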
/* Allocate an object, clear it, and store the pointer to the   */
/* type structure (vtable in gcj).                              */
/* This adds a byte at the end of the object if GC_malloc would.*/
void * GC_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr)
{
    register ptr_t op;
    register ptr_t * opp;
    register word lw;
    DCL_LOCK_STATE;

    if (EXPECT(SMALL_OBJ(lb), 1)) {
#       ifdef MERGE_SIZES
            lw = GC_size_map[lb];
#       else
            lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_gcjobjfreelist[lw]);
        LOCK();
        op = *opp;
        if (EXPECT(op == 0, 0)) {
            maybe_finalize();
            op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
            if (0 == op) {
                UNLOCK();
                return(GC_oom_fn(lb));
            }
#           ifdef MERGE_SIZES
                lw = GC_size_map[lb];   /* May have been uninitialized. */
#           endif
        } else {
            *opp = obj_link(op);
            GC_words_allocd += lw;
        }
        *(void **)op = ptr_to_struct_containing_descr;
        GC_ASSERT(((void **)op)[1] == 0);
        UNLOCK();
    } else {
        LOCK();
        maybe_finalize();
        op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
        if (0 == op) {
            UNLOCK();
            return(GC_oom_fn(lb));
        }
        *(void **)op = ptr_to_struct_containing_descr;
        UNLOCK();
    }
    return((GC_PTR) op);
}
void * GC_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr)
{
    ptr_t op;
    ptr_t * opp;
    word lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = &(GC_gcjobjfreelist[lg]);
        LOCK();
        op = *opp;
        if (EXPECT(op == 0, 0)) {
            maybe_finalize();
            op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
            if (0 == op) {
                UNLOCK();
                return(GC_oom_fn(lb));
            }
        } else {
            *opp = obj_link(op);
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        }
        *(void **)op = ptr_to_struct_containing_descr;
        GC_ASSERT(((void **)op)[1] == 0);
        UNLOCK();
    } else {
        LOCK();
        maybe_finalize();
        op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
        if (0 == op) {
            UNLOCK();
            return(GC_oom_fn(lb));
        }
        *(void **)op = ptr_to_struct_containing_descr;
        UNLOCK();
    }
    return((void *) op);
}
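/* Usage sketch, modeled loosely on the collector's own gctest: a       */
/* gcj-style client registers its mark procedure once with              */
/* GC_init_gcj_malloc, then passes a pointer to a type structure whose  */
/* second word is the GC descriptor; the code above stores that pointer */
/* in the new object's first word.  All names below are hypothetical.   */
#if 0
#include "gc_gcj.h"

struct fake_vtable {        /* stands in for a real gcj vtable          */
    void * dummy;           /* class pointer in real gcj                */
    size_t descr;           /* GC descriptor; here a simple byte length */
};

struct my_pair {
    struct fake_vtable * vt;    /* filled in by GC_gcj_malloc */
    void * car;
    void * cdr;
};

/* Length-based descriptor: scan all of sizeof(struct my_pair). */
static struct fake_vtable pair_vt = { 0, sizeof(struct my_pair) };

struct my_pair * make_pair(void * car, void * cdr)
{
    /* Requires a prior, one-time GC_init_gcj_malloc(...) call. */
    struct my_pair * p = (struct my_pair *)
                GC_gcj_malloc(sizeof(struct my_pair), &pair_vt);

    if (p != 0) { p -> car = car; p -> cdr = cdr; }
    return p;
}
#endif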
GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
            GC_key_t k = GC_thread_key;
#       endif
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
            if (EXPECT(0 == k, 0)) {
                /* This can happen if we get called when the world is   */
                /* being initialized.  Whether we can actually complete */
                /* the initialization then is unclear.                  */
                GC_init_parallel();
                k = GC_thread_key;
            }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
            LOCK();
            GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
            UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;

            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}
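/* Reading aid (not collector code): a thread-local free-list word does */
/* triple duty in the tests above.  A value >= HBLKSIZE is a real       */
/* object pointer; a small nonzero value is a use count, so the test    */
/* "(word)my_entry - 1 < DIRECT_GRANULES" routes infrequently used      */
/* sizes to the global GC_malloc while bumping the count; 0 (an empty   */
/* list) and counts past the threshold fall through to                  */
/* GC_generic_malloc_many, which refills the list in a single call.     */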
GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;

        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;

            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.       */
            /* Thus it is impossible for a mark procedure to see the    */
            /* allocation of the next object, but to see this object    */
            /* still containing a free list pointer.  Otherwise the     */
            /* marker might find a random "mark descriptor".            */
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer. */
            /* Otherwise a GC at this point would see a corrupted       */
            /* free list.                                               */
            /* A memory barrier is probably never needed, since the     */
            /* action of stopping this thread will cause prior writes   */
            /* to complete.                                             */
            GC_ASSERT(((void * volatile *)result)[1] == 0);
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            if (!GC_incremental) *my_fl = my_entry + index + 1;
                /* In the incremental case, we always have to take this */
                /* path.  Thus we leave the counter alone.              */
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind,
                                   my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}
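/* Usage sketch (an assumption based on gc_local_alloc.h): clients do   */
/* not normally call this directly; in builds that redirect to          */
/* thread-local allocation, the GC_GCJ_MALLOC macro resolves here, and  */
/* to GC_gcj_malloc otherwise.                                          */
#if 0
#include "gc_local_alloc.h"

void * alloc_obj(size_t lb, void * vt)
{
    /* Resolves to GC_local_gcj_malloc above under GC_REDIRECT_TO_LOCAL. */
    return GC_GCJ_MALLOC(lb, vt);
}
#endif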
/* Register a finalization function.  See gc.h for details.    */
/* The last parameter is a procedure that determines           */
/* marking for finalization ordering.  Any objects marked      */
/* by that procedure will be guaranteed to not have been       */
/* finalized when this finalizer is invoked.                   */
GC_API void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn,
                                        void **ocd,
                                        finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo;
    hdr *hhdr;
    DCL_LOCK_STATE;

#   ifdef THREADS
        LOCK();
#   endif
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)(&fo_head),
                      &log_fo_table_size);
        if (GC_print_stats) {
            GC_log_printf("Grew fo table to %u entries\n",
                          (1 << log_fo_table_size));
        }
    }
    /* In the THREADS case signals are disabled and we hold the        */
    /* allocation lock; otherwise neither is true.  Proceed carefully. */
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0;
    curr_fo = fo_head[index];
    while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this   */
            /* should be safe.  The client may see only *ocd    */
            /* updated, but we'll declare that to be his        */
            /* problem.                                         */
            if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
            if (prev_fo == 0) {
                fo_head[index] = fo_next(curr_fo);
            } else {
                fo_set_next(prev_fo, fo_next(curr_fo));
            }
            if (fn == 0) {
                GC_fo_entries--;
                /* May not happen if we get a signal.  But a high   */
                /* estimate will only make the table larger than    */
                /* necessary.                                       */
#               if !defined(THREADS) && !defined(DBG_HDRS_ALL)
                    GC_free((void *)curr_fo);
#               endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
                /* Reinsert it.  We deleted it first to maintain    */
                /* consistency in the event of a signal.            */
                if (prev_fo == 0) {
                    fo_head[index] = curr_fo;
                } else {
                    fo_set_next(prev_fo, curr_fo);
                }
            }
#           ifdef THREADS
                UNLOCK();
#           endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#       ifdef THREADS
            UNLOCK();
#       endif
        return;
    }
    GET_HDR(base, hhdr);
    if (0 == hhdr) {
        /* We won't collect it, hence finalizer wouldn't be run. */
#       ifdef THREADS
            UNLOCK();
#       endif
        return;
    }
    new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object), NORMAL);
    if (EXPECT(0 == new_fo, FALSE)) {
#       ifdef THREADS
            UNLOCK();
#       endif
        new_fo = (struct finalizable_object *)
                      GC_oom_fn(sizeof(struct finalizable_object));
        if (0 == new_fo) {
            GC_finalization_failures++;
            return;
        }
        /* It's not likely we'll make it here, but ... */
#       ifdef THREADS
            LOCK();
#       endif
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
#   ifdef THREADS
        UNLOCK();
#   endif
}
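/* Usage sketch: the public entry points (GC_register_finalizer and its */
/* variants) wrap this routine with a particular mark proc.  A typical  */
/* client registration, with illustrative names, looks like this.       */
#if 0
#include <stdio.h>
#include "gc.h"

static void close_handle_fn(void * obj, void * cd)
{
    /* Invoked at most once, after obj is found unreachable. */
    fclose((FILE *)cd);
}

void track(void * obj, FILE * handle)
{
    GC_finalization_proc old_fn;
    void * old_cd;

    /* Passing fn == 0 here would unregister, as in the code above. */
    GC_register_finalizer(obj, close_handle_fn, handle,
                          &old_fn, &old_cd);
}
#endif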