GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t bytes)
{
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *tsd;
    void *result;
    void **tiny_fl;

#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
      GC_key_t k = GC_thread_key;
      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_core_malloc_atomic(bytes);
      }
      tsd = GC_getspecific(k);
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_core_malloc_atomic(bytes);
      }
#   endif
    GC_ASSERT(GC_is_initialized);
    tiny_fl = ((GC_tlfs)tsd) -> ptrfree_freelists;
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         PTRFREE, GC_core_malloc_atomic(bytes),
                         (void)0 /* no init */);
    return result;
}
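
/* A minimal client-side usage sketch (illustrative, not part of this  */
/* file).  GC_malloc_atomic() is for data guaranteed to contain no     */
/* pointers: the collector allocates it from the pointer-free kind and */
/* never scans it, and, unlike GC_malloc(), the memory is not cleared. */
/* The example_* name below is hypothetical.                           */
#include <string.h>
#include "gc.h"

char *example_strdup(const char *s)
{
    size_t len = strlen(s);
    char *copy = (char *)GC_malloc_atomic(len + 1); /* pointer-free bytes */

    if (copy != NULL)
      memcpy(copy, s, len + 1);
    return copy; /* no explicit free needed; collected when unreachable */
}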
GC_API void * GC_generic_malloc_kind(size_t bytes, int kind)
{
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *tsd;
    void *result;
    void **tiny_fl;

#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
      GC_key_t k = GC_thread_key;
      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_generic_malloc_kind_global(bytes, kind);
      }
      tsd = GC_getspecific(k);
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_generic_malloc_kind_global(bytes, kind);
      }
#   endif
    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_is_thread_tsd_valid(tsd));
    tiny_fl = ((GC_tlfs)tsd)->freelists[kind];
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         kind, GC_generic_malloc_kind_global(bytes, kind),
                         obj_link(result) = 0);
    return result;
}
GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
    void * result;
    DCL_LOCK_STATE;

    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb)) {
        LOCK();
        result = GC_generic_malloc_inner(lb, k);
        UNLOCK();
    } else {
        size_t lg;
        size_t lb_rounded;
        word n_blocks;
        GC_bool init;

        lg = ROUNDED_UP_GRANULES(lb);
        lb_rounded = GRANULES_TO_BYTES(lg);
        if (lb_rounded < lb)
            return((*GC_get_oom_fn())(lb));
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        LOCK();
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
        if (0 != result) {
          if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
          } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
              ((word *)result)[0] = 0;
              ((word *)result)[1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
          }
        }
        GC_bytes_allocd += lb_rounded;
        UNLOCK();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_get_oom_fn())(lb));
    } else {
        return(result);
    }
}
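
/* A minimal client-side usage sketch (illustrative, not part of this  */
/* file).  GC_generic_malloc() is the kind-generic slow path; clients  */
/* normally pair it with a kind created via GC_new_kind() and          */
/* GC_new_free_list() from gc_mark.h.  The example_* name and the      */
/* one-time-setup scheme below are hypothetical (real multi-threaded   */
/* code would guard the initialization).                               */
#include "gc.h"
#include "gc_mark.h"

void *example_alloc_all_pointers(size_t lb)
{
    static int example_kind = -1;

    if (example_kind < 0) {
      /* Length-based descriptor: treat the whole object as pointers. */
      example_kind = (int)GC_new_kind(GC_new_free_list(),
                                      GC_DS_LENGTH /* i.e. 0 */,
                                      1 /* add object size to descriptor */,
                                      1 /* clear new objects */);
    }
    return GC_generic_malloc(lb, example_kind);
}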
/* Assumes the collector has been explicitly initialized. */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_gcj_malloc(size_t bytes,
                                    void * ptr_to_struct_containing_descr)
{
    if (EXPECT(GC_incremental, FALSE)) {
        return GC_core_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        size_t granules = ROUNDED_UP_GRANULES(bytes);
        void *result;
        void **tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))
                                        -> gcj_freelists;

        GC_ASSERT(GC_gcj_malloc_initialized);
        GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                             GC_gcj_kind,
                             GC_core_gcj_malloc(bytes,
                                        ptr_to_struct_containing_descr),
                             {AO_compiler_barrier();
                              *(void **)result =
                                        ptr_to_struct_containing_descr;});
        return result;
    }
}
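
/* A minimal client-side usage sketch (illustrative, not part of this  */
/* file), modeled loosely on the collector's own tests.  It assumes    */
/* gc_gcj.h and gc_mark.h; GC_gcj_malloc() expects the object's first  */
/* word to point at a vtable whose second word holds a mark descriptor.*/
/* All example_* names and layouts below are hypothetical.             */
#include "gc.h"
#include "gc_gcj.h"
#include "gc_mark.h"

struct example_vtable {
    void *class_ptr;    /* placeholder for a class pointer     */
    GC_word descr;      /* mark descriptor, read by the marker */
};

struct example_obj {
    const struct example_vtable *vtable; /* must be the first word */
    struct example_obj *left, *right;
};

/* Length-based descriptor: mark the whole object conservatively. */
static struct example_vtable example_vt = {
    0, sizeof(struct example_obj) | GC_DS_LENGTH
};

/* Stub mark procedure; only used for objects allocated through the */
/* debugging interface.                                              */
static struct GC_ms_entry *example_mark_proc(GC_word *addr,
                                struct GC_ms_entry *mark_stack_ptr,
                                struct GC_ms_entry *mark_stack_limit,
                                GC_word env)
{
    (void)addr; (void)mark_stack_limit; (void)env;
    return mark_stack_ptr; /* nothing extra to push */
}

struct example_obj *example_gcj_alloc(void)
{
    GC_INIT();
    GC_init_gcj_malloc(0 /* mark proc index */,
                       (void *)(GC_word)example_mark_proc);
    /* GC_gcj_malloc() itself stores &example_vt into the first word. */
    return (struct example_obj *)GC_gcj_malloc(sizeof(struct example_obj),
                                               &example_vt);
}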
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int knd)
{
    size_t granules;
    void *tsd;
    void *result;

#   if MAXOBJKINDS > THREAD_FREELISTS_KINDS
      if (EXPECT(knd >= THREAD_FREELISTS_KINDS, FALSE)) {
        return GC_malloc_kind_global(bytes, knd);
      }
#   endif
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
    {
      GC_key_t k = GC_thread_key;

      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_malloc_kind_global(bytes, knd);
      }
      tsd = GC_getspecific(k);
    }
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_malloc_kind_global(bytes, knd);
      }
#   endif
    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_is_thread_tsd_valid(tsd));
    granules = ROUNDED_UP_GRANULES(bytes);
    GC_FAST_MALLOC_GRANS(result, granules,
                         ((GC_tlfs)tsd) -> _freelists[knd], DIRECT_GRANULES,
                         knd, GC_malloc_kind_global(bytes, knd),
                         (void)(knd == PTRFREE ? NULL
                                               : (obj_link(result) = 0)));
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
                    (unsigned long)bytes, knd, result,
                    (unsigned long)GC_gc_no);
#   endif
    return result;
}
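
/* A minimal client-side usage sketch (illustrative, not part of this  */
/* file).  It assumes a gc.h recent enough to expose GC_malloc_kind()  */
/* and the standard kind indices GC_I_PTRFREE and GC_I_NORMAL; kinds   */
/* returned by GC_new_kind() may be passed as well.  The example_*     */
/* name is hypothetical.                                               */
#include "gc.h"

void example_malloc_kind(void)
{
    /* Scanned for pointers and returned zero-filled. */
    void **pair = (void **)GC_malloc_kind(2 * sizeof(void *), GC_I_NORMAL);
    /* Never scanned and not cleared: cheaper for bulk numeric data. */
    double *vec = (double *)GC_malloc_kind(64 * sizeof(double),
                                           GC_I_PTRFREE);

    (void)pair;
    (void)vec;
}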
GC_API void * GC_CALL GC_finalized_malloc(size_t client_lb,
                                const struct GC_finalizer_closure *fclos)
{
    size_t lb = client_lb + sizeof(void *);
    size_t lg = ROUNDED_UP_GRANULES(lb);
    GC_tlfs tsd;
    void *result;
    void **tiny_fl, **my_fl, *my_entry;
    void *next;

    if (EXPECT(lg >= GC_TINY_FREELISTS, FALSE))
      return GC_core_finalized_malloc(client_lb, fclos);

    tsd = GC_getspecific(GC_thread_key);
    tiny_fl = tsd->finalized_freelists;
    my_fl = tiny_fl + lg;
    my_entry = *my_fl;
    while (EXPECT((word)my_entry
                  <= DIRECT_GRANULES + GC_TINY_FREELISTS + 1, FALSE)) {
      if ((word)my_entry - 1 < DIRECT_GRANULES) {
        *my_fl = (ptr_t)my_entry + lg + 1;
        return GC_core_finalized_malloc(client_lb, fclos);
      } else {
        GC_generic_malloc_many(GC_RAW_BYTES_FROM_INDEX(lg),
                               GC_finalized_kind, my_fl);
        my_entry = *my_fl;
        if (my_entry == 0) {
          return (*GC_get_oom_fn())(lb);
        }
      }
    }
    next = obj_link(my_entry);
    result = (void *)my_entry;
    *my_fl = next;
    obj_link(result) = 0;
    ((const void **)result)[GRANULES_TO_WORDS(lg) - 1] = fclos;
    PREFETCH_FOR_WRITE(next);
    return result;
}
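
/* A minimal client-side usage sketch (illustrative, not part of this  */
/* file).  It assumes gc_disclaim.h and a collector built with         */
/* ENABLE_DISCLAIM.  As the code above shows, only a pointer to the    */
/* closure is stored in the object's last word, so the closure must    */
/* outlive the object (it is static here).  The example_* names are    */
/* hypothetical.                                                       */
#include <stdio.h>
#include "gc_disclaim.h"

static void GC_CALLBACK example_finalizer(void *obj, void *client_data)
{
    printf("finalizing %p (%s)\n", obj, (const char *)client_data);
}

static const struct GC_finalizer_closure example_fc = {
    example_finalizer, (void *)"example"
};

void *example_finalized_alloc(void)
{
    GC_INIT();
    GC_init_finalized_malloc(); /* must precede GC_finalized_malloc() */
    /* example_finalizer runs when the object becomes unreachable. */
    return GC_finalized_malloc(24, &example_fc);
}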
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t bytes)
{
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *tsd;
    void *result;
    void **tiny_fl;

#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
      GC_key_t k = GC_thread_key;
      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_core_malloc(bytes);
      }
      tsd = GC_getspecific(k);
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_core_malloc(bytes);
      }
#   endif
    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_is_thread_tsd_valid(tsd));
    tiny_fl = ((GC_tlfs)tsd) -> normal_freelists;
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         NORMAL, GC_core_malloc(bytes),
                         obj_link(result) = 0);
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_malloc(%lu) returned %p, recent GC #%lu\n",
                    (unsigned long)bytes, result, (unsigned long)GC_gc_no);
#   endif
    return result;
}
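
/* A minimal client-side usage sketch (illustrative, not part of this  */
/* file).  GC_malloc() returns zero-filled, collector-scanned memory   */
/* with no matching free call.  The example_* names are hypothetical.  */
#include "gc.h"

struct example_node {
    struct example_node *next;
    int value;
};

struct example_node *example_push(struct example_node *head, int value)
{
    struct example_node *n =
        (struct example_node *)GC_malloc(sizeof(struct example_node));

    n->value = value; /* n->next is already 0 since GC_malloc clears it */
    n->next = head;
    return n;
}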