#ifdef THREAD_LOCAL_ALLOC
  GC_INNER void * GC_CALL GC_core_malloc(size_t lb)
#else
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t lb)
#endif
{
    void *op;
    size_t lg;
    DCL_LOCK_STATE;

    if(SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        lg = GC_size_map[lb];
        LOCK();
        op = GC_freelists[NORMAL][lg];
        if (EXPECT(0 == op, FALSE)) {
            UNLOCK();
            return (GENERAL_MALLOC((word)lb, NORMAL));
        }
        GC_ASSERT(0 == obj_link(op)
                  || ((word)obj_link(op)
                            <= (word)GC_greatest_plausible_heap_addr
                      && (word)obj_link(op)
                            >= (word)GC_least_plausible_heap_addr));
        GC_freelists[NORMAL][lg] = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return op;
    } else {
        return(GENERAL_MALLOC(lb, NORMAL));
    }
}
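
/* Illustrative caller-side sketch (not part of this file): allocating  */
/* a small NORMAL (pointer-containing) object through the public API.   */
/* Only the documented entry points GC_INIT() and GC_malloc() are used; */
/* the node type is made up for the example.                            */
#if 0
#include "gc.h"

struct node { struct node *next; int value; };

int main(void)
{
    struct node *head;

    GC_INIT();                               /* initialize the collector */
    head = GC_malloc(sizeof(struct node));   /* traced, collectible      */
    head -> next = NULL;
    head -> value = 42;
    return 0;                                /* no explicit free needed  */
}
#endif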
GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
    void * result;
    DCL_LOCK_STATE;

    if (EXPECT(GC_have_errors, FALSE))
      GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    GC_DBG_COLLECT_AT_MALLOC(lb);
    if (SMALL_OBJ(lb)) {
        LOCK();
        result = GC_generic_malloc_inner(lb, k);
        UNLOCK();
    } else {
        size_t lg;
        size_t lb_rounded;
        word n_blocks;
        GC_bool init;

        lg = ROUNDED_UP_GRANULES(lb);
        lb_rounded = GRANULES_TO_BYTES(lg);
        if (lb_rounded < lb)
            return((*GC_get_oom_fn())(lb));
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        LOCK();
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
        if (0 != result) {
          if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
          } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
              ((word *)result)[0] = 0;
              ((word *)result)[1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
              ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
#           endif
          }
        }
        GC_bytes_allocd += lb_rounded;
        UNLOCK();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_get_oom_fn())(lb));
    } else {
        return(result);
    }
}
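
/* Illustrative sketch (not part of this file): GC_generic_malloc is    */
/* also the entry point used for client-defined object kinds.  The kind */
/* index comes from GC_new_kind()/GC_new_free_list() as declared in     */
/* gc_mark.h; treat the particular descriptor chosen below (a length    */
/* descriptor of 0, i.e. a pointer-free kind) as an assumption made for */
/* this example only.                                                   */
#if 0
#include "gc.h"
#include "gc_mark.h"

static int my_kind;

static void setup_my_kind(void)
{
    /* Pointer-free kind: nothing is traced, new objects are cleared.   */
    my_kind = (int)GC_new_kind(GC_new_free_list(),
                               GC_DS_LENGTH, 0 /* add size */, 1 /* clear */);
}

static void * my_alloc(size_t lb)
{
    return GC_generic_malloc(lb, my_kind);   /* takes the path above     */
}
#endif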
/* Allocate lb bytes of pointerful, traced, but not collectible data.  */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
    void *op;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                    /* We don't need the extra byte, since this won't be */
                    /* collected anyway.                                 */
        lg = GC_size_map[lb];
        LOCK();
        op = GC_freelists[UNCOLLECTABLE][lg];
        if (EXPECT(op != 0, TRUE)) {
            GC_freelists[UNCOLLECTABLE][lg] = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be       */
            /* cleared only temporarily during a collection, as a       */
            /* result of the normal free list mark bit clearing.        */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = GC_generic_malloc(lb, UNCOLLECTABLE);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
    } else {
        hdr * hhdr;

        op = GC_generic_malloc(lb, UNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        hhdr = HDR(op);
        /* We don't need the lock here, since we have an undisguised    */
        /* pointer.  We do need to hold the lock while we adjust        */
        /* mark bits.                                                   */
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
#       ifndef THREADS
          GC_ASSERT(hhdr -> hb_n_marks == 0);
                /* This is not guaranteed in the multi-threaded case    */
                /* because the counter could be updated before locking. */
#       endif
        hhdr -> hb_n_marks = 1;
        UNLOCK();
    }
    return op;
}
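
/* Illustrative caller-side sketch (not part of this file): an          */
/* uncollectable object acts as a root, so heap objects reachable only  */
/* from it stay live; it is never reclaimed automatically and must be   */
/* released explicitly with GC_free().  The registry type is made up    */
/* for the example.                                                      */
#if 0
#include "gc.h"

struct registry { void *entries[64]; };

static struct registry *global_registry;

static void example(void)
{
    global_registry = GC_malloc_uncollectable(sizeof(struct registry));
    global_registry -> entries[0] = GC_malloc(128);  /* kept live        */
    /* ... use the registry ... */
    GC_free(global_registry);            /* explicit release is required */
    global_registry = NULL;
}
#endif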
#ifdef THREAD_LOCAL_ALLOC
  GC_INNER void * GC_CALL GC_core_finalized_malloc(size_t lb,
                                const struct GC_finalizer_closure *fclos)
#else
  GC_API void * GC_CALL GC_finalized_malloc(size_t lb,
                                const struct GC_finalizer_closure *fclos)
#endif
{
    ptr_t op;
    word lg;
    DCL_LOCK_STATE;

    lb += sizeof(void *);
    GC_ASSERT(done_init);
    if (SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        lg = GC_size_map[lb];
        LOCK();
        op = GC_finalized_objfreelist[lg];
        if (EXPECT(0 == op, FALSE)) {
            UNLOCK();
            op = GC_generic_malloc(lb, GC_finalized_kind);
            if (NULL == op)
                return NULL;
            /* GC_generic_malloc has extended the size map for us.      */
            lg = GC_size_map[lb];
        } else {
            GC_finalized_objfreelist[lg] = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            UNLOCK();
        }
        GC_ASSERT(lg > 0);
        ((const void **)op)[GRANULES_TO_WORDS(lg) - 1] = fclos;
    } else {
        size_t op_sz;

        op = GC_generic_malloc(lb, GC_finalized_kind);
        if (NULL == op)
            return NULL;
        op_sz = GC_size(op);
        GC_ASSERT(op_sz >= lb);
        ((const void **)op)[op_sz / sizeof(void *) - 1] = fclos;
    }
    return GC_clear_stack(op);
}
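
/* Illustrative caller-side sketch (not part of this file): the closure */
/* pointer stored in the last word above is read back when the object   */
/* becomes unreachable.  The struct layout (proc/cd) and the calls      */
/* GC_init_finalized_malloc()/GC_finalized_malloc() are taken from      */
/* gc_disclaim.h; treat the exact field names as an assumption here.    */
#if 0
#include "gc.h"
#include "gc_disclaim.h"
#include <stdio.h>

static void GC_CALLBACK on_reclaim(void *obj, void *client_data)
{
    (void)obj;
    printf("finalizing: %s\n", (const char *)client_data);
}

/* The closure must outlive the objects that reference it.              */
static const struct GC_finalizer_closure fc = { on_reclaim, (void *)"demo" };

static void example(void)
{
    GC_INIT();
    GC_init_finalized_malloc();          /* must precede the first call  */
    (void)GC_finalized_malloc(24, &fc);  /* on_reclaim runs when dead    */
}
#endif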
#ifdef THREAD_LOCAL_ALLOC
  GC_INNER void * GC_CALL GC_core_malloc_atomic(size_t lb)
#else
  GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t lb)
#endif
{
    void *op;
    size_t lg;
    DCL_LOCK_STATE;

    if(SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        lg = GC_size_map[lb];
        LOCK();
        op = GC_freelists[PTRFREE][lg];
        if (EXPECT(0 == op, FALSE)) {
            UNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        GC_freelists[PTRFREE][lg] = obj_link(op);
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return((void *) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}
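
/* Illustrative caller-side sketch (not part of this file): atomic      */
/* (PTRFREE) objects are not scanned for pointers, which makes them     */
/* suitable for character or numeric buffers; unlike GC_malloc, the     */
/* returned memory is not guaranteed to be zero-filled.                 */
#if 0
#include "gc.h"
#include <string.h>

static char *copy_string(const char *s)
{
    size_t len = strlen(s) + 1;
    char *result = GC_malloc_atomic(len);   /* collector will not scan it */

    if (result != NULL)
        memcpy(result, s, len);
    return result;
}
#endif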