/* Allocate lb bytes for an object of kind k.  This version assumes    */
/* that the caller already holds the allocation lock.                  */
void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        if ((op = *opp) == 0) {
            if (GC_size_map[lb] == 0) {
                if (!GC_is_initialized) GC_init_inner();
                if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                /* Retry with the now-extended size map. */
                return(GC_generic_malloc_inner(lb, k));
            }
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lg, k);
            if (op == 0) goto out;
        }
        /* Unlink the object from the free list and clear its link field. */
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        GC_bytes_allocd += lb;
    }

  out:
    return op;
}
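/*
 * Illustrative sketch (not part of this file): the "_inner" suffix and the
 * comment above mean the allocation lock is already held by the caller, so
 * any public entry point has to bracket the call with the collector's
 * LOCK()/UNLOCK() macros.  The wrapper below is a minimal example of that
 * calling convention; its name is hypothetical, and the real public
 * allocator does additional work (out-of-memory handling, etc.).
 */
void * GC_generic_malloc_locked_example(size_t lb, int k)
{
    void *result;

    LOCK();                     /* acquire the allocation lock           */
    result = GC_generic_malloc_inner(lb, k);
    UNLOCK();                   /* release it before returning to caller */
    return result;
}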
/* EXTRA_BYTES were already added to lb. */
ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
    struct hblk * h;
    word n_blocks;
    ptr_t result;

    /* Round up to a multiple of a granule. */
    lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work. */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lb, k, flags);
#   ifdef USE_MUNMAP
      if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lb, k, flags);
      }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
        h = GC_allochblk(lb, k, flags);
    }
    if (h == 0) {
        result = 0;
    } else {
        size_t total_bytes = n_blocks * HBLKSIZE;

        if (n_blocks > 1) {
            GC_large_allocd_bytes += total_bytes;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        result = h -> hb_body;
    }
    return result;
}
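/*
 * Illustrative sketch (not part of this file): GC_generic_malloc_inner above
 * relies on GC_alloc_large_and_clear(), which is not shown in this excerpt.
 * Assuming it is a thin wrapper that calls GC_alloc_large() and zeroes the
 * block only when the object kind asks for initialization, it would look
 * roughly as follows.  The ok_init field, BZERO(), and OBJ_SZ_TO_BLOCKS()
 * are names used elsewhere in the collector; treat the exact body as an
 * assumption, not the collector's actual implementation.
 */
ptr_t GC_alloc_large_and_clear_sketch(size_t lb, int k, unsigned flags)
{
    ptr_t result = GC_alloc_large(lb, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

    if (result == 0) return 0;
    if (GC_obj_kinds[k].ok_init) {
        /* Clear the whole block so no stale pointers survive in it. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}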
int GC_pthread_create(pthread_t *new_thread,
                      const pthread_attr_t *attr_in,
                      void * (*thread_execp)(void *), void *arg)
{
    int result;
    GC_thread t;
    pthread_t my_new_thread;
    pthread_attr_t attr;
    word my_flags = 0;
    int flag;
    void * stack = 0;
    size_t stack_size = 0;
    int n;
    struct sched_param schedparam;

    (void)pthread_attr_init(&attr);
    if (attr_in != 0) {
        (void)pthread_attr_getstacksize(attr_in, &stack_size);
        (void)pthread_attr_getstackaddr(attr_in, &stack);
    }

    LOCK();
    if (!GC_is_initialized) {
        GC_init_inner();
    }
    GC_multithreaded++;

    if (stack == 0) {
        if (stack_size == 0)
            stack_size = 1048576;
            /* 1 MB (this was GC_min_stack_sz, but that violates the    */
            /* pthread_create documentation, which says the default     */
            /* value if none is supplied is 1 MB).                      */
        else
            stack_size += thr_min_stack();

        stack = (void *)GC_stack_alloc(&stack_size);
        if (stack == 0) {
            GC_multithreaded--;
            UNLOCK();
            errno = ENOMEM;
            return -1;
        }
    } else {
        my_flags |= CLIENT_OWNS_STACK;
    }
    (void)pthread_attr_setstacksize(&attr, stack_size);
    (void)pthread_attr_setstackaddr(&attr, stack);
    if (attr_in != 0) {
        (void)pthread_attr_getscope(attr_in, &n);
        (void)pthread_attr_setscope(&attr, n);
        (void)pthread_attr_getschedparam(attr_in, &schedparam);
        (void)pthread_attr_setschedparam(&attr, &schedparam);
        (void)pthread_attr_getschedpolicy(attr_in, &n);
        (void)pthread_attr_setschedpolicy(&attr, n);
        (void)pthread_attr_getinheritsched(attr_in, &n);
        (void)pthread_attr_setinheritsched(&attr, n);
        (void)pthread_attr_getdetachstate(attr_in, &flag);
        if (flag == PTHREAD_CREATE_DETACHED) {
            my_flags |= DETACHED;
        }
        (void)pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
    }
    /*
     * pthread_create can call malloc(), which if redirected will
     * attempt to acquire the allocation lock.
     * Unlock here to prevent deadlock.
     */
#   if 0
#     ifdef I386
        UNLOCK();
#     endif
#   endif
    result = pthread_create(&my_new_thread, &attr, thread_execp, arg);
#   if 0
#     ifdef I386
        LOCK();
#     endif
#   endif
    if (result == 0) {
        t = GC_new_thread(my_new_thread);
        t -> flags = my_flags;
        if (!(my_flags & DETACHED))
            cond_init(&(t -> join_cv), USYNC_THREAD, 0);
        t -> stack = stack;
        t -> stack_size = stack_size;
        if (new_thread != 0) *new_thread = my_new_thread;
        pthread_cond_signal(&GC_create_cv);
    } else {
        if (!(my_flags & CLIENT_OWNS_STACK)) {
            GC_stack_free(stack, stack_size);
        }
        GC_multithreaded--;
    }
    UNLOCK();
    pthread_attr_destroy(&attr);
    return(result);
}
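/*
 * Illustrative usage sketch: GC_pthread_create is normally reached through
 * the collector's pthread redirection (gc.h, with GC_THREADS defined,
 * typically redefines pthread_create to GC_pthread_create), so client code
 * creates threads as usual and the collector registers the new thread and
 * its stack.  The example below assumes only GC_INIT(), GC_MALLOC(), and
 * that redirection; it is a sketch, not part of the collector.
 */
#include <pthread.h>
#include "gc.h"

static void * worker(void *arg)
{
    /* Memory allocated here is traced by the collector; no explicit free. */
    char *buf = (char *)GC_MALLOC(64);

    if (buf != 0) buf[0] = 'x';
    (void)arg;
    return buf;
}

int main(void)
{
    pthread_t tid;

    GC_INIT();
    /* With the gc.h redirection this calls GC_pthread_create. */
    if (pthread_create(&tid, 0, worker, 0) != 0) return 1;
    (void)pthread_join(tid, 0);
    return 0;
}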