int GC_pthread_create(pthread_t *new_thread, const pthread_attr_t *attr_in, void * (*thread_execp)(void *), void *arg) { int result; GC_thread t; pthread_t my_new_thread; pthread_attr_t attr; word my_flags = 0; int flag; void * stack = 0; size_t stack_size = 0; int n; struct sched_param schedparam; (void)pthread_attr_init(&attr); if (attr_in != 0) { (void)pthread_attr_getstacksize(attr_in, &stack_size); (void)pthread_attr_getstackaddr(attr_in, &stack); } LOCK(); if (!GC_is_initialized) { GC_init_inner(); } GC_multithreaded++; if (stack == 0) { if (stack_size == 0) stack_size = 1048576; /* ^-- 1 MB (this was GC_min_stack_sz, but that * violates the pthread_create documentation which * says the default value if none is supplied is * 1MB) */ else stack_size += thr_min_stack(); stack = (void *)GC_stack_alloc(&stack_size); if (stack == 0) { GC_multithreaded--; UNLOCK(); errno = ENOMEM; return -1; } } else { my_flags |= CLIENT_OWNS_STACK; } (void)pthread_attr_setstacksize(&attr, stack_size); (void)pthread_attr_setstackaddr(&attr, stack); if (attr_in != 0) { (void)pthread_attr_getscope(attr_in, &n); (void)pthread_attr_setscope(&attr, n); (void)pthread_attr_getschedparam(attr_in, &schedparam); (void)pthread_attr_setschedparam(&attr, &schedparam); (void)pthread_attr_getschedpolicy(attr_in, &n); (void)pthread_attr_setschedpolicy(&attr, n); (void)pthread_attr_getinheritsched(attr_in, &n); (void)pthread_attr_setinheritsched(&attr, n); (void)pthread_attr_getdetachstate(attr_in, &flag); if (flag == PTHREAD_CREATE_DETACHED) { my_flags |= DETACHED; } (void)pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); } /* * thr_create can call malloc(), which if redirected will * attempt to acquire the allocation lock. * Unlock here to prevent deadlock. 
*/ #if 0 #ifdef I386 UNLOCK(); #endif #endif result = pthread_create(&my_new_thread, &attr, thread_execp, arg); #if 0 #ifdef I386 LOCK(); #endif #endif if (result == 0) { t = GC_new_thread(my_new_thread); t -> flags = my_flags; if (!(my_flags & DETACHED)) cond_init(&(t->join_cv), USYNC_THREAD, 0); t -> stack = stack; t -> stack_size = stack_size; if (new_thread != 0) *new_thread = my_new_thread; pthread_cond_signal(&GC_create_cv); } else { if (!(my_flags & CLIENT_OWNS_STACK)) { GC_stack_free(stack, stack_size); } GC_multithreaded--; } UNLOCK(); pthread_attr_destroy(&attr); return(result); }
/* We hold the allocation lock. */ void GC_thr_init(void) { # ifndef GC_DARWIN_THREADS int dummy; # endif GC_thread t; if (GC_thr_initialized) return; GC_thr_initialized = TRUE; # ifdef HANDLE_FORK /* Prepare for a possible fork. */ pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc, GC_fork_child_proc); # endif /* HANDLE_FORK */ # if defined(INCLUDE_LINUX_THREAD_DESCR) /* Explicitly register the region including the address */ /* of a thread local variable. This should include thread */ /* locals for the main thread, except for those allocated */ /* in response to dlopen calls. */ { ptr_t thread_local_addr = (ptr_t)(&dummy_thread_local); ptr_t main_thread_start, main_thread_end; if (!GC_enclosing_mapping(thread_local_addr, &main_thread_start, &main_thread_end)) { ABORT("Failed to find mapping for main thread thread locals"); } GC_add_roots_inner(main_thread_start, main_thread_end, FALSE); } # endif /* Add the initial thread, so we can stop it. */ t = GC_new_thread(pthread_self()); # ifdef GC_DARWIN_THREADS t -> stop_info.mach_thread = mach_thread_self(); # else t -> stop_info.stack_ptr = (ptr_t)(&dummy); # endif t -> flags = DETACHED | MAIN_THREAD; GC_stop_init(); /* Set GC_nprocs. 
*/ { char * nprocs_string = GETENV("GC_NPROCS"); GC_nprocs = -1; if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string); } if (GC_nprocs <= 0) { # if defined(GC_HPUX_THREADS) GC_nprocs = pthread_num_processors_np(); # endif # if defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \ || defined(GC_SOLARIS_THREADS) || defined(GC_GNU_THREADS) GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN); if (GC_nprocs <= 0) GC_nprocs = 1; # endif # if defined(GC_IRIX_THREADS) GC_nprocs = sysconf(_SC_NPROC_ONLN); if (GC_nprocs <= 0) GC_nprocs = 1; # endif # if defined(GC_NETBSD_THREADS) GC_nprocs = get_ncpu(); # endif # if defined(GC_OPENBSD_THREADS) GC_nprocs = 1; # endif # if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) int ncpus = 1; size_t len = sizeof(ncpus); sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
/*
 * Register the calling thread with the collector and return its
 * thread-table entry.
 * This may be called from DllMain, and hence operates under unusual
 * constraints.  In particular, it must be lock-free if
 * GC_win32_dll_threads is set.  Always called from the thread being
 * added.  If GC_win32_dll_threads is not set, we already hold the
 * allocation lock, except possibly during single-threaded start-up
 * code.
 */
static GC_thread GC_register_my_thread_inner(struct GC_stack_base *sb,
                                             DWORD thread_id)
{
  GC_vthread me;

  /* The following should be a noop according to the win32      */
  /* documentation.  There is empirical evidence that it        */
  /* isn't.             - HB                                    */
# if defined(MPROTECT_VDB)
#   if defined(GWW_VDB)
      if (GC_incremental && !GC_gww_dirty_init())
        SetUnhandledExceptionFilter(GC_write_fault_handler);
#   else
      if (GC_incremental)
        SetUnhandledExceptionFilter(GC_write_fault_handler);
#   endif
# endif

  if (GC_win32_dll_threads) {
    int i;
    /* It appears to be unsafe to acquire a lock here, since this     */
    /* code is apparently not preeemptible on some systems.           */
    /* (This is based on complaints, not on Microsoft's official      */
    /* documentation, which says this should perform "only simple     */
    /* initialization tasks".)                                        */
    /* Hence we make do with nonblocking synchronization.             */
    /* It has been claimed that DllMain is really only executed with  */
    /* a particular system lock held, and thus careful use of locking */
    /* around code that doesn't call back into the system libraries   */
    /* might be OK.  But this hasn't been tested across all win32     */
    /* variants.                                                      */
    /* Claim the first free slot by atomically setting its in_use     */
    /* flag; a nonzero previous value means the slot was taken.       */
    /* cast away volatile qualifier */
    for (i = 0;
         InterlockedExchange((IE_t)&dll_thread_table[i].in_use,1) != 0;
         i++) {
      /* Compare-and-swap would make this cleaner, but that's not     */
      /* supported before Windows 98 and NT 4.0.  In Windows 2000,    */
      /* InterlockedExchange is supposed to be replaced by            */
      /* InterlockedExchangePointer, but that's not really what I     */
      /* want here.                                                   */
      /* FIXME: We should eventually declare Win95 dead and use AO_   */
      /* primitives here.                                             */
      if (i == MAX_THREADS - 1)
        ABORT("too many threads");
    }
    /* Update GC_max_thread_index if necessary.  The following is safe, */
    /* and unlike CompareExchange-based solutions seems to work on all  */
    /* Windows95 and later platforms.                                   */
    /* Unfortunately, GC_max_thread_index may be temporarily out of     */
    /* bounds, so readers have to compensate.                           */
    while (i > GC_max_thread_index) {
      InterlockedIncrement((IE_t)&GC_max_thread_index);
    }
    if (GC_max_thread_index >= MAX_THREADS) {
      /* We overshot due to simultaneous increments. */
      /* Setting it to MAX_THREADS-1 is always safe. */
      GC_max_thread_index = MAX_THREADS - 1;
    }
    me = dll_thread_table + i;
  } else /* Not using DllMain */ {
    GC_ASSERT(I_HOLD_LOCK());
    GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
    me = GC_new_thread(thread_id);
    GC_in_thread_creation = FALSE;
  }
# ifdef GC_PTHREADS
    /* me can be NULL -> segfault */
    me -> pthread_id = pthread_self();
# endif

  /* Obtain a real (non-pseudo) handle to this thread so the collector */
  /* can suspend/resume it later.                                      */
  if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                       GetCurrentProcess(), (HANDLE*)&(me -> handle),
                       0, 0, DUPLICATE_SAME_ACCESS)) {
    DWORD last_error = GetLastError();

    /* FIX: DWORD is an unsigned (long) type on win32; passing it for */
    /* "%d" is undefined behavior per the fprintf contract.  Cast to  */
    /* the type the conversion expects.                               */
    GC_err_printf("Last error code: %d\n", (int)last_error);
    ABORT("DuplicateHandle failed");
  }
  me -> stack_base = sb -> mem_base;
  /* Up until this point, GC_push_all_stacks considers this thread     */
  /* invalid.                                                          */
  /* Up until this point, this entry is viewed as reserved but invalid */
  /* by GC_delete_thread.                                              */
  me -> id = thread_id;
# if defined(THREAD_LOCAL_ALLOC)
    GC_init_thread_local((GC_tlfs)(&(me->tlfs)));
# endif
  if (me -> stack_base == NULL)
    ABORT("Bad stack base in GC_register_my_thread_inner");

  if (GC_win32_dll_threads) {
    if (GC_please_stop) {
      AO_store(&GC_attached_thread, TRUE);
      AO_nop_full(); /* Later updates must become visible after this. */
    }
    /* We'd like to wait here, but can't, since waiting in DllMain    */
    /* provokes deadlocks.                                            */
    /* Thus we force marking to be restarted instead.                 */
  } else {
    GC_ASSERT(!GC_please_stop);
    /* Otherwise both we and the thread stopping code would be        */
    /* holding the allocation lock.                                   */
  }
  return (GC_thread)(me);
}