// Worker-thread entry point for the host thread pool.
// Binds the calling thread to a processing unit, registers it in m_thread[],
// runs the execute/wait work loop until deactivated, and finally notifies
// the master thread of termination.  The HostThread object lives on this
// thread's stack, so it must be unregistered before the function returns.
void HostInternal::driver( const size_t thread_rank )
{
  // Bind this thread to a unique processing unit
  // with all members of a gang in the same NUMA region.
  if ( bind_thread( thread_rank ) ) {
    // Per-thread state lives on this stack frame for the thread's lifetime.
    HostThread this_thread ;

    m_thread[ thread_rank ] = & this_thread ;

    // Initialize thread ranks and fan-in relationships:
    if ( initialize_thread( thread_rank , this_thread ) ) {

      // Inform master thread that binding and initialization succeeded.
      m_master_thread.set( HostThread::ThreadActive );

      try {
        // Work loop:
        while ( HostThread::ThreadActive == this_thread.m_state ) {

          // When the work is complete the state will be Inactive or Terminate
          m_worker->execute_on_thread( this_thread );

          // If this_thread is in the Inactive state then wait for activation.
          this_thread.wait( HostThread::ThreadInactive );
        }
      }
      catch( const std::exception & x ) {
        // mfh 29 May 2012: Doesn't calling std::terminate() seem a
        // little violent?  On the other hand, C++ doesn't define how
        // to transport exceptions between threads (until C++11).
        // Since this is a worker thread, it would be hard to tell the
        // master thread what happened.
        std::cerr << "Thread " << thread_rank << " uncaught exception : "
                  << x.what() << std::endl ;
        std::terminate();
      }
      catch( ... ) {
        // mfh 29 May 2012: See note above on std::terminate().
        std::cerr << "Thread " << thread_rank << " uncaught exception"
                  << std::endl ;
        std::terminate();
      }
    }
  }

  // Notify master thread that this thread has terminated.
  // Clear the registry slot first: the stack-allocated HostThread is about
  // to go out of scope, so no other thread may dereference it afterwards.
  m_thread[ thread_rank ] = 0 ;

  m_master_thread.set( HostThread::ThreadTerminating );
}
// One-shot initialization of the utility module's subsystems.
// NOTE(review): the call order presumably reflects initialization
// dependencies between subsystems (e.g. trace/serializer before the
// script-state layers) — confirm before reordering any of these calls.
void initialize_util_module() {
    initialize_debug();
    initialize_trace();
    initialize_serializer();
    initialize_thread();
    initialize_ascii();
    initialize_thread_script_state();
    initialize_script_state();
    initialize_name();
    initialize_name_generator();
    initialize_lean_path();
}
// Spawn the pool of worker threads: gang_count * worker_count threads total.
// The calling (master/process) thread occupies rank 0; ranks are spawned
// last-to-first so each thread's fan-in barrier relationships can be
// established against already-running higher ranks.
// Returns true on success; on any failure the partially-built pool is torn
// down via finalize() and false is returned.
bool HostInternal::spawn_threads( const unsigned gang_count ,
                                  const unsigned worker_count )
{
  // If the process is bound to a particular node
  // then only use cores belonging to that node.
  // Otherwise use all nodes and all their cores.

  m_gang_count = gang_count ;
  m_worker_count = worker_count ;
  m_thread_count = gang_count * worker_count ;
  m_worker = & m_worker_block ;

  // Bind the process thread as thread_rank == 0
  bool ok_spawn_threads = bind_thread( 0 );

  // Spawn threads from last-to-first so that the
  // fan-in barrier thread relationships can be established.
  // Note: pre-decrement means ranks m_thread_count-1 .. 1 are spawned;
  // rank 0 is the master thread and is never spawned.
  for ( unsigned rank = m_thread_count ; ok_spawn_threads && 0 < --rank ; ) {

    // Master advertises Inactive; the spawned thread flips it to Active
    // (success) or leaves/overwrites it otherwise — this is the handshake.
    m_master_thread.set( HostThread::ThreadInactive );

    // Spawn thread executing the 'driver' function.
    ok_spawn_threads = spawn( rank );

    if ( ok_spawn_threads ) {
      // Thread spawned, wait for thread to activate:
      m_master_thread.wait( HostThread::ThreadInactive );

      // Check if the thread initialized and bound correctly:
      ok_spawn_threads = HostThread::ThreadActive == m_master_thread.m_state ;

      if ( ok_spawn_threads ) {
        // Wait for spawned thread to deactivate.
        // Read through a volatile pointer so the compiler re-loads the
        // slot the worker thread just published rather than caching it.
        HostThread * volatile * const threads = m_thread ;
        threads[ rank ]->wait( HostThread::ThreadActive );
        // m_thread[ rank ]->wait( HostThread::ThreadActive );
      }
    }
  }

  m_worker = NULL ;

  // All threads spawned, initialize the master-thread fan-in
  ok_spawn_threads = ok_spawn_threads &&
                     initialize_thread( 0 , m_master_thread );

  if ( ! ok_spawn_threads ) {
    // Tear down whatever portion of the pool was successfully started.
    finalize();
  }

  return ok_spawn_threads ;
}
int main(int argc, char *argv[]) { print_init(); if(argc < 4) { printf("Usage: server [first addr] [first port] [last port]\n"); } first_name = argv[1]; port_first = atoi(argv[2]); port_last = atoi(argv[3]); sensor_timeout = DEF_SENSOR_TIMEOUT; sensor_period = DEF_SENSOR_PERIOD; if(initialize_sockets() < 0) { return -1; } initilize_sockaddr(); if(bind(sock_last, (struct sockaddr*) &last_addr, sizeof(last_addr)) < 0) { print_error("Failed to bind socket"); return -1; } print_success("Server launched"); if(initialize_thread() < 0) { print_error("Failed to init thread"); return -1; } if(pthread_create(&reconf_thread, NULL, reconf_fun, NULL) != 0) { return -1; } last_loop(); pthread_join(first_thread, NULL); return 0; }
/*
 * Create a new kernel thread within the process of the calling thread td.
 *
 * rtp, if non-NULL, requests a scheduling class/priority for the new
 * thread (root-only for realtime/FIFO).  initialize_thread is a caller
 * supplied callback that finishes machine/ABI-specific setup of the new
 * thread using the opaque thunk argument.
 *
 * Returns 0 on success or an errno value (EPERM, EINVAL, EPROCLIM, or
 * whatever kern_thr_alloc/initialize_thread report).  On failure the
 * RACCT thread accounting taken up front is rolled back at the fail label.
 */
int
thread_create(struct thread *td, struct rtprio *rtp,
    int (*initialize_thread)(struct thread *, void *), void *thunk)
{
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	/* Validate the requested scheduling parameters before allocating. */
	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	/* Charge the new thread against the process's resource accounting. */
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our td */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_set_upcall(newtd, td);

	/*
	 * Zero the start-zero region and copy the start-copy region from
	 * the creating thread, per the struct thread layout contract.
	 */
	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	thread_cow_get(newtd, td);

	/* Caller-specific setup; on failure undo the allocation. */
	error = initialize_thread(newtd, thunk);
	if (error != 0) {
		thread_cow_free(newtd);
		thread_free(newtd);
		goto fail;
	}

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	/* Propagate the process's PaX flags to the new thread. */
	newtd->td_pax = p->p_pax;
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	if (p->p_flag2 & P2_LWP_EVENTS)
		newtd->td_dbgflags |= TDB_BORN;

	/*
	 * Copy the existing thread VM policy into the new thread.
	 */
	vm_domain_policy_localcopy(&newtd->td_vm_dom_policy,
	    &td->td_vm_dom_policy);

	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		    rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	/* Roll back the thread count charged above. */
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}
/*
 * Create a new kernel thread within the process of the calling thread td.
 * (Second variant: uses cpu_copy_thread, clears the robust-mutex lists,
 * checks ptrace LWP events, and emits HWPMC thread-creation hooks.)
 *
 * rtp, if non-NULL, requests a scheduling class/priority for the new
 * thread (root-only for realtime/FIFO).  initialize_thread is a caller
 * supplied callback that finishes setup of the new thread using thunk.
 *
 * Returns 0 on success or an errno value (EPERM, EINVAL, EPROCLIM, or
 * whatever kern_thr_alloc/initialize_thread report).  On failure the
 * RACCT thread accounting taken up front is rolled back at the fail label.
 */
int
thread_create(struct thread *td, struct rtprio *rtp,
    int (*initialize_thread)(struct thread *, void *), void *thunk)
{
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	/* Validate the requested scheduling parameters before allocating. */
	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	/* Charge the new thread against the process's resource accounting. */
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our td */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_copy_thread(newtd, td);

	/*
	 * Zero the start-zero region and copy the start-copy region from
	 * the creating thread, per the struct thread layout contract.
	 */
	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	/* Fresh thread: no inherited robust-mutex list state. */
	newtd->td_rb_list = newtd->td_rbp_list = newtd->td_rb_inact = 0;
	thread_cow_get(newtd, td);

	/* Caller-specific setup; on failure undo the allocation. */
	error = initialize_thread(newtd, thunk);
	if (error != 0) {
		thread_cow_free(newtd);
		thread_free(newtd);
		goto fail;
	}

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	if (p->p_ptevents & PTRACE_LWP)
		newtd->td_dbgflags |= TDB_BORN;
	PROC_UNLOCK(p);
#ifdef HWPMC_HOOKS
	/* Inform performance-monitoring counters of the new thread. */
	if (PMC_PROC_IS_USING_PMCS(p))
		PMC_CALL_HOOK(newtd, PMC_FN_THR_CREATE, NULL);
	else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(newtd, PMC_FN_THR_CREATE_LOG, NULL);
#endif

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		    rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	/* Roll back the thread count charged above. */
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}