int MPIR_Init_async_thread(void) { #if MPICH_THREAD_LEVEL == MPI_THREAD_MULTIPLE int mpi_errno = MPI_SUCCESS; MPIR_Comm *comm_self_ptr; int err = 0; MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPIR_INIT_ASYNC_THREAD); MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPIR_INIT_ASYNC_THREAD); /* Dup comm world for the progress thread */ MPIR_Comm_get_ptr(MPI_COMM_SELF, comm_self_ptr); mpi_errno = MPIR_Comm_dup_impl(comm_self_ptr, &progress_comm_ptr); if (mpi_errno) MPIR_ERR_POP(mpi_errno); MPID_Thread_cond_create(&progress_cond, &err); MPIR_ERR_CHKANDJUMP1(err, mpi_errno, MPI_ERR_OTHER, "**cond_create", "**cond_create %s", strerror(err)); MPID_Thread_mutex_create(&progress_mutex, &err); MPIR_ERR_CHKANDJUMP1(err, mpi_errno, MPI_ERR_OTHER, "**mutex_create", "**mutex_create %s", strerror(err)); MPID_Thread_create((MPID_Thread_func_t) progress_fn, NULL, &progress_thread_id, &err); MPIR_ERR_CHKANDJUMP1(err, mpi_errno, MPI_ERR_OTHER, "**mutex_create", "**mutex_create %s", strerror(err)); MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPIR_INIT_ASYNC_THREAD); fn_exit: return mpi_errno; fn_fail: goto fn_exit; #else return MPI_SUCCESS; #endif /* MPICH_THREAD_LEVEL == MPI_THREAD_MULTIPLE */ }
/* Fully initialize a virtual connection (VC).
 *
 * Sets the VC to the INACTIVE state with a zero reference count, binds
 * it to process group 'pg' at rank 'rank', assigns it the next local
 * pid, installs the default rendezvous/noncontig send callbacks and
 * eager-message thresholds, and finally invokes the channel-specific
 * initializer MPIDI_CH3_VC_Init.
 *
 * Always returns MPI_SUCCESS. */
int MPIDI_VC_Init(MPIDI_VC_t *vc, MPIDI_PG_t *pg, int rank)
{
    vc->state = MPIDI_VC_STATE_INACTIVE;
    vc->handle = HANDLE_SET_MPI_KIND(0, MPID_VCONN);
    MPIU_Object_set_ref(vc, 0);
    vc->pg = pg;
    vc->pg_rank = rank;
    /* NOTE(review): lpid_counter++ is not atomic — presumably VC
     * initialization is serialized by the caller; confirm before
     * relying on this in a multithreaded init path. */
    vc->lpid = lpid_counter++;
    vc->node_id = -1;   /* node id unknown until topology discovery */
    MPIDI_VC_Init_seqnum_send(vc);
    MPIDI_VC_Init_seqnum_recv(vc);
    vc->rndvSend_fn = MPIDI_CH3_RndvSend;
    vc->rndvRecv_fn = MPIDI_CH3_RecvRndv;
    vc->ready_eager_max_msg_sz = -1;    /* no limit */
    vc->eager_max_msg_sz = MPIR_CVAR_CH3_EAGER_MAX_MSG_SIZE;
    vc->sendNoncontig_fn = MPIDI_CH3_SendNoncontig_iov;
#ifdef ENABLE_COMM_OVERRIDES
    vc->comm_ops = NULL;
#endif
    /* FIXME: We need a better abstraction for initializing the
     * thread state for an object */
#if MPICH_THREAD_GRANULARITY == MPICH_THREAD_GRANULARITY_PER_OBJECT
    {
        int err;
        MPID_Thread_mutex_create(&vc->pobj_mutex, &err);
        MPIU_Assert(err == 0);
    }
#endif /* MPICH_THREAD_GRANULARITY */
    /* Channel-specific VC initialization */
    MPIDI_CH3_VC_Init(vc);
    MPIDI_DBG_PrintVCState(vc);

    return MPI_SUCCESS;
}
/* Initialize MPI's critical-section infrastructure: the memory-allocator
 * mutex, thread-private storage, and the set of critical-section mutexes
 * appropriate for the configured thread granularity (one global lock, or
 * several per-object locks).  Returns MPI_SUCCESS; failures are caught
 * by assertions. */
static int MPIR_Thread_CS_Init( void )
{
    int err;
    MPIU_THREADPRIV_DECL;

    /* Sanity check: the static lock table is big enough for every
     * mutex tracked by the nesting bookkeeping. */
    MPIU_Assert(MPICH_MAX_LOCKS >= MPIU_Nest_NUM_MUTEXES);

    /* we create this at all granularities right now */
    MPID_Thread_mutex_create(&MPIR_ThreadInfo.memalloc_mutex, &err);
    MPIU_Assert(err == 0);

    /* must come after memalloc_mutex creation */
    MPIU_THREADPRIV_INITKEY;
    MPIU_THREADPRIV_INIT;

#if MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_GLOBAL
/* There is a single, global lock, held for the duration of an MPI call */
    MPID_Thread_mutex_create(&MPIR_ThreadInfo.global_mutex, &err);
    MPIU_Assert(err == 0);
    MPID_Thread_mutex_create(&MPIR_ThreadInfo.handle_mutex, &err);
    MPIU_Assert(err == 0);

#elif MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_PER_OBJECT
    /* MPIU_THREAD_GRANULARITY_PER_OBJECT: Multiple locks, one per
     * shared resource class (handles, message queues, completions,
     * context ids, PMI). */
    MPID_Thread_mutex_create(&MPIR_ThreadInfo.global_mutex, &err);
    MPIU_Assert(err == 0);
    MPID_Thread_mutex_create(&MPIR_ThreadInfo.handle_mutex, &err);
    MPIU_Assert(err == 0);
    MPID_Thread_mutex_create(&MPIR_ThreadInfo.msgq_mutex, &err);
    MPIU_Assert(err == 0);
    MPID_Thread_mutex_create(&MPIR_ThreadInfo.completion_mutex, &err);
    MPIU_Assert(err == 0);
    MPID_Thread_mutex_create(&MPIR_ThreadInfo.ctx_mutex, &err);
    MPIU_Assert(err == 0);
    MPID_Thread_mutex_create(&MPIR_ThreadInfo.pmi_mutex, &err);
    MPIU_Assert(err == 0);

#elif MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_LOCK_FREE
/* Updates to shared data and access to shared services is handled without
   locks where ever possible. */
#error lock-free not yet implemented

#elif MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_SINGLE
/* No thread support, make all operations a no-op */

#else
#error Unrecognized thread granularity
#endif

    MPIU_DBG_MSG(THREAD,TYPICAL,"Created global mutex and private storage");
    return MPI_SUCCESS;
}
/* These routines handle any thread initialization that may be required */
/* Create the critical-section mutexes matching the configured thread
 * granularity (single global lock, per-object locks, or per-VCI), plus
 * the thread-private storage key.  Returns MPI_SUCCESS; mutex-creation
 * failures are caught by assertions. */
static int thread_cs_init(void)
{
    int err;

#if MPICH_THREAD_GRANULARITY == MPICH_THREAD_GRANULARITY__GLOBAL
    /* There is a single, global lock, held for the duration of an MPI call */
    MPID_Thread_mutex_create(&MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX, &err);
    MPIR_Assert(err == 0);
#elif MPICH_THREAD_GRANULARITY == MPICH_THREAD_GRANULARITY__POBJ
    /* MPICH_THREAD_GRANULARITY__POBJ: Multiple locks, one per shared
     * resource class (handles, message queues, completions, context
     * ids, PMI). */
    MPID_Thread_mutex_create(&MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX, &err);
    MPIR_Assert(err == 0);

    MPID_Thread_mutex_create(&MPIR_THREAD_POBJ_HANDLE_MUTEX, &err);
    MPIR_Assert(err == 0);

    MPID_Thread_mutex_create(&MPIR_THREAD_POBJ_MSGQ_MUTEX, &err);
    MPIR_Assert(err == 0);

    MPID_Thread_mutex_create(&MPIR_THREAD_POBJ_COMPLETION_MUTEX, &err);
    MPIR_Assert(err == 0);

    MPID_Thread_mutex_create(&MPIR_THREAD_POBJ_CTX_MUTEX, &err);
    MPIR_Assert(err == 0);

    MPID_Thread_mutex_create(&MPIR_THREAD_POBJ_PMI_MUTEX, &err);
    MPIR_Assert(err == 0);
#elif MPICH_THREAD_GRANULARITY == MPICH_THREAD_GRANULARITY__VCI
    /* Per-VCI granularity still needs the global and handle mutexes
     * here; per-VCI locks are created elsewhere. */
    MPID_Thread_mutex_create(&MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX, &err);
    MPIR_Assert(err == 0);

    MPID_Thread_mutex_create(&MPIR_THREAD_POBJ_HANDLE_MUTEX, &err);
    MPIR_Assert(err == 0);
#elif MPICH_THREAD_GRANULARITY == MPICH_THREAD_GRANULARITY__LOCKFREE
/* Updates to shared data and access to shared services is handled without
 * locks where ever possible. */
#error lock-free not yet implemented
#elif MPICH_THREAD_GRANULARITY == MPICH_THREAD_GRANULARITY__SINGLE
/* No thread support, make all operations a no-op */
#else
#error Unrecognized thread granularity
#endif
    MPID_THREADPRIV_KEY_CREATE;

    MPL_DBG_MSG(MPIR_DBG_INIT, TYPICAL, "Created global mutex and private storage");

    return MPI_SUCCESS;
}