/* Remove every RecursionStruct node whose rank matches 'rank' from the
 * global g_pLevel singly-linked list.  For each removed node the trace
 * output stream is closed, its backing file is deleted from disk, and
 * the node itself is freed.  A trailing pointer (pTrailer) tracks the
 * predecessor so interior nodes can be spliced out. */
void RemoveLevel(int rank)
{
    RecursionStruct *pLevel = g_pLevel, *pTrailer = g_pLevel;
    while (pLevel)
    {
        if (pLevel->rank == rank)
        {
            if (pLevel == pTrailer)
            {
                /* trailer has not yet diverged: this node may be the head */
                if (g_pLevel == pLevel)
                    g_pLevel = g_pLevel->next;    /* unlink the list head */
                pLevel = pLevel->next;
                fclose(pTrailer->fout);
                unlink(pTrailer->filename);       /* remove the on-disk file */
                MPIU_Free(pTrailer);
                pTrailer = pLevel;                /* re-sync trailer after free */
            }
            else
            {
                /* interior node: bypass it from the predecessor */
                pTrailer->next = pLevel->next;
                fclose(pLevel->fout);
                unlink(pLevel->filename);
                MPIU_Free(pLevel);
                pLevel = pTrailer->next;
            }
        }
        else
        {
            /* advance; trailer lags exactly one node behind pLevel */
            if (pTrailer != pLevel)
                pTrailer = pTrailer->next;
            pLevel = pLevel->next;
        }
    }
}
/* Wake a single thread waiting on 'cond', in strict FIFO order.
 * Each waiter has queued a node carrying a Windows event handle; we
 * dequeue the head under fifo_mutex and SetEvent() it.
 * err: optional (may be NULL); receives MPIU_THREAD_SUCCESS or a
 * Windows error code from SetEvent/the mutex operations. */
void MPIU_Thread_cond_signal(MPIU_Thread_cond_t * cond, int * err)
{
    MPIU_Thread_cond_fifo_t *fifo;
    MPIU_Thread_mutex_lock(&cond->fifo_mutex, err);
    if (err != NULL && *err != MPIU_THREAD_SUCCESS)
    {
        return;
    }
    /* dequeue the longest-waiting thread, if any */
    fifo = cond->fifo_head;
    if (fifo)
    {
        cond->fifo_head = cond->fifo_head->next;
        if (cond->fifo_head == NULL)
            cond->fifo_tail = NULL;   /* queue is now empty */
    }
    MPIU_Thread_mutex_unlock(&cond->fifo_mutex, err);
    if (err != NULL && *err != MPIU_THREAD_SUCCESS)
    {
        /* NOTE(review): if unlock fails here the already-dequeued waiter
         * is never signaled and its node leaks -- confirm intended. */
        return;
    }
    if (fifo)
    {
        /* wake the waiter; the dequeued node is ours to free either way */
        if (!SetEvent(fifo->event) && err != NULL)
        {
            *err = GetLastError();
            MPIU_Free(fifo);
            return;
        }
        MPIU_Free(fifo);
    }
    if (err != NULL)
    {
        *err = MPIU_THREAD_SUCCESS;
    }
}
static void MPIR_T_enum_env_finalize(void) { int i, j; MPIR_T_enum_t *e; enum_item_t *item; if (enum_table) { /* Free all entries */ for (i = 0; i < utarray_len(enum_table); i++) { e = (MPIR_T_enum_t *)utarray_eltptr(enum_table, i); MPIU_Free((void *)e->name); /* Free items in this enum */ for (j = 0; j < utarray_len(e->items); j++) { item = (enum_item_t *)utarray_eltptr(e->items, j); MPIU_Free((void *)item->name); } utarray_free(e->items); } /* Free enum_table itself */ utarray_free(enum_table); enum_table = NULL; } }
/* Free the shared-memory region backing an RMA window.
 * For a truly shared (multi-rank) segment: decrement the shared
 * ctrl->shm_count and spin until all ranks on the node have arrived,
 * so the segment is not torn down while still in use; rank 0 then
 * destroys the mutex and removes the SysV/mmap segment.
 * For a single-task node the buffer was plain malloc'ed and is freed.
 * Returns MPI_SUCCESS or jumps to fn_fail on shmctl error.
 * NOTE(review): FCNAME is "MPID_SHM_Win_free" while the function is
 * MPIDI_SHM_Win_free -- confirm which name is intended.
 * NOTE(review): shmdt()'s raw return value (0 / -1-with-errno) is
 * assigned to mpi_errno, which callers treat as an MPI error code --
 * verify this is deliberate. */
int MPIDI_SHM_Win_free(MPID_Win **win_ptr)
{
    static char FCNAME[] = "MPID_SHM_Win_free";
    int rc;
    int mpi_errno = MPI_SUCCESS;
    /* Free shared memory region */
    /* free shm_base_addrs that's only used for shared memory windows */
    if ((*win_ptr)->mpid.shm->allocated) {
        /* count down and wait for every node-local rank to reach free */
        OPA_fetch_and_add_int((OPA_int_t *) &((*win_ptr)->mpid.shm->ctrl->shm_count), -1);
        while ((*win_ptr)->mpid.shm->ctrl->shm_count != 0)
            MPIDI_QUICKSLEEP;
        if ((*win_ptr)->comm_ptr->rank == 0) {
            MPIDI_SHM_MUTEX_DESTROY(*win_ptr);
        }
#ifdef USE_SYSV_SHM
        mpi_errno = shmdt((*win_ptr)->mpid.shm->base_addr);
        if ((*win_ptr)->comm_ptr->rank == 0) {
            /* only rank 0 marks the segment for removal */
            rc = shmctl((*win_ptr)->mpid.shm->shm_id, IPC_RMID, NULL);
            MPIU_ERR_CHKANDJUMP((rc == -1), errno, MPI_ERR_RMA_SHARED, "**shmctl");
        }
#elif USE_MMAP_SHM
        munmap((*win_ptr)->mpid.shm->base_addr, (*win_ptr)->mpid.shm->segment_len);
        if (0 == (*win_ptr)->comm_ptr->rank)
            shm_unlink((*win_ptr)->mpid.shm->shm_key);
#else
        MPID_Abort(NULL, MPI_ERR_RMA_SHARED, -1, "MPI_Win_free error");
#endif
    }
    else {      /* one task on a node */
        MPIU_Free((*win_ptr)->mpid.shm->base_addr);
    }
    MPIU_Free((*win_ptr)->mpid.shm);
    (*win_ptr)->mpid.shm = NULL;
fn_fail:
    return mpi_errno;
}
/* Tear down the reliable-portals (rptl) object bound to portal table
 * index 'pt_index': unlink its control MEs, remove it from the global
 * rptl list, and free it.  Returns a PTL_* status code. */
int MPID_nem_ptl_rptl_ptfini(ptl_pt_index_t pt_index)
{
    int ret = PTL_OK;
    int idx;
    struct rptl *rptl = rptl_info.rptl_list;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_PTL_RPTL_PTFINI);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_PTL_RPTL_PTFINI);

    /* locate the rptl that owns this portal index */
    while (rptl && rptl->data.pt != pt_index)
        rptl = rptl->next;
    assert(rptl);

    /* free control portals that were created (world_size * 2 MEs) */
    if (rptl->control.pt != PTL_PT_ANY) {
        for (idx = 0; idx < rptl_info.world_size * 2; idx++) {
            ret = PtlMEUnlink(rptl->control.me[idx]);
            RPTLU_ERR_POP(ret, "Error unlinking control buffers\n");
        }
        MPIU_Free(rptl->control.me);
    }

    MPL_DL_DELETE(rptl_info.rptl_list, rptl);
    MPIU_Free(rptl);

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_PTL_RPTL_PTFINI);
    return ret;
  fn_fail:
    goto fn_exit;
}
/* Tear down an SPN (service principal name) list and invalidate the
 * caller's handle.  Returns SMPD_SUCCESS, or SMPD_FAIL for a NULL or
 * uninitialized handle. */
int smpd_spn_list_finalize(smpd_spn_list_hnd_t *spn_list_hnd_p)
{
    smpd_host_spn_node_t *node, *next;

    smpd_enter_fn(FCNAME);
    if (spn_list_hnd_p == NULL) {
        smpd_err_printf("Invalid pointer to spn list handle\n");
        smpd_exit_fn(FCNAME);
        return SMPD_FAIL;
    }
    if (!SMPD_SPN_LIST_HND_IS_INIT(*spn_list_hnd_p)) {
        smpd_dbg_printf("Trying to finalize an uninitialized handle\n");
        smpd_exit_fn(FCNAME);
        return SMPD_FAIL;
    }

    /* walk the node list, releasing each element */
    for (node = **spn_list_hnd_p; node != NULL; node = next) {
        next = node->next;
        MPIU_Free(node);
    }

    /* Free contents of the spn handle */
    MPIU_Free(*spn_list_hnd_p);
    *spn_list_hnd_p = NULL;
    smpd_exit_fn(FCNAME);
    return SMPD_SUCCESS;
}
/* Dispose of every chained element in the IB hash table, then free the
 * bucket array itself.  The table lock is held while buckets are walked;
 * the head element of each bucket is embedded in the array, so only the
 * chained (heap-allocated) elements are freed. */
void MPID_nem_ib_finalize_hash_table(MPID_nem_ib_hash_table_ptr_t table)
{
    int bucket;
    MPID_nem_ib_hash_elem_ptr_t cur, nxt;

    pthread_mutex_lock(&table->hash_table_lock);
    MPIU_Assert(table->entries != NULL);

    for (bucket = 0; bucket < table->num_entries; bucket++) {
        /* free the chain hanging off this embedded head element */
        for (cur = table->entries[bucket].next; cur != NULL; cur = nxt) {
            nxt = cur->next;
            MPIU_Free(cur);
        }
    }

    pthread_mutex_unlock(&table->hash_table_lock);
    MPIU_Free(table->entries);
}
static void MPIR_T_cat_env_finalize(void) { int i; cat_table_entry_t *cat; if (cat_table) { /* Free all entries */ for (i = 0; i < utarray_len(cat_table); i++) { cat = (cat_table_entry_t *)utarray_eltptr(cat_table, i); MPIU_Free((void *)cat->name); MPIU_Free((void *)cat->desc); utarray_free(cat->cvar_indices); utarray_free(cat->pvar_indices); utarray_free(cat->subcat_indices); } /* Free cat_table itself */ utarray_free(cat_table); cat_table = NULL; } if (cat_hash) { name2index_hash_t *current, *tmp; /* Free all entries */ HASH_ITER(hh, cat_hash, current, tmp) { HASH_DEL(cat_hash, current); MPIU_Free(current); } /* Free cat_hash itself */ HASH_CLEAR(hh, cat_hash); cat_hash = NULL; }
/* Complete a finished MXM send request: release the pack buffer used for
 * noncontiguous data, restore the inline iov storage if a heap iov was
 * allocated, then hand the request back to CH3.  Returns the (asserted
 * TRUE) completion flag. */
int _mxm_handle_sreq(MPID_Request * req)
{
    int complete = FALSE;
    MPID_nem_mxm_vc_area *vc_area = VC_BASE(req->ch.vc);
    MPID_nem_mxm_req_area *req_area = REQ_BASE(req);

    _dbg_mxm_out_buf(req_area->iov_buf[0].ptr,
                     (req_area->iov_buf[0].length > 16 ? 16 : req_area->iov_buf[0].length));

    vc_area->pending_sends -= 1;

    /* a temporary pack buffer exists only for derived-datatype or
     * noncontiguous sends */
    if (req->dev.tmpbuf && (req->dev.datatype_ptr || req->ch.noncontig))
        MPIU_Free(req->dev.tmpbuf);

    /* oversized iov lists were heap-allocated: revert to the inline array */
    if (req_area->iov_count > MXM_MPICH_MAX_IOV) {
        MPIU_Free(req_area->iov_buf);
        req_area->iov_buf = req_area->tmp_buf;
        req_area->iov_count = 0;
    }

    MPIDI_CH3U_Handle_send_req(req->ch.vc, req, &complete);
    MPIU_Assert(complete == TRUE);

    return complete;
}
/* PAMI completion callback fired when a Get-Accumulate ack send has
 * finished: releases the message's temporary data buffer and the
 * message-info structure itself. */
static void MPIDI_Win_GetAccSendAckDoneCB(pami_context_t context,
                                          void *_info, pami_result_t result)
{
    MPIDI_Win_GetAccMsgInfo *info = (MPIDI_Win_GetAccMsgInfo *) _info;

    MPIU_Free(info->tptr);
    MPIU_Free(info);
}
/* This routine is the finalize callback used to free the procable */ static int MPIR_FreeProctable( void *ptable ) { int i; MPIR_PROCDESC *proctable = (MPIR_PROCDESC *)ptable; for (i=0; i<MPIR_proctable_size; i++) { if (proctable[i].host_name) { MPIU_Free( proctable[i].host_name ); } } MPIU_Free( proctable ); return 0; }
/* Release the two-level (node-aware) communicator structures attached
 * to comm_ptr: the leader map/rank arrays, the shmem/leader/allgather
 * sub-communicators, and the per-node size array held at local rank 0.
 * Returns MPI_SUCCESS or the first error from MPIR_Comm_release.
 * BUGFIX: shmem_comm_ptr was dereferenced unconditionally even though
 * the same pointer is NULL-checked before its release below; the
 * dereference is now guarded (local_rank stays 0 when it is NULL). */
int free_2level_comm (MPID_Comm* comm_ptr)
{
    MPID_Comm *shmem_comm_ptr = NULL;
    MPID_Comm *leader_comm_ptr = NULL;
    MPID_Comm *allgather_comm_ptr = NULL;
    int local_rank = 0;
    int mpi_errno = MPI_SUCCESS;

    if (comm_ptr->ch.leader_map != NULL) {
        MPIU_Free(comm_ptr->ch.leader_map);
    }
    if (comm_ptr->ch.leader_rank != NULL) {
        MPIU_Free(comm_ptr->ch.leader_rank);
    }

    MPID_Comm_get_ptr((comm_ptr->ch.shmem_comm), shmem_comm_ptr);
    MPID_Comm_get_ptr((comm_ptr->ch.leader_comm), leader_comm_ptr);

    if (comm_ptr->ch.allgather_comm_ok == 1) {
        MPID_Comm_get_ptr((comm_ptr->ch.allgather_comm), allgather_comm_ptr);
        MPIU_Free(comm_ptr->ch.allgather_new_ranks);
    }

    /* guard: shmem_comm_ptr may legitimately be NULL (see release below) */
    if (shmem_comm_ptr != NULL) {
        local_rank = shmem_comm_ptr->rank;
    }

    /* node sizes are kept only at the node leader (local rank 0) */
    if (local_rank == 0) {
        if (comm_ptr->ch.node_sizes != NULL) {
            MPIU_Free(comm_ptr->ch.node_sizes);
        }
    }
    if (local_rank == 0 && leader_comm_ptr != NULL) {
        mpi_errno = MPIR_Comm_release(leader_comm_ptr, 0);
        if (mpi_errno != MPI_SUCCESS) {
            goto fn_fail;
        }
    }
    if (shmem_comm_ptr != NULL) {
        mpi_errno = MPIR_Comm_release(shmem_comm_ptr, 0);
        if (mpi_errno != MPI_SUCCESS) {
            goto fn_fail;
        }
    }
    if (allgather_comm_ptr != NULL) {
        mpi_errno = MPIR_Comm_release(allgather_comm_ptr, 0);
        if (mpi_errno != MPI_SUCCESS) {
            goto fn_fail;
        }
    }

    clear_2level_comm(comm_ptr);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Free a name-service handle: releases the kvsname string and the handle
 * structure, then nulls the caller's pointer.  Always returns 0.
 * FIXES: removed the unused local 'err'; added a guard so a NULL (or
 * already-freed) handle is a harmless no-op instead of a crash. */
int MPID_NS_Free( MPID_NS_Handle *handle_ptr )
{
    static const char FCNAME[] = "MPID_NS_Free";

    /* tolerate NULL / already-freed handles */
    if (handle_ptr == NULL || *handle_ptr == NULL)
        return 0;

    MPIU_Free( (*handle_ptr)->kvsname );
    MPIU_Free( *handle_ptr );
    *handle_ptr = 0;

    return 0;
}
/* Re-initialize the PMI connection after a checkpoint/restart.
 * Sanity-checks that the process group size and rank are unchanged,
 * then re-fetches the pg id and the KVS connection-data string,
 * replacing the previously allocated copies.
 * Returns 0 on success; CHECK_ERR handles each failure case.
 * NOTE(review): both pg->id and pg->connData sizes come from
 * PMI_KVS_Get_name_length_max and both strings are filled via
 * PMI_KVS_Get_my_name, so they receive the same value -- confirm the
 * pg id is really meant to use the KVS-name APIs here. */
static int reinit_pmi(void)
{
    int ret;
    int has_parent = 0;
    int pg_rank, pg_size;
    int kvs_name_sz, pg_id_sz;

    MPIDI_STATE_DECL(MPID_STATE_REINIT_PMI);
    MPIDI_FUNC_ENTER(MPID_STATE_REINIT_PMI);

    /* Init pmi and do some sanity checks */
    ret = PMI_Init(&has_parent);
    CHECK_ERR(ret, "pmi_init");

    ret = PMI_Get_rank(&pg_rank);
    CHECK_ERR(ret, "pmi_get_rank");

    ret = PMI_Get_size(&pg_size);
    CHECK_ERR(ret, "pmi_get_size");

    /* the restarted job must look identical to the pre-checkpoint one */
    CHECK_ERR(pg_size != MPIDI_Process.my_pg->size, "pg size differs after restart");
    CHECK_ERR(pg_rank != MPIDI_Process.my_pg_rank, "pg rank differs after restart");

    /* get new pg_id */
    ret = PMI_KVS_Get_name_length_max(&pg_id_sz);
    CHECK_ERR(ret, "pmi_get_id_length_max");

    MPIU_Free(MPIDI_Process.my_pg->id);

    MPIDI_Process.my_pg->id = MPIU_Malloc(pg_id_sz + 1);
    CHECK_ERR(MPIDI_Process.my_pg->id == NULL, "malloc failed");

    ret = PMI_KVS_Get_my_name(MPIDI_Process.my_pg->id, pg_id_sz);
    CHECK_ERR(ret, "pmi_kvs_get_my_name");

    /* get new kvsname */
    ret = PMI_KVS_Get_name_length_max(&kvs_name_sz);
    CHECK_ERR(ret, "PMI_KVS_Get_name_length_max");

    MPIU_Free(MPIDI_Process.my_pg->connData);

    MPIDI_Process.my_pg->connData = MPIU_Malloc(kvs_name_sz + 1);
    CHECK_ERR(MPIDI_Process.my_pg->connData == NULL, "malloc failed");

    ret = PMI_KVS_Get_my_name(MPIDI_Process.my_pg->connData, kvs_name_sz);
    CHECK_ERR(ret, "PMI_Get_my_name");

    MPIDI_FUNC_EXIT(MPID_STATE_REINIT_PMI);
    return 0;
}
void MPIDI_close_pe_extension() { extern MPIDI_printenv_t *mpich_env; extern MPIX_stats_t *mpid_statp; int rc; /* PAMI_Extension_open in pami_init */ rc = PAMI_Extension_close (pe_extension); if (rc != PAMI_SUCCESS) { TRACE_ERR("ERROR close PAMI_Extension failed rc %d", rc); } if (mpich_env) MPIU_Free(mpich_env); if (mpid_statp) MPIU_Free(mpid_statp); }
/* This routine is called by finalize when MPI exits */ static int MPIU_Handle_free( void *((*indirect)[]), int indirect_size ) { int i; /* Remove any allocated storage */ for (i=0; i<indirect_size; i++) { MPIU_Free( (*indirect)[i] ); } if (indirect) { MPIU_Free( indirect ); } /* This does *not* remove any objects that the user created and then did not destroy */ return 0; }
/* Drop one reference on the SMPD in-memory database subsystem; when the
 * reference count reaches zero, free every database node together with
 * all of its elements and reset the global list/iterator.
 * Always returns SMPD_DBS_SUCCESS. */
int smpd_dbs_finalize()
{
    smpd_database_node_t *pNode, *pNext;
    smpd_database_element_t *pElement;

    smpd_enter_fn(FCNAME);

    smpd_process.nInitDBSRefCount--;

    if (smpd_process.nInitDBSRefCount == 0)
    {
#ifdef USE_WIN_MUTEX_PROTECT
        WaitForSingleObject(smpd_process.hDBSMutex, INFINITE);
#endif
        /* FIXME: Should this cleanup be done here ?
         * This lets one forget to do a smpd_dbs_destroy() for
         * every smpd_dbs_create() */
        pNode = smpd_process.pDatabase;
        while (pNode)
        {
            pNext = pNode->pNext;
            /* free the element list hanging off this database node */
            while (pNode->pData)
            {
                pElement = pNode->pData;
                pNode->pData = pNode->pData->pNext;
                MPIU_Free(pElement);
            }
            MPIU_Free(pNode);
            pNode = pNext;
        }
        smpd_process.pDatabase = NULL;
        smpd_process.pDatabaseIter = NULL;
#ifdef USE_WIN_MUTEX_PROTECT
        /* done with the protecting mutex for good */
        ReleaseMutex(smpd_process.hDBSMutex);
        CloseHandle(smpd_process.hDBSMutex);
        smpd_process.hDBSMutex = NULL;
#endif
    }

    smpd_exit_fn(FCNAME);
    return SMPD_DBS_SUCCESS;
}
/* Called when an MPI_ANY_SOURCE request in the posted receive queue is
 * matched and dequeued by another path.  Attempts to cancel the
 * corresponding newmad request; if the cancel loses the race (data
 * already arriving), waits for completion and delivers the data into
 * rreq instead.  Returns TRUE if the newmad side actually satisfied
 * the receive, FALSE otherwise. */
int MPID_nem_newmad_anysource_matched(MPID_Request *rreq)
{
    /* This function is called when an anysource request in the posted
       receive queue is matched and dequeued */
    nm_sr_request_t *nmad_request = NULL;
    int ret;
    int matched = FALSE;

#ifdef DEBUG
    fprintf(stdout,"========> Any Source : MPID_nem_newmad_anysource_matched , req is %p\n",rreq);
#endif

    MPID_NEM_NMAD_GET_REQ_FROM_HASH(rreq,nmad_request);

    if(nmad_request != NULL)
    {
#ifdef DEBUG
        fprintf(stdout,"========> Any Source nmad req found :%p \n",nmad_request);
#endif
        ret = nm_sr_rcancel(mpid_nem_newmad_session,nmad_request);
        if (ret != NM_ESUCCESS)
        {
            /* cancel failed: the message is already being received, so
             * wait for it and complete the MPI request from its data */
#ifdef DEBUG
            fprintf(stdout,"========> Any Source nmad req (%p) not cancelled \n",nmad_request);
#endif
            size_t size;
            nm_tag_t match_info;
            MPIU_Assert(MPIDI_Request_get_type(rreq) != MPIDI_REQUEST_TYPE_GET_RESP);
            ret = nm_sr_rwait(mpid_nem_newmad_session,nmad_request);
            MPIU_Assert(ret == NM_ESUCCESS);
            /* keep it out of the completion queue: we handle it here */
            nm_sr_request_unset_completion_queue(mpid_nem_newmad_session,nmad_request);
            nm_sr_get_rtag(mpid_nem_newmad_session,nmad_request,&match_info);
            nm_sr_get_size(mpid_nem_newmad_session,nmad_request,&size);
            MPID_nem_newmad_handle_rreq(rreq,match_info, size);
            matched = TRUE;
        }
        else
        {
            /* cancel succeeded: release resources tied to the receive */
            MPID_Segment_free(rreq->dev.segment_ptr);
            if (REQ_FIELD(rreq,iov) != NULL)
                MPIU_Free(REQ_FIELD(rreq,iov));
        }
        MPIU_Free(nmad_request);
    }

    return matched;
}
IRLOG_IOStruct *IRLOG_CreateOutputStruct(const char *filename) { IRLOG_IOStruct *pOutput = NULL; /* allocate a data structure */ pOutput = (IRLOG_IOStruct*)MPIU_Malloc(sizeof(IRLOG_IOStruct)); if (pOutput == NULL) { MPIU_Error_printf("malloc failed - %s\n", strerror(errno)); return NULL; } /* open the output clog file */ pOutput->f = fopen(filename, "wb"); if (pOutput->f == NULL) { MPIU_Error_printf("Unable to open output file '%s' - %s\n", filename, strerror(errno)); MPIU_Free(pOutput); return NULL; } /* set all the data fields */ pOutput->header.type = RLOG_INVALID_TYPE; pOutput->pCurHeader = pOutput->buffer; pOutput->pNextHeader = pOutput->buffer; pOutput->pEnd = &pOutput->buffer[RLOG_BUFFSIZE]; return pOutput; }
/* Close an IRLOG reader: closes its file, frees the structure, and
 * nulls the caller's pointer.  Always returns 0. */
int IRLOG_CloseInputStruct(IRLOG_IOStruct **ppInput)
{
    IRLOG_IOStruct *in = *ppInput;

    fclose(in->f);
    MPIU_Free(in);
    *ppInput = NULL;
    return 0;
}
int MPID_nem_gm_finalize() { int mpi_errno = MPI_SUCCESS; int max_send_tokens; MPID_nem_gm_send_queue_t *e; max_send_tokens = gm_num_send_tokens (MPID_nem_module_gm_port); while (MPID_nem_module_gm_num_send_tokens < max_send_tokens && !MPID_nem_gm_queue_empty (send)) { mpi_errno = MPID_nem_gm_recv_poll(); if (mpi_errno) MPIU_ERR_POP (mpi_errno); } while (MPID_nem_gm_send_free_queue) { e = MPID_nem_gm_send_free_queue; MPID_nem_gm_send_free_queue = e->next; MPIU_Free (e); } mpi_errno = MPID_nem_gm_lmt_finalize(); if (mpi_errno) MPIU_ERR_POP (mpi_errno); gm_finalize(); fn_exit: return mpi_errno; fn_fail: goto fn_exit; }
/* Release the HPC job-scheduler context (a COM object) and invalidate
 * the caller's handle.  A NULL handle value is a no-op success; a NULL
 * handle pointer is an error. */
int smpd_hpc_js_finalize(smpd_hpc_js_ctxt_t *pctxt)
{
    smpd_enter_fn(FCNAME);

    if (pctxt == NULL) {
        smpd_err_printf("ERROR: Invalid pointer to js ctxt\n");
        smpd_exit_fn(FCNAME);
        return SMPD_FAIL;
    }
    if (*pctxt == NULL) {
        /* nothing to release */
        smpd_dbg_printf("Null js handle\n");
        smpd_exit_fn(FCNAME);
        return SMPD_SUCCESS;
    }

    /* Release the job scheduler object */
    if ((*pctxt)->pscheduler != NULL) {
        (*pctxt)->pscheduler->Release();
        (*pctxt)->pscheduler = NULL;
    }

    /* Free the job scheduler handle */
    MPIU_Free(*pctxt);
    *pctxt = NULL;

    smpd_exit_fn(FCNAME);
    return SMPD_SUCCESS;
}
/* Destroy a UD transport context: tear down its queue pair (if one was
 * created) and free the context structure. */
void mv2_ud_destroy_ctx (mv2_ud_ctx_t *ctx)
{
    if (ctx->qp != NULL) {
        ibv_destroy_qp(ctx->qp);
    }
    MPIU_Free(ctx);
}
/* Finish a newmad send request: run the OnDataAvail continuation if one
 * is registered (it must complete the request in one shot), otherwise
 * complete the request directly; then release any heap-allocated iov
 * and decrement the pending-send counter. */
void MPID_nem_newmad_handle_sreq(MPID_Request *req)
{
    int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);
#ifdef DEBUG
    fprintf(stdout,"========> Completing Send req %p \n",req);
#endif
    reqFn = req->dev.OnDataAvail;
    if (reqFn != NULL) {
        MPIDI_VC_t *vc = req->ch.vc;
        int complete = 0;

        reqFn(vc, req, &complete);
        /* a send continuation is expected to finish immediately */
        if (!complete) {
            MPIU_Assert(complete == TRUE);
        }
    }
    else {
        MPIDI_CH3U_Request_complete(req);
        MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, ".... complete");
    }

    if (REQ_FIELD(req,iov) != NULL)
        MPIU_Free((REQ_FIELD(req,iov)));
    mpid_nem_newmad_pending_send_req--;
}
/* Free a CH3 RMA window, first detaching from and finalizing the
 * node-shared memory segment when one was allocated, then delegating
 * the rest of the teardown to MPIDI_Win_free. */
int MPIDI_CH3_SHM_Win_free(MPID_Win **win_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_SHM_WIN_FREE);

    MPIDI_RMA_FUNC_ENTER(MPID_STATE_MPIDI_CH3_SHM_WIN_FREE);

    /* Free shared memory region */
    if ((*win_ptr)->shm_allocated) {
        /* shm_base_addrs exists only for shared memory windows */
        MPIU_Free((*win_ptr)->shm_base_addrs);

        /* detach from shared memory segment */
        mpi_errno = MPIU_SHMW_Seg_detach((*win_ptr)->shm_segment_handle,
                                         (char **) &(*win_ptr)->shm_base_addr,
                                         (*win_ptr)->shm_segment_len);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        MPIU_SHMW_Hnd_finalize(&(*win_ptr)->shm_segment_handle);
    }

    mpi_errno = MPIDI_Win_free(win_ptr);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_POP(mpi_errno);
    }

  fn_exit:
    MPIDI_RMA_FUNC_EXIT(MPID_STATE_MPIDI_CH3_SHM_WIN_FREE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Unlink '*context' from the global sspi_context_list (if it is found
 * there), free it, and null the caller's pointer.  A context not on the
 * list is still freed, with a debug message.  Always returns
 * SMPD_SUCCESS. */
int smpd_free_sspi_client_context(smpd_sspi_client_context_t **context)
{
    smpd_sspi_client_context_t *iter, *trailer;

    smpd_enter_fn(FCNAME);

    trailer = iter = smpd_process.sspi_context_list;
    while (iter)
    {
        if (iter == *context)
        {
            /* found: splice out (trailer == iter means it is the head) */
            if (trailer != iter)
            {
                trailer->next = iter->next;
            }
            else
            {
                smpd_process.sspi_context_list = iter->next;
            }
            break;
        }
        /* trailer lags exactly one node behind iter */
        if (trailer != iter)
            trailer = trailer->next;
        iter = iter->next;
    }
    if (iter == NULL)
    {
        smpd_dbg_printf("freeing a sspi_client_context not in the global list\n");
    }
    /* FIXME: cleanup sspi structures */
    MPIU_Free(*context);
    *context = NULL;

    smpd_exit_fn(FCNAME);
    return SMPD_SUCCESS;
}
static int handler_recv_complete(const ptl_event_t *e) { int mpi_errno = MPI_SUCCESS; MPID_Request *const rreq = e->user_ptr; int ret; int i; MPIDI_STATE_DECL(MPID_STATE_HANDLER_RECV_COMPLETE); MPIDI_FUNC_ENTER(MPID_STATE_HANDLER_RECV_COMPLETE); MPIU_Assert(e->type == PTL_EVENT_REPLY || e->type == PTL_EVENT_PUT || e->type == PTL_EVENT_PUT_OVERFLOW); if (REQ_PTL(rreq)->md != PTL_INVALID_HANDLE) { ret = PtlMDRelease(REQ_PTL(rreq)->md); MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlmdrelease", "**ptlmdrelease %s", MPID_nem_ptl_strerror(ret)); } for (i = 0; i < MPID_NEM_PTL_NUM_CHUNK_BUFFERS; ++i) if (REQ_PTL(rreq)->chunk_buffer[i]) MPIU_Free(REQ_PTL(rreq)->chunk_buffer[i]); mpi_errno = MPID_Request_complete(rreq); if (mpi_errno) { MPIR_ERR_POP(mpi_errno); } fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_HANDLER_RECV_COMPLETE); return mpi_errno; fn_fail: goto fn_exit; }
/* Wake every thread waiting on 'cond'.  The waiter FIFO is detached
 * under fifo_mutex, then each queued Windows event is signaled and its
 * node freed.
 * err: optional (may be NULL); receives MPIU_THREAD_SUCCESS, or the
 * first Windows error observed from SetEvent/the mutex operations.
 * BUGFIX: the original returned as soon as one SetEvent failed, leaking
 * every remaining FIFO node (its own comment said "lost memory") and
 * leaving the remaining waiters blocked forever.  We now record the
 * first failure but keep signaling and freeing the rest of the queue. */
void MPIU_Thread_cond_broadcast(MPIU_Thread_cond_t * cond, int * err)
{
    MPIU_Thread_cond_fifo_t *fifo, *next;
    int wake_err = MPIU_THREAD_SUCCESS;

    MPIU_Thread_mutex_lock(&cond->fifo_mutex, err);
    if (err != NULL && *err != MPIU_THREAD_SUCCESS)
    {
        return;
    }

    /* remove the fifo queue from the cond variable */
    fifo = cond->fifo_head;
    cond->fifo_head = cond->fifo_tail = NULL;

    MPIU_Thread_mutex_unlock(&cond->fifo_mutex, err);
    if (err != NULL && *err != MPIU_THREAD_SUCCESS)
    {
        return;
    }

    /* signal each event in the fifo queue */
    while (fifo)
    {
        /* remember only the first failure, but keep going so no waiter
         * is left unsignaled and no node is leaked */
        if (!SetEvent(fifo->event) && wake_err == MPIU_THREAD_SUCCESS)
        {
            wake_err = GetLastError();
        }
        next = fifo->next;
        MPIU_Free(fifo);
        fifo = next;
    }

    if (err != NULL)
    {
        *err = wake_err;
    }
}
/* Close an IRLOG writer: flushes any buffered records to the file,
 * closes it, frees the structure, and nulls the caller's pointer.
 * Always returns 0. */
int IRLOG_CloseOutputStruct(IRLOG_IOStruct **ppOutput)
{
    IRLOG_IOStruct *out = *ppOutput;

    /* flush whatever is still buffered before closing */
    WriteFileData(out->buffer, (int)(out->pCurHeader - out->buffer), out->f);
    fclose(out->f);
    MPIU_Free(out);
    *ppOutput = NULL;
    return 0;
}
/* Drain all events from the given event queues until every target's
 * pending control/data operation lists become empty, then free each
 * target's op-segment pool and the target list itself.
 * eq_count / eq: the set of event queues to poll.
 * Returns PTL_OK on success or the first error from eqget. */
int MPID_nem_ptl_rptl_drain_eq(int eq_count, ptl_handle_eq_t *eq)
{
    int ret = PTL_OK;
    ptl_event_t event;
    struct rptl_op_pool_segment *op_segment;
    int i;
    struct rptl_target *target, *t;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_PTL_RPTL_FINALIZE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_PTL_RPTL_FINALIZE);

    for (target = rptl_info.target_list; target; target = target->next) {
        /* event processing inside eqget retires the pending ops */
        while (target->control_op_list || target->data_op_list) {
            for (i = 0; i < eq_count; i++) {
                /* read and ignore all events */
                ret = MPID_nem_ptl_rptl_eqget(eq[i], &event);
                if (ret == PTL_EQ_EMPTY)
                    ret = PTL_OK;   /* an empty queue is not an error here */
                RPTLU_ERR_POP(ret, "Error calling MPID_nem_ptl_rptl_eqget\n");
            }
        }
    }

    for (target = rptl_info.target_list; target;) {
        assert(target->data_op_list == NULL);
        assert(target->control_op_list == NULL);

        /* release pooled op segments before freeing the target itself */
        while (target->op_segment_list) {
            op_segment = target->op_segment_list;
            MPL_DL_DELETE(target->op_segment_list, op_segment);
            MPIU_Free(op_segment);
        }

        t = target->next;
        MPIU_Free(target);
        target = t;
    }

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_PTL_RPTL_FINALIZE);
    return ret;
  fn_fail:
    goto fn_exit;
}