/**********************************************************************
 *
 * Control message communication
 *
 **********************************************************************/

/* Completion callback for a control-message isend: nothing to process
 * (cbdata is NULL for control sends), so just return the buffer to the
 * component free list. */
static void ompi_osc_pt2pt_control_send_cb(ompi_osc_pt2pt_buffer_t *buffer)
{
    /* release the descriptor and sendreq */
    OPAL_FREE_LIST_RETURN(&mca_osc_pt2pt_component.p2p_c_buffers,
                          &buffer->super);
}
/* Completion callback for the eager fragment of a put/acc/get request.
 * cbdata holds the originating sendreq; the payload begins with the
 * send header.  For put/acc: if the data was carried in the eager
 * fragment the sendreq is finished here, otherwise the payload is
 * pushed via the long protocol.  For get the sendreq is left alone
 * (see comment below).  In all cases the buffer goes back to the
 * component free list. */
static void ompi_osc_pt2pt_sendreq_send_cb(ompi_osc_pt2pt_buffer_t *buffer)
{
    ompi_osc_pt2pt_sendreq_t *sendreq =
        (ompi_osc_pt2pt_sendreq_t*) buffer->cbdata;
    ompi_osc_pt2pt_send_header_t *header =
        (ompi_osc_pt2pt_send_header_t*) buffer->payload;

    /* have to look at header, and not the sendreq because in the case
       of get, it's possible that the sendreq has been freed already (if
       the remote side replies before we get our send completion
       callback) and already allocated to another request.  We don't
       wait for this completion before exiting a synchronization point
       in the case of get, as we really don't care when it completes -
       only when the data arrives. */
    if (OMPI_OSC_PT2PT_HDR_GET != header->hdr_base.hdr_type) {
#if !defined(WORDS_BIGENDIAN) && OMPI_ENABLE_HETEROGENEOUS_SUPPORT
        /* header was converted to network byte order for a big-endian
           peer before sending; convert back so we can read it here */
        if (header->hdr_base.hdr_flags & OMPI_OSC_PT2PT_HDR_FLAG_NBO) {
            OMPI_OSC_PT2PT_SEND_HDR_NTOH(*header);
        }
#endif
        /* do we need to post a send? */
        if (header->hdr_msg_length != 0) {
            /* data went out in the eager fragment: sendreq is done.
               Mark it as so and get out of here */
            OPAL_THREAD_ADD32(&(sendreq->req_module->p2p_num_pending_out), -1);
            ompi_osc_pt2pt_sendreq_free(sendreq);
        } else {
            /* long protocol: the eager fragment carried only the
               header + datatype; send the payload as a separate pml
               isend matched by hdr_origin_tag, tracked via a longreq */
            ompi_osc_pt2pt_longreq_t *longreq;
            ompi_osc_pt2pt_longreq_alloc(&longreq);

            longreq->req_comp_cb = ompi_osc_pt2pt_sendreq_send_long_cb;
            longreq->req_comp_cbdata = sendreq;
            opal_output_verbose(50, ompi_osc_base_output,
                                "%d starting long sendreq to %d (%d)",
                                sendreq->req_module->p2p_comm->c_my_rank,
                                sendreq->req_target_rank,
                                header->hdr_origin_tag);

            mca_pml.pml_isend(sendreq->req_origin_convertor.pBaseBuf,
                              sendreq->req_origin_convertor.count,
                              sendreq->req_origin_datatype,
                              sendreq->req_target_rank,
                              header->hdr_origin_tag,
                              MCA_PML_BASE_SEND_STANDARD,
                              sendreq->req_module->p2p_comm,
                              &(longreq->req_pml_req));

            /* put the send request in the waiting list */
            OPAL_THREAD_LOCK(&(sendreq->req_module->p2p_lock));
            opal_list_append(&(sendreq->req_module->p2p_long_msgs),
                             &(longreq->super.super));
            OPAL_THREAD_UNLOCK(&(sendreq->req_module->p2p_lock));
        }
    }

    /* release the buffer */
    OPAL_FREE_LIST_RETURN(&mca_osc_pt2pt_component.p2p_c_buffers,
                          &buffer->super);
}
/* Return a buffer to its owner.  A NULL free-list item means the
 * buffer was heap-allocated and is simply freed; otherwise the item
 * (which must wrap exactly this pointer) goes back to the
 * ompi_java_buffers pool. */
static void releaseBuffer(void *ptr, ompi_java_buffer_t *item)
{
    if (NULL != item) {
        assert(item->buffer == ptr);
        OPAL_FREE_LIST_RETURN(&ompi_java_buffers,
                              (opal_free_list_item_t*)item);
        return;
    }
    free(ptr);
}
/* Completion callback for the isend of a reply (get-response) fragment.
 * cbdata holds the originating replyreq; the payload begins with the
 * reply header.  If the data was carried eagerly the replyreq is
 * finished here; otherwise the data is sent via the long protocol.
 * The buffer is returned to the component free list either way. */
static void ompi_osc_pt2pt_replyreq_send_cb(ompi_osc_pt2pt_buffer_t *buffer)
{
    ompi_osc_pt2pt_replyreq_t *replyreq =
        (ompi_osc_pt2pt_replyreq_t*) buffer->cbdata;
    ompi_osc_pt2pt_reply_header_t *header =
        (ompi_osc_pt2pt_reply_header_t*) buffer->payload;

#if !defined(WORDS_BIGENDIAN) && OMPI_ENABLE_HETEROGENEOUS_SUPPORT
    /* header was converted to network byte order for a big-endian
       peer before sending; convert back so we can read it here */
    if (header->hdr_base.hdr_flags & OMPI_OSC_PT2PT_HDR_FLAG_NBO) {
        OMPI_OSC_PT2PT_REPLY_HDR_NTOH(*header);
    }
#endif

    /* do we need to post a send? */
    if (header->hdr_msg_length != 0) {
        /* data went out in the eager fragment: replyreq is done.
           Mark it as so and get out of here */
        OPAL_THREAD_ADD32(&(replyreq->rep_module->p2p_num_pending_in), -1);
        ompi_osc_pt2pt_replyreq_free(replyreq);
    } else {
        /* long protocol: send the reply data as a separate pml isend
           matched by hdr_target_tag, tracked via a longreq */
        ompi_osc_pt2pt_longreq_t *longreq;
        ompi_osc_pt2pt_longreq_alloc(&longreq);

        longreq->req_comp_cb = ompi_osc_pt2pt_replyreq_send_long_cb;
        longreq->req_comp_cbdata = replyreq;

        mca_pml.pml_isend(replyreq->rep_target_convertor.pBaseBuf,
                          replyreq->rep_target_convertor.count,
                          replyreq->rep_target_datatype,
                          replyreq->rep_origin_rank,
                          header->hdr_target_tag,
                          MCA_PML_BASE_SEND_STANDARD,
                          replyreq->rep_module->p2p_comm,
                          &(longreq->req_pml_req));

        /* put the send request in the waiting list */
        OPAL_THREAD_LOCK(&(replyreq->rep_module->p2p_lock));
        opal_list_append(&(replyreq->rep_module->p2p_long_msgs),
                         &(longreq->super.super));
        OPAL_THREAD_UNLOCK(&(replyreq->rep_module->p2p_lock));
    }

    /* release the descriptor and replyreq */
    OPAL_FREE_LIST_RETURN(&mca_osc_pt2pt_component.p2p_c_buffers,
                          &buffer->super);
}
/*
 * Send a control message carrying <type, value0, value1> to the peer
 * described by proc.  The header is packed into an eager buffer,
 * byte-swapped if the peer is big-endian (heterogeneous builds), and
 * posted as an isend on the window communicator with the reserved
 * control tag (-200).  The buffer is released by
 * ompi_osc_pt2pt_control_send_cb when the isend completes.
 *
 * Returns OMPI_SUCCESS or an OMPI error code.
 */
int
ompi_osc_pt2pt_control_send(ompi_osc_pt2pt_module_t *module,
                            ompi_proc_t *proc,
                            uint8_t type, int32_t value0, int32_t value1)
{
    int ret = OMPI_SUCCESS;
    opal_free_list_item_t *item = NULL;  /* BUGFIX: initialize so the
                                            cleanup path never tests an
                                            indeterminate pointer */
    ompi_osc_pt2pt_buffer_t *buffer = NULL;
    ompi_osc_pt2pt_control_header_t *header = NULL;
    int rank = -1, i;

    /* find the rank of proc in the window's communicator */
    for (i = 0 ; i < module->p2p_comm->c_remote_group->grp_proc_count ; ++i) {
        if (proc == module->p2p_comm->c_remote_group->grp_proc_pointers[i]) {
            rank = i;
            break;  /* BUGFIX: stop at the first match instead of
                       scanning the entire group */
        }
    }
    if (rank < 0) {
        /* BUGFIX: proc is not in this window's group.  The old code
           fell through and posted an isend to rank -1. */
        return OMPI_ERR_NOT_FOUND;
    }

    /* Get a buffer */
    OPAL_FREE_LIST_GET(&mca_osc_pt2pt_component.p2p_c_buffers, item, ret);
    if (NULL == item) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }
    buffer = (ompi_osc_pt2pt_buffer_t*) item;

    /* verify at least enough space for header */
    if (mca_osc_pt2pt_component.p2p_c_eager_size <
        sizeof(ompi_osc_pt2pt_control_header_t)) {
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* setup buffer */
    buffer->cbfunc = ompi_osc_pt2pt_control_send_cb;
    buffer->cbdata = NULL;
    buffer->len = sizeof(ompi_osc_pt2pt_control_header_t);

    /* pack header */
    header = (ompi_osc_pt2pt_control_header_t*) buffer->payload;
    header->hdr_base.hdr_type = type;
    header->hdr_base.hdr_flags = 0;
    header->hdr_value[0] = value0;
    header->hdr_value[1] = value1;
    header->hdr_windx = module->p2p_comm->c_contextid;

#ifdef WORDS_BIGENDIAN
    header->hdr_base.hdr_flags |= OMPI_OSC_PT2PT_HDR_FLAG_NBO;
#elif OMPI_ENABLE_HETEROGENEOUS_SUPPORT
    /* convert to network byte order only when the receiver is
       big-endian and we are not */
    if (proc->proc_arch & OMPI_ARCH_ISBIGENDIAN) {
        header->hdr_base.hdr_flags |= OMPI_OSC_PT2PT_HDR_FLAG_NBO;
        OMPI_OSC_PT2PT_CONTROL_HDR_HTON(*header);
    }
#endif

    /* send fragment */
    ret = MCA_PML_CALL(isend(buffer->payload, buffer->len, MPI_BYTE,
                             rank, -200, MCA_PML_BASE_SEND_STANDARD,
                             module->p2p_comm, &buffer->request));
    opal_list_append(&module->p2p_pending_control_sends,
                     &buffer->super.super);
    goto done;

 cleanup:
    if (item != NULL) {
        OPAL_FREE_LIST_RETURN(&mca_osc_pt2pt_component.p2p_c_buffers, item);
    }

 done:
    return ret;
}
/*
 * Build and send the reply fragment for a get request.  The reply
 * header is packed first; if the target data fits in the remainder of
 * the eager buffer it is packed inline (hdr_msg_length set to the
 * packed size), otherwise hdr_msg_length is 0 and a tag is allocated
 * so the data can follow via the long protocol (see
 * ompi_osc_pt2pt_replyreq_send_cb).  Returns OMPI_SUCCESS or an OMPI
 * error code; on failure the buffer is returned to the free list.
 */
int
ompi_osc_pt2pt_replyreq_send(ompi_osc_pt2pt_module_t *module,
                             ompi_osc_pt2pt_replyreq_t *replyreq)
{
    int ret = OMPI_SUCCESS;
    opal_free_list_item_t *item;
    ompi_osc_pt2pt_buffer_t *buffer = NULL;
    ompi_osc_pt2pt_reply_header_t *header = NULL;
    size_t written_data = 0;  /* bytes packed into the buffer so far */

    /* Get a buffer */
    OPAL_FREE_LIST_GET(&mca_osc_pt2pt_component.p2p_c_buffers, item, ret);
    if (NULL == item) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }
    buffer = (ompi_osc_pt2pt_buffer_t*) item;

    /* verify at least enough space for header */
    if (mca_osc_pt2pt_component.p2p_c_eager_size < sizeof(ompi_osc_pt2pt_reply_header_t)) {
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* setup buffer */
    buffer->cbfunc = ompi_osc_pt2pt_replyreq_send_cb;
    buffer->cbdata = (void*) replyreq;

    /* pack header */
    header = (ompi_osc_pt2pt_reply_header_t*) buffer->payload;
    written_data += sizeof(ompi_osc_pt2pt_reply_header_t);
    header->hdr_base.hdr_type = OMPI_OSC_PT2PT_HDR_REPLY;
    header->hdr_base.hdr_flags = 0;
    header->hdr_origin_sendreq = replyreq->rep_origin_sendreq;
    header->hdr_target_tag = 0;

    /* if sending data fits, pack payload */
    if (mca_osc_pt2pt_component.p2p_c_eager_size >=
        written_data + replyreq->rep_target_bytes_packed) {
        struct iovec iov;
        uint32_t iov_count = 1;
        size_t max_data = replyreq->rep_target_bytes_packed;

        /* pack directly into the buffer just past the header */
        iov.iov_len = max_data;
        iov.iov_base = (IOVBASE_TYPE*)((unsigned char*) buffer->payload + written_data);

        ret = ompi_convertor_pack(&replyreq->rep_target_convertor,
                                  &iov, &iov_count, &max_data );
        if (ret < 0) {
            ret = OMPI_ERR_FATAL;
            goto cleanup;
        }
        /* single eager pack must consume everything */
        assert(max_data == replyreq->rep_target_bytes_packed);
        written_data += max_data;
        header->hdr_msg_length = replyreq->rep_target_bytes_packed;
    } else {
        /* long protocol: data follows in a separate send matched by
           this tag; signalled to the peer by hdr_msg_length == 0 */
        header->hdr_msg_length = 0;
        header->hdr_target_tag = create_send_tag(module);
    }

    buffer->len = written_data;

#ifdef WORDS_BIGENDIAN
    header->hdr_base.hdr_flags |= OMPI_OSC_PT2PT_HDR_FLAG_NBO;
#elif OMPI_ENABLE_HETEROGENEOUS_SUPPORT
    /* convert to network byte order only when the receiver is
       big-endian and we are not */
    if (replyreq->rep_origin_proc->proc_arch & OMPI_ARCH_ISBIGENDIAN) {
        header->hdr_base.hdr_flags |= OMPI_OSC_PT2PT_HDR_FLAG_NBO;
        OMPI_OSC_PT2PT_REPLY_HDR_HTON(*header);
    }
#endif

    /* send fragment */
    ret = MCA_PML_CALL(isend(buffer->payload, buffer->len, MPI_BYTE,
                             replyreq->rep_origin_rank, -200,
                             MCA_PML_BASE_SEND_STANDARD, module->p2p_comm,
                             &buffer->request));
    opal_list_append(&module->p2p_pending_control_sends,
                     &buffer->super.super);
    goto done;

 cleanup:
    if (item != NULL) {
        OPAL_FREE_LIST_RETURN(&mca_osc_pt2pt_component.p2p_c_buffers, item);
    }

 done:
    return ret;
}
/* create the initial fragment for a put/acc/get request: pack the send
   header, the packed datatype description, and (for put/acc, when it
   fits) the user payload into the eager buffer, then isend it to the
   target.  When the payload does not fit, hdr_msg_length is set to 0
   and a tag is allocated so the data can follow via the long protocol
   (see ompi_osc_pt2pt_sendreq_send_cb).  Returns OMPI_SUCCESS or an
   OMPI error code; on failure the buffer is returned to the free
   list. */
int
ompi_osc_pt2pt_sendreq_send(ompi_osc_pt2pt_module_t *module,
                            ompi_osc_pt2pt_sendreq_t *sendreq)
{
    int ret = OMPI_SUCCESS;
    opal_free_list_item_t *item = NULL;
    ompi_osc_pt2pt_send_header_t *header = NULL;
    ompi_osc_pt2pt_buffer_t *buffer = NULL;
    size_t written_data = 0;  /* bytes packed into the buffer so far */
    const void *packed_ddt;
    size_t packed_ddt_len =
        ompi_ddt_pack_description_length(sendreq->req_target_datatype);

    /* Get a buffer */
    OPAL_FREE_LIST_GET(&mca_osc_pt2pt_component.p2p_c_buffers, item, ret);
    if (NULL == item) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }
    buffer = (ompi_osc_pt2pt_buffer_t*) item;

    /* verify enough space for the header AND the packed datatype
       description -- both are copied into the eager buffer
       unconditionally below.  BUGFIX: the old check only accounted for
       the header, so a sufficiently large datatype description could
       overflow the buffer.  (An unused "needed_len" computation that
       also counted the payload -- which may legitimately exceed the
       eager size under the long protocol -- has been dropped.) */
    if (mca_osc_pt2pt_component.p2p_c_eager_size <
        sizeof(ompi_osc_pt2pt_send_header_t) + packed_ddt_len) {
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* setup buffer */
    buffer->cbfunc = ompi_osc_pt2pt_sendreq_send_cb;
    buffer->cbdata = (void*) sendreq;

    /* pack header */
    header = (ompi_osc_pt2pt_send_header_t*) buffer->payload;
    written_data += sizeof(ompi_osc_pt2pt_send_header_t);
    header->hdr_base.hdr_flags = 0;
    header->hdr_windx = sendreq->req_module->p2p_comm->c_contextid;
    header->hdr_origin = sendreq->req_module->p2p_comm->c_my_rank;
    header->hdr_origin_sendreq.pval = (void*) sendreq;
    header->hdr_origin_tag = 0;
    header->hdr_target_disp = sendreq->req_target_disp;
    header->hdr_target_count = sendreq->req_target_count;

    switch (sendreq->req_type) {
    case OMPI_OSC_PT2PT_PUT:
        header->hdr_base.hdr_type = OMPI_OSC_PT2PT_HDR_PUT;
#if OMPI_ENABLE_MEM_DEBUG
        header->hdr_target_op = 0;  /* keep memory checkers quiet */
#endif
        break;

    case OMPI_OSC_PT2PT_ACC:
        header->hdr_base.hdr_type = OMPI_OSC_PT2PT_HDR_ACC;
        header->hdr_target_op = sendreq->req_op_id;
        break;

    case OMPI_OSC_PT2PT_GET:
        header->hdr_base.hdr_type = OMPI_OSC_PT2PT_HDR_GET;
#if OMPI_ENABLE_MEM_DEBUG
        header->hdr_target_op = 0;  /* keep memory checkers quiet */
#endif
        break;
    }

    /* Set datatype id and / or pack datatype */
    ret = ompi_ddt_get_pack_description(sendreq->req_target_datatype,
                                        &packed_ddt);
    if (OMPI_SUCCESS != ret) goto cleanup;
    memcpy((unsigned char*) buffer->payload + written_data,
           packed_ddt, packed_ddt_len);
    written_data += packed_ddt_len;

    if (OMPI_OSC_PT2PT_GET != sendreq->req_type) {
        /* if sending data and it fits, pack payload */
        if (mca_osc_pt2pt_component.p2p_c_eager_size >=
            written_data + sendreq->req_origin_bytes_packed) {
            struct iovec iov;
            uint32_t iov_count = 1;
            size_t max_data = sendreq->req_origin_bytes_packed;

            /* pack directly into the buffer past header + datatype */
            iov.iov_len = max_data;
            iov.iov_base = (IOVBASE_TYPE*)((unsigned char*) buffer->payload + written_data);

            ret = ompi_convertor_pack(&sendreq->req_origin_convertor,
                                      &iov, &iov_count, &max_data);
            if (ret < 0) {
                ret = OMPI_ERR_FATAL;
                goto cleanup;
            }
            /* single eager pack must consume everything */
            assert(max_data == sendreq->req_origin_bytes_packed);
            written_data += max_data;
            header->hdr_msg_length = sendreq->req_origin_bytes_packed;
        } else {
            /* long protocol: data follows in a separate send matched
               by this tag; signalled to the peer by msg_length == 0 */
            header->hdr_msg_length = 0;
            header->hdr_origin_tag = create_send_tag(module);
        }
    } else {
        /* get carries no data from the origin side */
        header->hdr_msg_length = 0;
    }

    buffer->len = written_data;

#ifdef WORDS_BIGENDIAN
    header->hdr_base.hdr_flags |= OMPI_OSC_PT2PT_HDR_FLAG_NBO;
#elif OMPI_ENABLE_HETEROGENEOUS_SUPPORT
    /* convert to network byte order only when the receiver is
       big-endian and we are not */
    if (sendreq->req_target_proc->proc_arch & OMPI_ARCH_ISBIGENDIAN) {
        header->hdr_base.hdr_flags |= OMPI_OSC_PT2PT_HDR_FLAG_NBO;
        OMPI_OSC_PT2PT_SEND_HDR_HTON(*header);
    }
#endif

    /* send fragment */
    opal_output_verbose(51, ompi_osc_base_output,
                        "%d sending sendreq to %d",
                        sendreq->req_module->p2p_comm->c_my_rank,
                        sendreq->req_target_rank);
    ret = MCA_PML_CALL(isend(buffer->payload, buffer->len, MPI_BYTE,
                             sendreq->req_target_rank, -200,
                             MCA_PML_BASE_SEND_STANDARD, module->p2p_comm,
                             &buffer->request));
    opal_list_append(&module->p2p_pending_control_sends,
                     &buffer->super.super);
    goto done;

 cleanup:
    if (item != NULL) {
        OPAL_FREE_LIST_RETURN(&mca_osc_pt2pt_component.p2p_c_buffers, item);
    }

 done:
    return ret;
}
/* Event callback for an in-flight isend.  A send is considered done
 * when its event_count reaches 3 (see the long-message note below);
 * *complete is set to true when the request has finished (successfully
 * or not).  Returns OMPI_SUCCESS, or OMPI_ERROR on an NI failure.
 * NOTE(review): the 3-event completion convention is inferred from the
 * "counter to reach 3 events" comment and the assert below -- confirm
 * against the request-initialization code, which is outside this
 * file/view. */
static inline int
ompi_mtl_portals4_callback(ptl_event_t *ev,
                           ompi_mtl_portals4_base_request_t* ptl_base_request,
                           bool *complete)
{
    int retval = OMPI_SUCCESS, ret, val, add = 1;
    ompi_mtl_portals4_isend_request_t* ptl_request =
        (ompi_mtl_portals4_isend_request_t*) ptl_base_request;

#if OMPI_MTL_PORTALS4_FLOW_CONTROL
    /* the target portal table entry was disabled: we hit flow control.
       Unlink our ME, park the request on the pending-sends list, give
       the send slot back, and kick the flow-control recovery. */
    if (OPAL_UNLIKELY(ev->ni_fail_type == PTL_NI_PT_DISABLED)) {
        ompi_mtl_portals4_pending_request_t *pending =
            ptl_request->pending;
        OPAL_OUTPUT_VERBOSE((10, ompi_mtl_base_framework.framework_output,
                             "send %lu hit flow control (%d)",
                             ptl_request->opcount, ev->type));

        /* BWB: FIX ME: this is a hack.. */
        /* multiple events can report PT_DISABLED for the same request;
           only requeue it once */
        if (pending->fc_notified) {
            return OMPI_SUCCESS;
        }
        pending->fc_notified = 1;

        if (!PtlHandleIsEqual(ptl_request->me_h, PTL_INVALID_HANDLE)) {
            ret = PtlMEUnlink(ptl_request->me_h);
            if (PTL_OK != ret) {
                opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                    "%s:%d: send callback PtlMEUnlink returned %d",
                                    __FILE__, __LINE__, ret);
            }
        }

        opal_list_append(&ompi_mtl_portals4.flowctl.pending_sends,
                         &pending->super.super);
        OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, 1);
        ompi_mtl_portals4_flowctl_trigger();

        return OMPI_SUCCESS;
    }
#endif

    /* any other NI failure terminates the request with an error */
    if (OPAL_UNLIKELY(ev->ni_fail_type != PTL_NI_OK)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: send callback ni_fail_type: %d",
                            __FILE__, __LINE__, ev->ni_fail_type);
        *complete = true;
        return OMPI_ERROR;
    }

    OPAL_OUTPUT_VERBOSE((50, ompi_mtl_base_framework.framework_output,
                         "send %lu got event of type %d",
                         ptl_request->opcount, ev->type));

    if ((PTL_EVENT_ACK == ev->type) &&
        (PTL_PRIORITY_LIST == ev->ptl_list) &&
        (eager == ompi_mtl_portals4.protocol) &&
        (!PtlHandleIsEqual(ptl_request->me_h, PTL_INVALID_HANDLE))) {
        /* long expected messages with the eager protocol won't see a
           get event to complete the message.  Give them an extra count
           to cause the message to complete with just the SEND and ACK
           events and remove the ME.  (we wait for the counter to reach
           3 events, but short messages start the counter at 1, so they
           don't need to enter this path) */
        ret = PtlMEUnlink(ptl_request->me_h);
        if (PTL_OK != ret) {
            opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                "%s:%d: send callback PtlMEUnlink returned %d",
                                __FILE__, __LINE__, ret);
        }
        add++;
    }
    val = OPAL_THREAD_ADD32((int32_t*)&ptl_request->event_count, add);
    assert(val <= 3);

    if (val == 3) {
        /* all expected events have arrived: release the bounce buffer
           (if any), mark complete, and return flow-control resources */
        if (NULL != ptl_request->buffer_ptr) {
            free(ptl_request->buffer_ptr);
        }
        OPAL_OUTPUT_VERBOSE((50, ompi_mtl_base_framework.framework_output,
                             "send %lu completed", ptl_request->opcount));
        *complete = true;
#if OMPI_MTL_PORTALS4_FLOW_CONTROL
        OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, 1);
        OPAL_FREE_LIST_RETURN(&ompi_mtl_portals4.flowctl.pending_fl,
                              &ptl_request->pending->super);
        /* a freed slot may allow a queued send to proceed */
        if (OPAL_UNLIKELY(0 !=
                opal_list_get_size(&ompi_mtl_portals4.flowctl.pending_sends))) {
            ompi_mtl_portals4_pending_list_progress();
        }
#endif
    }
    return retval;
}
/*
 * Create and initialize a pt2pt one-sided module for the given window:
 * allocate the module, duplicate the communicator, allocate per-peer
 * bookkeeping arrays, and post the first receive for protocol headers.
 * On failure everything acquired so far is released and an OMPI error
 * code is returned.
 */
int
ompi_osc_pt2pt_component_select(ompi_win_t *win,
                               ompi_info_t *info,
                               ompi_communicator_t *comm)
{
    ompi_osc_pt2pt_module_t *module = NULL;
    int ret, i;
    ompi_osc_pt2pt_buffer_t *buffer = NULL;
    opal_free_list_item_t *item = NULL;
    char *tmp = NULL;

    /* create module structure */
    module = (ompi_osc_pt2pt_module_t*)
        calloc(1, sizeof(ompi_osc_pt2pt_module_t));
    if (NULL == module) return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

    /* fill in the function pointer part */
    memcpy(module, &ompi_osc_pt2pt_module_template,
           sizeof(ompi_osc_base_module_t));

    /* initialize the p2p part */
    OBJ_CONSTRUCT(&(module->p2p_lock), opal_mutex_t);
    OBJ_CONSTRUCT(&(module->p2p_cond), opal_condition_t);
    OBJ_CONSTRUCT(&(module->p2p_acc_lock), opal_mutex_t);
    OBJ_CONSTRUCT(&(module->p2p_pending_sendreqs), opal_list_t);
    OBJ_CONSTRUCT(&(module->p2p_copy_pending_sendreqs), opal_list_t);
    OBJ_CONSTRUCT(&(module->p2p_locks_pending), opal_list_t);
    OBJ_CONSTRUCT(&(module->p2p_unlocks_pending), opal_list_t);

    module->p2p_win = win;

    ret = ompi_comm_dup(comm, &(module->p2p_comm));
    if (ret != OMPI_SUCCESS) goto cleanup;

    opal_output_verbose(1, ompi_osc_base_framework.framework_output,
                        "pt2pt component creating window with id %d",
                        ompi_comm_get_cid(module->p2p_comm));

    /* name the window after the communicator id.  BUGFIX: check
       asprintf's return instead of passing a possibly indeterminate
       pointer to ompi_win_set_name on allocation failure */
    if (asprintf(&tmp, "%d", ompi_comm_get_cid(module->p2p_comm)) >= 0) {
        ompi_win_set_name(win, tmp);
        free(tmp);
        tmp = NULL;
    }

    module->p2p_num_pending_sendreqs = (unsigned int*)
        malloc(sizeof(unsigned int) * ompi_comm_size(module->p2p_comm));
    if (NULL == module->p2p_num_pending_sendreqs) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }
    memset(module->p2p_num_pending_sendreqs, 0,
           sizeof(unsigned int) * ompi_comm_size(module->p2p_comm));

    module->p2p_num_pending_out = 0;
    module->p2p_num_pending_in = 0;
    module->p2p_num_post_msgs = 0;
    module->p2p_num_complete_msgs = 0;
    module->p2p_tag_counter = 0;

    module->p2p_copy_num_pending_sendreqs = (unsigned int*)
        malloc(sizeof(unsigned int) * ompi_comm_size(module->p2p_comm));
    if (NULL == module->p2p_copy_num_pending_sendreqs) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }
    /* BUGFIX: the old code re-zeroed p2p_num_pending_sendreqs here
       (already zeroed above), leaving this freshly malloc'd copy array
       uninitialized */
    memset(module->p2p_copy_num_pending_sendreqs, 0,
           sizeof(unsigned int) * ompi_comm_size(module->p2p_comm));

    /* fence data */
    module->p2p_fence_coll_counts = (int*)
        malloc(sizeof(int) * ompi_comm_size(module->p2p_comm));
    if (NULL == module->p2p_fence_coll_counts) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }
    for (i = 0 ; i < ompi_comm_size(module->p2p_comm) ; ++i) {
        module->p2p_fence_coll_counts[i] = 1;
    }

    /* pwsc data */
    module->p2p_pw_group = NULL;
    module->p2p_sc_group = NULL;
    module->p2p_sc_remote_active_ranks = (bool*)
        malloc(sizeof(bool) * ompi_comm_size(module->p2p_comm));
    if (NULL == module->p2p_sc_remote_active_ranks) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }
    module->p2p_sc_remote_ranks = (int*)
        malloc(sizeof(int) * ompi_comm_size(module->p2p_comm));
    if (NULL == module->p2p_sc_remote_ranks) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* lock data */
    module->p2p_lock_status = 0;
    module->p2p_shared_count = 0;
    module->p2p_lock_received_ack = 0;

    /* fill in window information */
    win->w_osc_module = (ompi_osc_base_module_t*) module;

    /* sync memory - make sure all initialization completed */
    opal_atomic_mb();

    /* start up receive for protocol headers */
    OPAL_FREE_LIST_GET(&mca_osc_pt2pt_component.p2p_c_buffers, item, ret);
    if (OMPI_SUCCESS != ret) goto cleanup;
    buffer = (ompi_osc_pt2pt_buffer_t*) item;
    buffer->data = (void*) module;

    ret = ompi_osc_pt2pt_component_irecv(buffer->payload,
                                         mca_osc_pt2pt_component.p2p_c_eager_size,
                                         MPI_BYTE, MPI_ANY_SOURCE,
                                         CONTROL_MSG_TAG, module->p2p_comm,
                                         &(buffer->request),
                                         component_fragment_cb, buffer);
    if (OMPI_SUCCESS != ret) goto cleanup;

    return OMPI_SUCCESS;

 cleanup:
    /* tear down everything acquired so far, in reverse order */
    OBJ_DESTRUCT(&module->p2p_unlocks_pending);
    OBJ_DESTRUCT(&module->p2p_locks_pending);
    OBJ_DESTRUCT(&module->p2p_copy_pending_sendreqs);
    OBJ_DESTRUCT(&module->p2p_pending_sendreqs);
    OBJ_DESTRUCT(&module->p2p_acc_lock);
    OBJ_DESTRUCT(&module->p2p_cond);
    OBJ_DESTRUCT(&module->p2p_lock);

    if (NULL != buffer) {
        OPAL_FREE_LIST_RETURN(&mca_osc_pt2pt_component.p2p_c_buffers, item);
    }
    /* free(NULL) is a no-op, so the old per-pointer NULL guards were
       redundant; module was calloc'd, so unset pointers are NULL */
    free(module->p2p_sc_remote_ranks);
    free(module->p2p_sc_remote_active_ranks);
    free(module->p2p_fence_coll_counts);
    free(module->p2p_copy_num_pending_sendreqs);
    free(module->p2p_num_pending_sendreqs);
    if (NULL != module->p2p_comm) ompi_comm_free(&module->p2p_comm);

#if OPAL_ENABLE_DEBUG
    /* poison the function-pointer prefix before freeing.  NOTE(review):
       only the base-module-sized prefix is cleared, mirroring the
       memcpy above -- confirm whether the full pt2pt module was
       intended */
    memset(module, 0, sizeof(ompi_osc_base_module_t));
#endif
    free(module);

    return ret;
}