/** * Prepare a descriptor for send/rdma using the supplied * convertor. If the convertor references data that is contiguous, * the descriptor may simply point to the user buffer. Otherwise, * this routine is responsible for allocating buffer space and * packing if required. * * @param btl (IN) BTL module * @param endpoint (IN) BTL peer addressing * @param convertor (IN) Data type convertor * @param reserve (IN) Additional bytes requested by upper layer to precede user data * @param size (IN/OUT) Number of bytes to prepare (IN), number of bytes actually prepared (OUT) */ mca_btl_base_descriptor_t* mca_btl_udapl_prepare_dst( struct mca_btl_base_module_t* btl, struct mca_btl_base_endpoint_t* endpoint, struct mca_mpool_base_registration_t* registration, struct opal_convertor_t* convertor, uint8_t order, size_t reserve, size_t* size, uint32_t flags) { mca_btl_udapl_frag_t* frag; int rc; MCA_BTL_UDAPL_FRAG_ALLOC_USER(btl, frag, rc); if(NULL == frag) { return NULL; } frag->segment.seg_len = *size; opal_convertor_get_current_pointer( convertor, (void**)&(frag->segment.seg_addr.pval) ); if(NULL == registration) { /* didn't get a memory registration passed in, so must * register the region now */ rc = btl->btl_mpool->mpool_register(btl->btl_mpool, frag->segment.seg_addr.pval, frag->segment.seg_len, 0, ®istration); if(OMPI_SUCCESS != rc || NULL == registration) { MCA_BTL_UDAPL_FRAG_RETURN_USER(btl,frag); return NULL; } frag->registration = (mca_btl_udapl_reg_t*)registration; } frag->base.des_src = NULL; frag->base.des_src_cnt = 0; frag->base.des_dst = &frag->segment; frag->base.des_dst_cnt = 1; frag->base.des_flags = flags; frag->segment.seg_key.key32[0] = ((mca_btl_udapl_reg_t*)registration)->rmr_context; frag->base.order = MCA_BTL_NO_ORDER; return &frag->base; }
/**
 * Release a descriptor previously handed out by this BTL, returning
 * it to the free list it was allocated from and deregistering any
 * memory registration this BTL created on its behalf.
 *
 * @param btl (IN) BTL module that owns the descriptor
 * @param des (IN) descriptor to release
 * @return OMPI_SUCCESS, or OMPI_ERR_BAD_PARAM if the frag size matches
 *         no known free list
 */
int mca_btl_udapl_free(
    struct mca_btl_base_module_t* btl,
    mca_btl_base_descriptor_t* des)
{
    mca_btl_udapl_frag_t* frag = (mca_btl_udapl_frag_t*)des;

    /* size 0 marks a user frag produced by prepare_src/prepare_dst */
    if(0 == frag->size) {
        if (NULL != frag->registration) {
            /* the registration was created by this BTL, not the caller,
             * so it is torn down here */
            btl->btl_mpool->mpool_deregister(btl->btl_mpool,
                &(frag->registration->base));
            frag->registration = NULL;
        }
        MCA_BTL_UDAPL_FRAG_RETURN_USER(btl, frag);
        return OMPI_SUCCESS;
    }

    if(frag->size == mca_btl_udapl_component.udapl_eager_frag_size) {
        MCA_BTL_UDAPL_FRAG_RETURN_EAGER(btl, frag);
        return OMPI_SUCCESS;
    }

    if(frag->size == mca_btl_udapl_component.udapl_max_frag_size) {
        MCA_BTL_UDAPL_FRAG_RETURN_MAX(btl, frag);
        return OMPI_SUCCESS;
    }

    BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_DIAGNOSE,
        ("mca_btl_udapl_free: invalid descriptor\n"));
    return OMPI_ERR_BAD_PARAM;
}
/** * Pack data and return a descriptor that can be * used for send/put. * * @param btl (IN) BTL module * @param peer (IN) BTL peer addressing */ mca_btl_base_descriptor_t* mca_btl_udapl_prepare_src( struct mca_btl_base_module_t* btl, struct mca_btl_base_endpoint_t* endpoint, struct mca_mpool_base_registration_t* registration, struct opal_convertor_t* convertor, uint8_t order, size_t reserve, size_t* size, uint32_t flags ) { mca_btl_udapl_frag_t* frag = NULL; struct iovec iov; uint32_t iov_count = 1; size_t max_data = *size; int rc; int pad = 0; /* compute pad as needed */ MCA_BTL_UDAPL_FRAG_CALC_ALIGNMENT_PAD(pad, (max_data + reserve + sizeof(mca_btl_udapl_footer_t))); if(opal_convertor_need_buffers(convertor) == false && 0 == reserve) { if(registration != NULL || max_data > btl->btl_max_send_size) { MCA_BTL_UDAPL_FRAG_ALLOC_USER(btl, frag); if(NULL == frag){ return NULL; } iov.iov_len = max_data; iov.iov_base = NULL; opal_convertor_pack(convertor, &iov, &iov_count, &max_data ); *size = max_data; if(NULL == registration) { rc = btl->btl_mpool->mpool_register(btl->btl_mpool, iov.iov_base, max_data, 0, ®istration); if(rc != OMPI_SUCCESS) { MCA_BTL_UDAPL_FRAG_RETURN_USER(btl,frag); return NULL; } /* keep track of the registration we did */ frag->registration = (mca_btl_udapl_reg_t*)registration; } frag->segment.base.seg_len = max_data; frag->segment.base.seg_addr.pval = iov.iov_base; frag->triplet.segment_length = max_data; frag->triplet.virtual_address = (DAT_VADDR)(uintptr_t)iov.iov_base; frag->triplet.lmr_context = ((mca_btl_udapl_reg_t*)registration)->lmr_triplet.lmr_context; /* initialize base descriptor */ frag->base.des_src = &frag->segment; frag->base.des_src_cnt = 1; frag->base.des_dst = NULL; frag->base.des_dst_cnt = 0; frag->base.des_flags = flags; frag->base.order = MCA_BTL_NO_ORDER; return &frag->base; } } if(max_data + pad + reserve <= btl->btl_eager_limit) { /* the data is small enough to fit in the eager frag and * memory is not prepinned */ 
MCA_BTL_UDAPL_FRAG_ALLOC_EAGER(btl, frag); } if(NULL == frag) { /* the data doesn't fit into eager frag or eager frag is * not available */ MCA_BTL_UDAPL_FRAG_ALLOC_MAX(btl, frag); if(NULL == frag) { return NULL; } if(max_data + reserve > btl->btl_max_send_size) { max_data = btl->btl_max_send_size - reserve; } } iov.iov_len = max_data; iov.iov_base = (char *) frag->segment.base.seg_addr.pval + reserve; rc = opal_convertor_pack(convertor, &iov, &iov_count, &max_data ); if(rc < 0) { MCA_BTL_UDAPL_FRAG_RETURN_MAX(btl, frag); return NULL; } *size = max_data; /* setup lengths and addresses to send out data */ frag->segment.base.seg_len = max_data + reserve; frag->triplet.segment_length = max_data + reserve + sizeof(mca_btl_udapl_footer_t); frag->triplet.virtual_address = (DAT_VADDR)(uintptr_t)frag->segment.base.seg_addr.pval; /* initialize base descriptor */ frag->base.des_src = &frag->segment; frag->base.des_src_cnt = 1; frag->base.des_dst = NULL; frag->base.des_dst_cnt = 0; frag->base.des_flags = flags; frag->base.order = MCA_BTL_NO_ORDER; return &frag->base; }