/** * Allocate a segment. * * @param btl (IN) BTL module * @param size (IN) Request segment size. */ mca_btl_base_descriptor_t* mca_btl_self_alloc( struct mca_btl_base_module_t* btl, size_t size ) { mca_btl_self_frag_t* frag; int rc; if(size <= mca_btl_self.btl_eager_limit) { MCA_BTL_SELF_FRAG_ALLOC_EAGER(frag,rc); frag->segment.seg_len = size; } else if (size <= btl->btl_max_send_size) { MCA_BTL_SELF_FRAG_ALLOC_SEND(frag,rc); frag->segment.seg_len = size; } else { return NULL; } frag->base.des_flags = 0; frag->base.des_src = &(frag->segment); frag->base.des_src_cnt = 1; return (mca_btl_base_descriptor_t*)frag; }
/** * Allocate a segment. * * @param btl (IN) BTL module * @param size (IN) Request segment size. */ mca_btl_base_descriptor_t* mca_btl_self_alloc( struct mca_btl_base_module_t* btl, struct mca_btl_base_endpoint_t* endpoint, uint8_t order, size_t size, uint32_t flags) { mca_btl_self_frag_t* frag = NULL; if(size <= mca_btl_self.btl_eager_limit) { MCA_BTL_SELF_FRAG_ALLOC_EAGER(frag); } else if (size <= btl->btl_max_send_size) { MCA_BTL_SELF_FRAG_ALLOC_SEND(frag); } if( OPAL_UNLIKELY(NULL == frag) ) { return NULL; } frag->segment.seg_len = size; frag->base.des_flags = flags; frag->base.des_src = &(frag->segment); frag->base.des_src_cnt = 1; return (mca_btl_base_descriptor_t*)frag; }
/** * Allocate a segment. * * @param btl (IN) BTL module * @param size (IN) Request segment size. */ static mca_btl_base_descriptor_t *mca_btl_self_alloc (struct mca_btl_base_module_t *btl, struct mca_btl_base_endpoint_t *endpoint, uint8_t order, size_t size, uint32_t flags) { mca_btl_self_frag_t *frag = NULL; if (size <= MCA_BTL_SELF_MAX_INLINE_SIZE) { MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag); } else if (size <= mca_btl_self.btl_eager_limit) { MCA_BTL_SELF_FRAG_ALLOC_EAGER(frag); } else if (size <= btl->btl_max_send_size) { MCA_BTL_SELF_FRAG_ALLOC_SEND(frag); } if( OPAL_UNLIKELY(NULL == frag) ) { return NULL; } frag->segments[0].seg_len = size; frag->base.des_segment_count = 1; frag->base.des_flags = flags; return &frag->base; }
/**
 * Prepare data for send/put.
 *
 * Either packs the user's data into a freshly allocated fragment
 * (copy-in path) or references the user's buffer in place via an
 * RDMA fragment (zero-copy path).
 *
 * @param btl          (IN)     BTL module
 * @param endpoint     (IN)     peer endpoint (not used by this function)
 * @param registration (IN)     memory registration (not used by this function)
 * @param convertor    (IN/OUT) datatype convertor positioned at the data
 * @param order        (IN)     ordering hint (not used by this function)
 * @param reserve      (IN)     bytes of header space to reserve before the payload
 * @param size         (IN/OUT) requested payload size on input; bytes
 *                              actually packed/referenced on output
 * @param flags        (IN)     descriptor flags stored in des_flags
 * @return descriptor describing the prepared data, or NULL on
 *         allocation or pack failure.
 */
struct mca_btl_base_descriptor_t*
mca_btl_self_prepare_src( struct mca_btl_base_module_t* btl,
                          struct mca_btl_base_endpoint_t* endpoint,
                          mca_mpool_base_registration_t* registration,
                          struct opal_convertor_t* convertor,
                          uint8_t order,
                          size_t reserve,
                          size_t* size,
                          uint32_t flags )
{
    mca_btl_self_frag_t* frag;
    struct iovec iov;
    uint32_t iov_count = 1;
    size_t max_data = *size;
    int rc;

    /* non-contigous data */
    /* Copy-in path taken when: the convertor must buffer (non-contiguous
     * data), header space is reserved, or the payload is smaller than the
     * max send size — presumably small messages are cheaper to copy than
     * to reference in place (NOTE(review): rationale not visible here,
     * confirm against the BTL design docs). */
    if( opal_convertor_need_buffers(convertor) ||
        max_data < mca_btl_self.btl_max_send_size ||
        reserve != 0 ) {
        MCA_BTL_SELF_FRAG_ALLOC_SEND(frag);
        if(OPAL_UNLIKELY(NULL == frag)) {
            return NULL;
        }
        /* Clamp the payload to the space left after the reserved header. */
        if(reserve + max_data > frag->size) {
            max_data = frag->size - reserve;
        }
        /* Pack directly into the fragment's payload area, which starts
         * immediately after the fragment header (frag+1), past reserve. */
        iov.iov_len = max_data;
        iov.iov_base = (IOVBASE_TYPE*)((unsigned char*)(frag+1) + reserve);
        rc = opal_convertor_pack(convertor, &iov, &iov_count, &max_data );
        if(rc < 0) {
            /* Pack failed: give the fragment back to its free list. */
            MCA_BTL_SELF_FRAG_RETURN_SEND(frag);
            return NULL;
        }
        /* Segment covers the reserved header plus the packed payload. */
        frag->segment.seg_addr.pval = frag+1;
        frag->segment.seg_len = reserve + max_data;
        *size = max_data;  /* report bytes actually consumed from the convertor */
    } else {
        /* Zero-copy path: contiguous data, no reserve. */
        MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag);
        if(OPAL_UNLIKELY(NULL == frag)) {
            return NULL;
        }
        iov.iov_len = max_data;
        iov.iov_base = NULL;
        /* convertor should return offset into users buffer */
        rc = opal_convertor_pack(convertor, &iov, &iov_count, &max_data );
        if(rc < 0) {
            MCA_BTL_SELF_FRAG_RETURN_RDMA(frag);
            return NULL;
        }
        /* Reference the user's buffer in place — no copy performed. */
        frag->segment.seg_addr.lval = (uint64_t)(uintptr_t) iov.iov_base;
        frag->segment.seg_len = max_data;
        *size = max_data;
    }
    /* Describe the single source segment of this descriptor. */
    frag->base.des_flags = flags;
    frag->base.des_src = &frag->segment;
    frag->base.des_src_cnt = 1;
    return &frag->base;
}