int mca_btl_vader_put (struct mca_btl_base_module_t *btl,
                       struct mca_btl_base_endpoint_t *endpoint,
                       struct mca_btl_base_descriptor_t *des)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) des;
    mca_btl_base_segment_t *src = des->des_local;
    mca_btl_base_segment_t *dst = des->des_remote;
    const size_t size = min(dst->seg_len, src->seg_len);
    mca_mpool_base_registration_t *reg;
    void *rem_ptr;

    reg = vader_get_registation (endpoint, dst->seg_addr.pval, dst->seg_len, 0, &rem_ptr);
    if (OPAL_UNLIKELY(NULL == reg)) {
        return OPAL_ERROR;
    }

    vader_memmove (rem_ptr, src->seg_addr.pval, size);

    vader_return_registration (reg, endpoint);

    /* always call the callback function */
    frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;

    mca_btl_vader_frag_complete (frag);

    return OPAL_SUCCESS;
}
int mca_btl_vader_get (struct mca_btl_base_module_t *btl,
                       struct mca_btl_base_endpoint_t *endpoint,
                       struct mca_btl_base_descriptor_t *des)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) des;
    mca_btl_base_segment_t *src = des->des_remote;
    mca_btl_base_segment_t *dst = des->des_local;
    const size_t size = min(dst->seg_len, src->seg_len);
    struct iovec src_iov = {.iov_base = src->seg_addr.pval, .iov_len = size};
    struct iovec dst_iov = {.iov_base = dst->seg_addr.pval, .iov_len = size};
    ssize_t ret;

    ret = process_vm_readv (endpoint->seg_ds.seg_cpid, &dst_iov, 1, &src_iov, 1, 0);
    if (ret != (ssize_t)size) {
        opal_output(0, "Read %ld, expected %lu, errno = %d\n", (long)ret, (unsigned long)size, errno);
        return OPAL_ERROR;
    }

    /* always call the callback function */
    frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;

    frag->endpoint = endpoint;
    mca_btl_vader_frag_complete (frag);

    return OPAL_SUCCESS;
}
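The two functions above ride on Linux cross memory attach (CMA): a single syscall moves the data directly between the two address spaces, with no shared-memory staging buffer. A minimal standalone sketch of the call, assuming a kernel built with CONFIG_CROSS_MEMORY_ATTACH and glibc >= 2.15; it targets its own pid, which is enough to exercise the interface, where vader passes the peer's pid from the shared segment descriptor:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main (void)
{
    char src[64] = "hello from the other address space";
    char dst[64] = {0};
    /* local iovec receives; remote iovec names memory in the target pid */
    struct iovec local  = {.iov_base = dst, .iov_len = sizeof (dst)};
    struct iovec remote = {.iov_base = src, .iov_len = sizeof (src)};

    ssize_t ret = process_vm_readv (getpid (), &local, 1, &remote, 1, 0);
    if (ret != (ssize_t) sizeof (src)) {
        perror ("process_vm_readv");
        return 1;
    }

    printf ("%s\n", dst);
    return 0;
}

process_vm_writev, used by the put variants below, is symmetric: the local iovec supplies the data and the remote iovec names the destination in the peer.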
/**
 * Initiate a send to the peer.
 *
 * @param btl (IN)        BTL module
 * @param endpoint (IN)   BTL addressing information
 * @param descriptor (IN) Description of the data to be transferred
 * @param tag (IN)        Tag identifying the receive callback on the peer
 */
int mca_btl_vader_send (struct mca_btl_base_module_t *btl,
                        struct mca_btl_base_endpoint_t *endpoint,
                        struct mca_btl_base_descriptor_t *descriptor,
                        mca_btl_base_tag_t tag)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) descriptor;

    if (OPAL_LIKELY(frag->fbox)) {
        mca_btl_vader_fbox_send (frag->fbox, tag, frag->segments[0].seg_len);
        mca_btl_vader_frag_complete (frag);

        return 1;
    }

    /* header (+ optional inline data) */
    frag->hdr->len = frag->segments[0].seg_len;
    /* type of message, pt-2-pt, one-sided, etc */
    frag->hdr->tag = tag;

    /* post the relative address of the descriptor into the peer's fifo */
    vader_fifo_write_ep (frag->hdr, endpoint);

    if ((frag->hdr->flags & MCA_BTL_VADER_FLAG_SINGLE_COPY) ||
        !(frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP)) {
        frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;

        return 0;
    }

    /* data is gone (from the pml's perspective). frag callback/release will
       happen later */
    return 1;
}
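The fast path above depends on the "fast box": a small per-peer shared-memory buffer the sender writes into directly and the receiver polls, skipping the fifo and the descriptor machinery for small messages. A rough sketch of the idea, assuming a single-producer/single-consumer ring; fbox_t, FBOX_SIZE and fbox_put are hypothetical names, not the vader API:

#include <stdint.h>
#include <string.h>

#define FBOX_SIZE 4096u                    /* must be a power of two */

typedef struct {
    volatile uint32_t head;                /* advanced by the receiver */
    volatile uint32_t tail;                /* advanced by the sender */
    unsigned char data[FBOX_SIZE];
} fbox_t;

/* copy len bytes into the ring, wrapping if needed. returns 0 on
 * success, -1 if the box is full (caller falls back to the fifo) */
static int fbox_put (fbox_t *box, const void *buf, uint32_t len)
{
    uint32_t tail = box->tail;
    uint32_t offset = tail & (FBOX_SIZE - 1);
    uint32_t first = FBOX_SIZE - offset;

    if (len > FBOX_SIZE - (tail - box->head)) {
        return -1;
    }

    if (first > len) {
        first = len;
    }

    memcpy (box->data + offset, buf, first);
    memcpy (box->data, (const unsigned char *) buf + first, len - first);

    /* a real implementation issues a store barrier here so the
     * receiver never sees the new tail before the payload */
    box->tail = tail + len;

    return 0;
}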
/**
 * Initiate a send to the peer.
 *
 * @param btl (IN)        BTL module
 * @param endpoint (IN)   BTL addressing information
 * @param descriptor (IN) Description of the data to be transferred
 * @param tag (IN)        Tag identifying the receive callback on the peer
 */
int mca_btl_vader_send (struct mca_btl_base_module_t *btl,
                        struct mca_btl_base_endpoint_t *endpoint,
                        struct mca_btl_base_descriptor_t *descriptor,
                        mca_btl_base_tag_t tag)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) descriptor;
    const size_t total_size = frag->segments[0].seg_len;

    if (OPAL_LIKELY(frag->fbox)) {
        mca_btl_vader_fbox_send (frag->fbox, tag);
        mca_btl_vader_frag_complete (frag);

        return 1;
    }

    /* header (+ optional inline data) */
    frag->hdr->len = total_size;
    /* type of message, pt-2-pt, one-sided, etc */
    frag->hdr->tag = tag;

    /* post the relative address of the descriptor into the peer's fifo */
    if (opal_list_get_size (&endpoint->pending_frags) || !vader_fifo_write_ep (frag->hdr, endpoint)) {
        frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
        OPAL_THREAD_LOCK(&endpoint->pending_frags_lock);
        opal_list_append (&endpoint->pending_frags, (opal_list_item_t *) frag);
        if (!endpoint->waiting) {
            OPAL_THREAD_LOCK(&mca_btl_vader_component.lock);
            opal_list_append (&mca_btl_vader_component.pending_endpoints, &endpoint->super);
            OPAL_THREAD_UNLOCK(&mca_btl_vader_component.lock);
            endpoint->waiting = true;
        }
        OPAL_THREAD_UNLOCK(&endpoint->pending_frags_lock);
        return OPAL_SUCCESS;
    }

    if ((frag->hdr->flags & MCA_BTL_VADER_FLAG_SINGLE_COPY) ||
        !(frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP)) {
        frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;

        return OPAL_SUCCESS;
    }

    /* data is gone (from the pml's perspective). frag callback/release will
       happen later */
    return 1;
}
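/* Drain the component's list of active single-copy sends: once the
 * receiver flips hdr->complete the fragment can be returned. `next`
 * is saved up front because completing a fragment removes the current
 * item from the list being walked. */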
static inline void mca_btl_vader_progress_sends (void)
{
    opal_list_t *list = &mca_btl_vader_component.active_sends;
    opal_list_item_t *item, *next;
    mca_btl_vader_frag_t *frag;

    for (item = opal_list_get_first (list) ; item != opal_list_get_end (list) ; ) {
        frag = (mca_btl_vader_frag_t *) item;
        next = opal_list_get_next (item);

        if (OPAL_LIKELY(frag->hdr->complete)) {
            opal_list_remove_item (&mca_btl_vader_component.active_sends, item);

            mca_btl_vader_frag_complete (frag);
        }

        item = next;
    }
}
int mca_btl_vader_put (struct mca_btl_base_module_t *btl,
                       struct mca_btl_base_endpoint_t *endpoint,
                       struct mca_btl_base_descriptor_t *des)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) des;
    mca_btl_base_segment_t *src = des->des_local;
    mca_btl_base_segment_t *dst = des->des_remote;
    const size_t size = min(dst->seg_len, src->seg_len);
    struct iovec src_iov = {.iov_base = src->seg_addr.pval, .iov_len = size};
    struct iovec dst_iov = {.iov_base = dst->seg_addr.pval, .iov_len = size};
    ssize_t ret;

    ret = process_vm_writev (endpoint->seg_ds.seg_cpid, &src_iov, 1, &dst_iov, 1, 0);
    if (ret != (ssize_t)size) {
        opal_output(0, "Wrote %ld, expected %lu, errno = %d\n", (long)ret, (unsigned long)size, errno);
        return OPAL_ERROR;
    }

    mca_btl_vader_frag_complete (frag);

    return OPAL_SUCCESS;
}
int mca_btl_vader_put (struct mca_btl_base_module_t *btl,
                       struct mca_btl_base_endpoint_t *endpoint,
                       struct mca_btl_base_descriptor_t *des)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) des;
    mca_btl_base_segment_t *src = des->des_src;
    mca_btl_base_segment_t *dst = des->des_dst;
    const size_t size = min(dst->seg_len, src->seg_len);
    struct iovec src_iov = {.iov_base = src->seg_addr.pval, .iov_len = size};
    struct iovec dst_iov = {.iov_base = dst->seg_addr.pval, .iov_len = size};
    ssize_t ret;

    ret = process_vm_writev (endpoint->seg_ds.seg_cpid, &src_iov, 1, &dst_iov, 1, 0);
    if (ret != (ssize_t) size) {
        fprintf (stderr, "Wrote %ld, expected %lu\n", (long) ret, (unsigned long) size);
        return OMPI_ERROR;
    }

    mca_btl_vader_frag_complete (frag);

    return OMPI_SUCCESS;
}
/**
 * Initiate a synchronous get.
 *
 * @param btl (IN)         BTL module
 * @param endpoint (IN)    BTL addressing information
 * @param descriptor (IN)  Description of the data to be transferred
 */
int mca_btl_vader_get (struct mca_btl_base_module_t *btl,
                       struct mca_btl_base_endpoint_t *endpoint,
                       struct mca_btl_base_descriptor_t *des)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) des;
    mca_btl_base_segment_t *src = des->des_src;
    mca_btl_base_segment_t *dst = des->des_dst;
    const size_t size = min(dst->seg_len, src->seg_len);
    mca_mpool_base_registration_t *reg;
    void *rem_ptr;

    reg = vader_get_registation (endpoint, src->seg_addr.pval, src->seg_len, 0, &rem_ptr);
    if (OPAL_UNLIKELY(NULL == reg)) {
        return OMPI_ERROR;
    }

    vader_memmove (dst->seg_addr.pval, rem_ptr, size);

    vader_return_registration (reg, endpoint);

    mca_btl_vader_frag_complete (frag);

    return OMPI_SUCCESS;
}
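On this path vader_get_registation and vader_memmove hide the XPMEM machinery: each process exports its whole address space once, and a peer attaches the exported pages into its own address space so an ordinary memmove performs the single-copy transfer. A rough sketch of what the two helpers abstract, assuming <xpmem.h> from the xpmem kernel module; export_all and import_and_copy are hypothetical helpers, and the real code additionally caches attachments in an mpool, which is what the registration object represents:

#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <xpmem.h>

/* exporter: run once, publish segid to peers (vader does this via the
 * modex); XPMEM_PERMIT_MODE/0666 is the permission check */
static xpmem_segid_t export_all (void)
{
    return xpmem_make (0, XPMEM_MAXADDR_SIZE, XPMEM_PERMIT_MODE, (void *) 0666);
}

/* importer: attach len bytes at the peer's virtual address rem_vaddr
 * and copy them out */
static int import_and_copy (xpmem_segid_t segid, uintptr_t rem_vaddr,
                            void *dst, size_t len)
{
    struct xpmem_addr addr;
    void *att;

    xpmem_apid_t apid = xpmem_get (segid, XPMEM_RDWR, XPMEM_PERMIT_MODE, (void *) 0666);
    if (-1 == apid) {
        return -1;
    }

    addr.apid   = apid;
    addr.offset = (off_t) rem_vaddr;

    att = xpmem_attach (addr, len, NULL);
    if ((void *) -1 == att) {
        xpmem_release (apid);
        return -1;
    }

    memmove (dst, att, len);    /* the vader_memmove step */

    xpmem_detach (att);
    xpmem_release (apid);

    return 0;
}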
int mca_btl_vader_put_cma (struct mca_btl_base_module_t *btl,
                           struct mca_btl_base_endpoint_t *endpoint,
                           struct mca_btl_base_descriptor_t *des)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) des;
    mca_btl_base_segment_t *src = des->des_src;
    mca_btl_base_segment_t *dst = des->des_dst;
    const size_t size = min(dst->seg_len, src->seg_len);
    struct iovec src_iov = {.iov_base = src->seg_addr.pval, .iov_len = size};
    struct iovec dst_iov = {.iov_base = dst->seg_addr.pval, .iov_len = size};
    ssize_t ret;

    ret = process_vm_writev (endpoint->segment_data.other.seg_ds->seg_cpid, &src_iov, 1, &dst_iov, 1, 0);
    if (ret != (ssize_t)size) {
        opal_output(0, "Wrote %ld, expected %lu, errno = %d\n", (long)ret, (unsigned long)size, errno);
        return OMPI_ERROR;
    }

    /* always call the callback function */
    frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;

    frag->endpoint = endpoint;
    mca_btl_vader_frag_complete (frag);

    return OMPI_SUCCESS;
}
#endif

#if OMPI_BTL_VADER_HAVE_KNEM
int mca_btl_vader_put_knem (struct mca_btl_base_module_t *btl,
                            struct mca_btl_base_endpoint_t *endpoint,
                            struct mca_btl_base_descriptor_t *des)
{
    mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) des;
    mca_btl_vader_segment_t *src = (mca_btl_vader_segment_t *) des->des_src;
    mca_btl_vader_segment_t *dst = (mca_btl_vader_segment_t *) des->des_dst;
    const size_t size = min(dst->base.seg_len, src->base.seg_len);
    struct knem_cmd_param_iovec send_iovec;
    struct knem_cmd_inline_copy icopy;

    /* Fill in the ioctl data fields.  There's no async completion, so
       we don't need to worry about getting a slot, etc. */
    send_iovec.base = (uintptr_t) src->base.seg_addr.lval;
    send_iovec.len = size;
    icopy.local_iovec_array = (uintptr_t) &send_iovec;
    icopy.local_iovec_nr    = 1;
    icopy.remote_cookie     = dst->cookie;
    icopy.remote_offset     = 0;
    icopy.write             = 1;
    icopy.flags             = 0;

    /* Use the DMA flag if knem supports it *and* the segment length
     * is greater than the cutoff. Note that if DMA is not supported,
     * or the user specified 0 for knem_dma_min, knem_dma_min was set
     * to UINT_MAX in mca_btl_vader_knem_init. */
    if (mca_btl_vader_component.knem_dma_min <= dst->base.seg_len) {
        icopy.flags = KNEM_FLAG_DMA;
    }
    /* synchronous flags only, no need to specify icopy.async_status_index */

    /* When the ioctl returns, the transfer is done and we can invoke
       the btl callback and return the frag */
    if (OPAL_UNLIKELY(0 != ioctl (mca_btl_vader.knem_fd, KNEM_CMD_INLINE_COPY, &icopy))) {
        return OMPI_ERROR;
    }

    if (KNEM_STATUS_FAILED == icopy.current_status) {
        return OMPI_ERROR;
    }

    /* always call the callback function */
    frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;

    frag->endpoint = endpoint;
    mca_btl_vader_frag_complete (frag);

    return OMPI_SUCCESS;
}
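The remote cookie consumed above is minted by the peer ahead of time: the owner of the target buffer declares it to /dev/knem and hands the returned cookie to whoever will copy. A rough sketch of that exporter side, assuming <knem_io.h> from the knem kernel module; export_region is a hypothetical helper standing in for what happens when the segment is prepared:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <knem_io.h>

/* declare len bytes at buf to knem; returns the cookie to share with
 * the peer, or 0 on error. knem_fd comes from open ("/dev/knem", O_RDWR) */
static knem_cookie_t export_region (int knem_fd, void *buf, size_t len, int prot)
{
    struct knem_cmd_param_iovec iov;
    struct knem_cmd_create_region create;

    iov.base = (uintptr_t) buf;
    iov.len  = len;

    memset (&create, 0, sizeof (create));
    create.iovec_array = (uintptr_t) &iov;
    create.iovec_nr    = 1;
    create.protection  = prot;    /* e.g. PROT_READ | PROT_WRITE */
    create.flags       = 0;       /* or KNEM_FLAG_SINGLEUSE for one-shot regions */

    if (0 != ioctl (knem_fd, KNEM_CMD_CREATE_REGION, &create)) {
        return 0;
    }

    return create.cookie;         /* goes into icopy.remote_cookie on the peer */
}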