Example #1
int
mca_io_romio314_file_iwrite (ompi_file_t *fh,
                          const void *buf,
                          int count,
                          struct ompi_datatype_t *datatype,
                          ompi_request_t **request)
{
    int ret;
    mca_io_romio314_data_t *data;

    data = (mca_io_romio314_data_t *) fh->f_io_selected_data;
    OPAL_THREAD_LOCK (&mca_io_romio314_mutex);
    ret =
        ROMIO_PREFIX(MPI_File_iwrite) (data->romio_fh, buf, count, datatype,
                                       request);
    OPAL_THREAD_UNLOCK (&mca_io_romio314_mutex);

    return ret;
}
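Every example in this listing follows the same shape: take a mutex with OPAL_THREAD_LOCK, do the work, and release it with OPAL_THREAD_UNLOCK on every exit path. As a rough mental model (an assumption about the macros, not their literal Open MPI definitions), they behave like mutex operations that collapse to no-ops when the library runs single-threaded:

/* Conceptual sketch only - not the actual OMPI macro definitions */
#define MY_THREAD_LOCK(m)               \
    do {                                \
        if (opal_using_threads()) {     \
            opal_mutex_lock(m);         \
        }                               \
    } while (0)

#define MY_THREAD_UNLOCK(m)             \
    do {                                \
        if (opal_using_threads()) {     \
            opal_mutex_unlock(m);       \
        }                               \
    } while (0)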
Example #2
int
mca_io_romio314_file_write_all (ompi_file_t *fh,
                             const void *buf,
                             int count,
                             struct ompi_datatype_t *datatype,
                             ompi_status_public_t * status)
{
    int         ret;
    mca_io_romio314_data_t *data;

    data = (mca_io_romio314_data_t *) fh->f_io_selected_data;
    OPAL_THREAD_LOCK (&mca_io_romio314_mutex);
    ret =
        ROMIO_PREFIX(MPI_File_write_all) (data->romio_fh, buf, count, datatype,
                                         status);
    OPAL_THREAD_UNLOCK (&mca_io_romio314_mutex);

    return ret;
}
Example #3
File: orte_wait.c Project: bringhurst/ompi
pid_t
orte_waitpid(pid_t wpid, int *status, int options)
{
    opal_process_handle_t* pending;

    OPAL_THREAD_LOCK(&mutex);

    /**
     * Is the child already gone ?
     */
    pending = find_pending_pid( wpid, false );
    if( NULL != pending ) {
        *status = pending->status;
        opal_list_remove_item( &pending_pids, (opal_list_item_t*)pending );
        OBJ_RELEASE(pending);
        OPAL_THREAD_UNLOCK(&mutex);
        return wpid;
    }

    /**
     * Do we have any registered callback for this particular pid ?
     */
    pending = find_pending_cb( wpid, false );
    if( NULL != pending ) {
        opal_list_remove_item( &registered_cb, (opal_list_item_t*)pending );
        OBJ_RELEASE( pending );
    }

    /**
     * No luck so far. Wait until the process complete ...
     */
    if( WAIT_OBJECT_0 == WaitForSingleObject( (HANDLE)wpid, INFINITE ) ) {
        DWORD exitCode = 0;
        /* Process completed. Grab the exit value and return. */
        if( 0 == GetExitCodeProcess( (HANDLE)wpid, &exitCode ) ) {
            /* on failure the status reported is 0; the Windows error
               code is fetched but not propagated here */
            (void)GetLastError();
        }
        *status = (int)exitCode;
    }
    OPAL_THREAD_UNLOCK(&mutex);
    return wpid;
}
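Note how each early return above must be preceded by its own OPAL_THREAD_UNLOCK. A common alternative (a hypothetical refactoring, not code from the project) funnels every exit through a single unlock:

/* Hypothetical single-exit variant of the first lookup in orte_waitpid */
pid_t my_waitpid(pid_t wpid, int *status)
{
    pid_t ret = wpid;
    opal_process_handle_t* pending;

    OPAL_THREAD_LOCK(&mutex);
    pending = find_pending_pid( wpid, false );
    if( NULL != pending ) {
        *status = pending->status;
        opal_list_remove_item( &pending_pids, (opal_list_item_t*)pending );
        OBJ_RELEASE(pending);
        goto done;  /* the unlock below runs exactly once */
    }
    /* ... remaining cases elided ... */
done:
    OPAL_THREAD_UNLOCK(&mutex);
    return ret;
}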
Example #4
File: oob_tcp_peer.c Project: aosm/openmpi
/*
 *  Initiate the appropriate action based on the state of the connection
 *  to the peer.
 *
 */
int mca_oob_tcp_peer_send(mca_oob_tcp_peer_t* peer, mca_oob_tcp_msg_t* msg)
{
    int rc = ORTE_SUCCESS;
    OPAL_THREAD_LOCK(&peer->peer_lock);
    switch(peer->peer_state) {
    case MCA_OOB_TCP_CONNECTING:
    case MCA_OOB_TCP_CONNECT_ACK:
    case MCA_OOB_TCP_CLOSED:
    case MCA_OOB_TCP_RESOLVE:
        /*
         * queue the message and attempt to resolve the peer address
         */
        opal_list_append(&peer->peer_send_queue, (opal_list_item_t*)msg);
        if(peer->peer_state == MCA_OOB_TCP_CLOSED) {
            peer->peer_state = MCA_OOB_TCP_RESOLVE;
            OPAL_THREAD_UNLOCK(&peer->peer_lock);
            return mca_oob_tcp_resolve(peer);
        }
        break;
    case MCA_OOB_TCP_FAILED:
        rc = ORTE_ERR_UNREACH;
        break;
    case MCA_OOB_TCP_CONNECTED:
        /*
         * start the message, or queue it if a send is already in progress
         */
        if (NULL != peer->peer_send_msg) {
            opal_list_append(&peer->peer_send_queue, (opal_list_item_t*)msg);
        } else {
            /* if the send does not complete immediately, save it and
               register for send event notifications */
            if(!mca_oob_tcp_msg_send_handler(msg, peer)) {
                peer->peer_send_msg = msg;
                opal_event_add(&peer->peer_send_event, 0);
            } else {
                mca_oob_tcp_msg_complete(msg, &peer->peer_name);
            }
        }
        break;
    }
    OPAL_THREAD_UNLOCK(&peer->peer_lock);
    return rc;
}
Example #5
File: proc.c Project: XuanWang1982/ompi
int oshmem_proc_pack(oshmem_proc_t **proclist,
                     int proclistsize,
                     opal_buffer_t* buf)
{
    int i, rc;

    OPAL_THREAD_LOCK(&oshmem_proc_lock);

    /* cycle through the provided array, packing the OSHMEM level
     * data for each proc. This data may or may not be included
     * in any subsequent modex operation, so we include it here
     * to ensure completion of a connect/accept handshake. See
     * the ompi/mca/dpm framework for an example of where and how
     * this info is used.
     *
     * Eventually, we will review the procedures that call this
     * function to see if duplication of communication can be
     * reduced. For now, just go ahead and pack the info so it
     * can be sent.
     */
    for (i = 0; i < proclistsize; i++) {
        rc = opal_dss.pack(buf, &(proclist[i]->super.proc_name), 1, ORTE_NAME);
        if (rc != ORTE_SUCCESS) {
            ORTE_ERROR_LOG(rc);
            OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
            return rc;
        }
        rc = opal_dss.pack(buf, &(proclist[i]->super.proc_arch), 1, OPAL_UINT32);
        if (rc != ORTE_SUCCESS) {
            ORTE_ERROR_LOG(rc);
            OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
            return rc;
        }
        rc = opal_dss.pack(buf, &(proclist[i]->super.proc_hostname), 1, OPAL_STRING);
        if (rc != ORTE_SUCCESS) {
            ORTE_ERROR_LOG(rc);
            OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
            return rc;
        }
    }

    OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
    return OSHMEM_SUCCESS;
}
Example #6
void mca_btl_tcp_proc_destruct(mca_btl_tcp_proc_t* tcp_proc)
{
    if( NULL != tcp_proc->proc_opal ) {
        /* remove from list of all proc instances */
        OPAL_THREAD_LOCK(&mca_btl_tcp_component.tcp_lock);
        opal_proc_table_remove_value(&mca_btl_tcp_component.tcp_procs,
                                     tcp_proc->proc_opal->proc_name);
        OPAL_THREAD_UNLOCK(&mca_btl_tcp_component.tcp_lock);
        OBJ_RELEASE(tcp_proc->proc_opal);
        tcp_proc->proc_opal = NULL;
    }
    /* release resources */
    if(NULL != tcp_proc->proc_endpoints) {
        free(tcp_proc->proc_endpoints);
    }
    if(NULL != tcp_proc->proc_addrs) {
        free(tcp_proc->proc_addrs);
    }
    OBJ_DESTRUCT(&tcp_proc->proc_lock);
}
Example #7
/*
 * Return the length of a value
 */
int ompi_info_get_valuelen (ompi_info_t *info, const char *key, int *valuelen,
                            int *flag)
{
    ompi_info_entry_t *search;

    OPAL_THREAD_LOCK(info->i_lock);
    search = info_find_key (info, key);
    if (NULL == search){
        *flag = 0;
    } else {
        /*
         * We have found the element, so we can return the value
         * Set the flag, value_length and value
         */
         *flag = 1;
         *valuelen = strlen(search->ie_value);
    }
    OPAL_THREAD_UNLOCK(info->i_lock);
    return MPI_SUCCESS;
}
Example #8
int orte_pls_base_orted_cancel_operation(void)
{
    /* protect for threads */
    OPAL_THREAD_LOCK(&orte_pls_base.orted_cmd_lock);
    
    /* cancel any waiting receive - we don't want to hear it */
    orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORTE_RML_TAG_PLS_ORTED_ACK);
    
    /* set the completion status to reflect cancellation -- no need to
       print anything */
    completion_status = ORTE_ERR_SILENT;
    
    /* declare us "done" so we can exit cleanly */
    opal_condition_signal(&orte_pls_base.orted_cmd_cond);
    
    /* unlock us */
    OPAL_THREAD_UNLOCK(&orte_pls_base.orted_cmd_lock);
    
    return ORTE_SUCCESS;
}
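The opal_condition_signal above wakes whichever thread is blocked waiting for the orted command to complete. A sketch of what that wait side presumably looks like (hypothetical - the waiting code is not part of this listing; `done` stands in for the real completion predicate):

/* Hypothetical wait side matching the cancel operation above */
OPAL_THREAD_LOCK(&orte_pls_base.orted_cmd_lock);
while (!done) {
    /* releases the lock while sleeping, reacquires it on wakeup */
    opal_condition_wait(&orte_pls_base.orted_cmd_cond,
                        &orte_pls_base.orted_cmd_lock);
}
OPAL_THREAD_UNLOCK(&orte_pls_base.orted_cmd_lock);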
Example #9
static int finalize(void)
{
    int rc;
    opal_list_item_t *item;
    
    OPAL_THREAD_LOCK(&mca_iof_orted_component.lock);
    while ((item = opal_list_remove_first(&mca_iof_orted_component.sinks)) != NULL) {
        OBJ_RELEASE(item);
    }
    OBJ_DESTRUCT(&mca_iof_orted_component.sinks);
    while ((item = opal_list_remove_first(&mca_iof_orted_component.procs)) != NULL) {
        OBJ_RELEASE(item);
    }
    OBJ_DESTRUCT(&mca_iof_orted_component.procs);
    /* Cancel the RML receive */
    rc = orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORTE_RML_TAG_IOF_PROXY);
    OPAL_THREAD_UNLOCK(&mca_iof_orted_component.lock);
    OBJ_DESTRUCT(&mca_iof_orted_component.lock);
    return rc;
}
Example #10
/**
 * This function resizes the free_list to contain at least the specified
 * number of elements. We do not create all of them in the same memory
 * segment. Instead we allocate fl_num_per_alloc elements at a time until
 * we reach either the required number or the maximum allowed by the
 * initialization. For example, with fl_num_per_alloc = 32, growing a list
 * by 100 elements takes four calls to ompi_free_list_grow.
 */
int
ompi_free_list_resize(ompi_free_list_t* flist, size_t size)
{
    ssize_t inc_num;
    int ret = OMPI_SUCCESS;

    if (flist->fl_num_allocated > size) {
        return OMPI_SUCCESS;
    }
    OPAL_THREAD_LOCK(&((flist)->fl_lock));
    inc_num = size - flist->fl_num_allocated;
    while( inc_num > 0 ) {
        ret = ompi_free_list_grow(flist, flist->fl_num_per_alloc);
        if( OMPI_SUCCESS != ret ) break;
        inc_num = size - flist->fl_num_allocated;
    }
    OPAL_THREAD_UNLOCK(&((flist)->fl_lock));

    return ret;
}
Example #11
int
mca_io_romio_file_iread_at (ompi_file_t *fh,
                            MPI_Offset offset,
                            void *buf,
                            int count,
                            struct ompi_datatype_t *datatype,
                            ompi_request_t **request)
{
    int ret;
    mca_io_romio_data_t *data;

    data = (mca_io_romio_data_t *) fh->f_io_selected_data;
    OPAL_THREAD_LOCK (&mca_io_romio_mutex);
    ret =
        ROMIO_PREFIX(MPI_File_iread_at) (data->romio_fh, offset, buf, count,
                                         datatype, request);
    OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);

    return ret;
}
Example #12
int
mca_io_romio314_file_get_view (ompi_file_t *fh,
                            MPI_Offset * disp,
                            struct ompi_datatype_t ** etype,
                            struct ompi_datatype_t ** filetype,
                            char *datarep)
{
    int ret;
    mca_io_romio314_data_t *data;

    data = (mca_io_romio314_data_t *) fh->f_io_selected_data;
    OPAL_THREAD_LOCK (&mca_io_romio314_mutex);
    ret =
        ROMIO_PREFIX(MPI_File_get_view) (data->romio_fh, disp, etype, filetype,
                                        datarep);
    OPAL_THREAD_UNLOCK (&mca_io_romio314_mutex);

    return ret;
}
Example #13
File: proc.c Project: IanYXXL/A1
ompi_proc_t * ompi_proc_find ( const ompi_process_name_t * name )
{
    ompi_proc_t *proc, *rproc=NULL;
    ompi_rte_cmp_bitmask_t mask;

    /* return the proc-struct which matches this jobid+process id */
    mask = OMPI_RTE_CMP_JOBID | OMPI_RTE_CMP_VPID;
    OPAL_THREAD_LOCK(&ompi_proc_lock);
    for(proc =  (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
        proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
        proc =  (ompi_proc_t*)opal_list_get_next(proc)) {
        if (OPAL_EQUAL == ompi_rte_compare_name_fields(mask, &proc->proc_name, name)) {
            rproc = proc;
            break;
        }
    }
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);

    return rproc;
}
Example #14
static inline int32_t
create_send_tag(ompi_osc_pt2pt_module_t *module)
{
#if OMPI_HAVE_THREAD_SUPPORT && OPAL_HAVE_ATOMIC_CMPSET_32
    int32_t newval, oldval;
    do {
        oldval = module->p2p_tag_counter;
        newval = (oldval + 1) % mca_pml.pml_max_tag;
    } while (0 == opal_atomic_cmpset_32(&module->p2p_tag_counter, oldval, newval));
    return newval;
#else
    int32_t ret;
    /* no compare and swap - have to lock the module */
    OPAL_THREAD_LOCK(&module->p2p_lock);
    module->p2p_tag_counter = (module->p2p_tag_counter + 1) % mca_pml.pml_max_tag;
    ret = module->p2p_tag_counter;
    OPAL_THREAD_UNLOCK(&module->p2p_lock);
    return ret;
#endif
}
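The lock-free branch above retries until opal_atomic_cmpset_32 reports success (non-zero), i.e. until no other thread updated the counter between the read and the swap. A minimal standalone sketch of the same pattern using C11 atomics, with MAX_TAG standing in for mca_pml.pml_max_tag:

#include <stdatomic.h>
#include <stdint.h>

#define MAX_TAG 32767  /* stand-in for mca_pml.pml_max_tag */

static _Atomic int32_t tag_counter;

static int32_t next_tag(void)
{
    int32_t oldval, newval;
    do {
        oldval = atomic_load(&tag_counter);
        newval = (oldval + 1) % MAX_TAG;
        /* retry if another thread won the race between load and swap */
    } while (!atomic_compare_exchange_weak(&tag_counter, &oldval, newval));
    return newval;
}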
Example #15
/*
 * Duplicate an info
 */
int ompi_info_dup (ompi_info_t *info, ompi_info_t **newinfo)
{
    int err;
    opal_list_item_t *item;
    ompi_info_entry_t *iterator;

    OPAL_THREAD_LOCK(info->i_lock);
    for (item = opal_list_get_first(&(info->super));
         item != opal_list_get_end(&(info->super));
         item = opal_list_get_next(item)) {
        iterator = (ompi_info_entry_t *) item;
        err = ompi_info_set(*newinfo, iterator->ie_key, iterator->ie_value);
        if (MPI_SUCCESS != err) {
            OPAL_THREAD_UNLOCK(info->i_lock);
            return err;
        }
    }
    OPAL_THREAD_UNLOCK(info->i_lock);
    return MPI_SUCCESS;
}
Example #16
int mca_btl_tcp_del_procs(struct mca_btl_base_module_t* btl,
                          size_t nprocs,
                          struct opal_proc_t **procs,
                          struct mca_btl_base_endpoint_t ** endpoints)
{
    mca_btl_tcp_module_t* tcp_btl = (mca_btl_tcp_module_t*)btl;
    size_t i;

    OPAL_THREAD_LOCK(&tcp_btl->tcp_endpoints_mutex);
    for( i = 0; i < nprocs; i++ ) {
        mca_btl_tcp_endpoint_t* tcp_endpoint = endpoints[i];
        if(tcp_endpoint->endpoint_proc != mca_btl_tcp_proc_local()) {
            opal_list_remove_item(&tcp_btl->tcp_endpoints, (opal_list_item_t*)tcp_endpoint);
            OBJ_RELEASE(tcp_endpoint);
        }
        opal_progress_event_users_decrement();
    }
    OPAL_THREAD_UNLOCK(&tcp_btl->tcp_endpoints_mutex);
    return OPAL_SUCCESS;
}
Example #17
File: oob_ud_req.c Project: 00datman/ompi
bool mca_oob_ud_req_is_in_list (mca_oob_ud_req_t *req, opal_list_t *list)
{
    opal_list_item_t *item;
    bool rc = false;

    OPAL_THREAD_LOCK(&mca_oob_ud_component.ud_match_lock);

    for (item = opal_list_get_first (list) ;
         item != opal_list_get_end (list) ;
         item = opal_list_get_next (item)) {
        if (item == (opal_list_item_t *) req) {
            rc = true;
            break;
        }
    }

    OPAL_THREAD_UNLOCK(&mca_oob_ud_component.ud_match_lock);

    return rc;
}
Example #18
File: orte_wait.c Project: bringhurst/ompi
int
orte_wait_cb_cancel(pid_t wpid)
{
    opal_process_handle_t* pending;

    OPAL_THREAD_LOCK(&mutex);

    /**
     * Do we have any registered callback for this particular pid ?
     */
    pending = find_pending_cb( wpid, false );
    if( NULL != pending ) {
        opal_list_remove_item( &registered_cb, (opal_list_item_t*)pending );
        OBJ_RELEASE( pending );
        OPAL_THREAD_UNLOCK(&mutex);
        return ORTE_SUCCESS;
    }
    OPAL_THREAD_UNLOCK(&mutex);
    return ORTE_ERR_BAD_PARAM;
}
Example #19
int orte_iof_base_callback_delete(
    const orte_process_name_t* proc,
    int tag)
{
    orte_iof_base_endpoint_t* endpoint;
    opal_list_item_t* item;

    OPAL_THREAD_LOCK(&orte_iof_base.iof_lock);
    if(NULL == (endpoint = orte_iof_base_endpoint_lookup(proc,ORTE_IOF_SINK, tag))) {
        OPAL_THREAD_UNLOCK(&orte_iof_base.iof_lock);
        return ORTE_ERR_NOT_FOUND;
    }

    while(NULL != (item = opal_list_remove_first(&endpoint->ep_callbacks))) {
        OBJ_RELEASE(item);
    }
    OBJ_RELEASE(endpoint);
    OPAL_THREAD_UNLOCK(&orte_iof_base.iof_lock);
    return ORTE_SUCCESS;
}
Example #20
/**********************************************************************
 *
 * Receive an accumulate on the target side
 *
 **********************************************************************/
static void
ompi_osc_pt2pt_sendreq_recv_accum_long_cb(ompi_osc_pt2pt_longreq_t *longreq)
{
    ompi_osc_pt2pt_send_header_t *header = 
        (ompi_osc_pt2pt_send_header_t*) longreq->req_comp_cbdata;
    void *payload = (void*) (header + 1);
    int ret;

    /* lock the window for accumulates */
    OPAL_THREAD_LOCK(&longreq->req_module->p2p_acc_lock);

    opal_list_remove_item(&(longreq->req_module->p2p_long_msgs), 
                          &(longreq->super.super));

    /* copy the data from the temporary buffer into the user window */
    ret = ompi_osc_pt2pt_process_op(longreq->req_module, 
                                    header, 
                                    longreq->req_datatype, 
                                    longreq->req_op, 
                                    payload,
                                    header->hdr_msg_length);

    /* unlock the window for accumulates */
    OPAL_THREAD_UNLOCK(&longreq->req_module->p2p_acc_lock);
    
    opal_output_verbose(50, ompi_osc_base_output,
                        "%d finished receiving long accum message from %d",
                        longreq->req_module->p2p_comm->c_my_rank, 
                        header->hdr_origin);               

    /* free the temp buffer */
    free(longreq->req_comp_cbdata);

    /* Release datatype & op */
    OBJ_RELEASE(longreq->req_datatype);
    OBJ_RELEASE(longreq->req_op);

    OPAL_THREAD_ADD32(&(longreq->req_module->p2p_num_pending_in), -1);

    ompi_osc_pt2pt_longreq_free(longreq);
}
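The OPAL_THREAD_ADD32 at the end decrements the count of pending incoming operations. By analogy with OPAL_THREAD_LOCK it is presumably a conditionally-atomic add; a conceptual sketch (an assumption, not the real definition):

/* Conceptual sketch only - not the actual OMPI macro */
static inline int32_t my_thread_add32(volatile int32_t *addr, int32_t delta)
{
    if (opal_using_threads()) {
        return opal_atomic_add_32(addr, delta);  /* atomic fetch-and-add */
    }
    *addr += delta;
    return *addr;
}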
Example #21
int mca_io_ompio_file_get_byte_offset (ompi_file_t *fh,
                                       OMPI_MPI_OFFSET_TYPE offset,
                                       OMPI_MPI_OFFSET_TYPE *disp)
{
    mca_io_ompio_data_t *data;
    int i, k, index;
    size_t temp_offset;

    data = (mca_io_ompio_data_t *) fh->f_io_selected_data;

    OPAL_THREAD_LOCK(&fh->f_lock);
    temp_offset = data->ompio_fh.f_view_extent *
        (offset*data->ompio_fh.f_etype_size / data->ompio_fh.f_view_size);

    i = (offset*data->ompio_fh.f_etype_size) % data->ompio_fh.f_view_size;
    index = 0;
    k = 0;

    while (1) {
        k = data->ompio_fh.f_decoded_iov[index].iov_len;
        if (i >= k) {
            i -= k;
            index++;
            if ( 0 == i ) {
                k=0;
                break;
            }
        }
        else {
            k=i;
            break;
        }
    }

    *disp = data->ompio_fh.f_disp + temp_offset +
        (OMPI_MPI_OFFSET_TYPE)(intptr_t)data->ompio_fh.f_decoded_iov[index].iov_base + k;
    OPAL_THREAD_UNLOCK(&fh->f_lock);

    return OMPI_SUCCESS;
}
Example #22
File: proc.c Project: XuanWang1982/ompi
/* in some cases, all PE procs are required to do a modex so they
 * can (at the least) exchange their architecture. Since we cannot
 * know in advance if this was required, we provide a separate function
 * to set the arch (instead of doing it inside of oshmem_proc_init) that
 * can be called after the modex completes in oshmem_shmem_init. Thus, we
 * know that - regardless of how the arch is known, whether via modex
 * or dropped in from a local daemon - the arch can be set correctly
 * at this time
 */
int oshmem_proc_set_arch(void)
{
    oshmem_proc_t *proc = NULL;
    opal_list_item_t *item = NULL;
    int ret = OSHMEM_SUCCESS;

    OPAL_THREAD_LOCK(&oshmem_proc_lock);

    for (item = opal_list_get_first(&oshmem_proc_list);
            item != opal_list_get_end(&oshmem_proc_list);
            item = opal_list_get_next(item)) {
        proc = (oshmem_proc_t*) item;

        if (OSHMEM_PROC_VPID(proc) != ORTE_PROC_MY_NAME->vpid) {
            /* if arch is different than mine, create a new convertor for this proc */
            if (proc->super.proc_arch != opal_local_arch) {
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
                OBJ_RELEASE(proc->super.proc_convertor);
                proc->super.proc_convertor = opal_convertor_create(proc->super.proc_arch, 0);
#else
                orte_show_help("help-shmem-runtime.txt",
                               "heterogeneous-support-unavailable",
                               true,
                               orte_process_info.nodename,
                               proc->super.proc_hostname == NULL ?
                                       "<hostname unavailable>" :
                                       proc->super.proc_hostname);
                OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
                return OSHMEM_ERR_NOT_SUPPORTED;
#endif
            }
        }
    }

    /* Set predefined groups */
    ret = oshmem_proc_group_init();

    OPAL_THREAD_UNLOCK(&oshmem_proc_lock);

    return ret;
}
Example #23
int orte_grpcomm_base_set_proc_attr(const char *attr_name,
                                    const void *data,
                                    size_t size)
{
    int rc;
    
    OPAL_THREAD_LOCK(&mutex);
    
    OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                         "%s grpcomm:set_proc_attr: setting attribute %s data size %lu",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         attr_name, (unsigned long)size));

    /* Pack the attribute name information into the local buffer */
    if (ORTE_SUCCESS != (rc = opal_dss.pack(modex_buffer, &attr_name, 1, OPAL_STRING))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    /* pack the size */
    if (ORTE_SUCCESS != (rc = opal_dss.pack(modex_buffer, &size, 1, OPAL_SIZE))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }
    
    /* Pack the actual data into the buffer */
    if (0 != size) {
        if (ORTE_SUCCESS != (rc = opal_dss.pack(modex_buffer, (void *) data, size, OPAL_BYTE))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
    }
    
    /* track the number of entries */
    ++num_entries;
    
cleanup:
    OPAL_THREAD_UNLOCK(&mutex);
    
    return rc;
}
Example #24
File: oob_tcp_peer.c Project: aosm/openmpi
/*
 * A file descriptor is available/ready for send. Check the state
 * of the socket and take the appropriate action.
 */
static void mca_oob_tcp_peer_send_handler(int sd, short flags, void* user)
{
    mca_oob_tcp_peer_t* peer = (mca_oob_tcp_peer_t *)user;
    OPAL_THREAD_LOCK(&peer->peer_lock);
    switch(peer->peer_state) {
    case MCA_OOB_TCP_CONNECTING:
        mca_oob_tcp_peer_complete_connect(peer);
        break;
    case MCA_OOB_TCP_CONNECTED:
        {
        while(peer->peer_send_msg != NULL) {

            /* complete the current send */
            mca_oob_tcp_msg_t* msg = peer->peer_send_msg;
            if(mca_oob_tcp_msg_send_handler(msg, peer)) {
                mca_oob_tcp_msg_complete(msg, &peer->peer_name);
            } else {
                break;
            }

            /* if current completed - progress any pending sends */
            peer->peer_send_msg = (mca_oob_tcp_msg_t*)
                opal_list_remove_first(&peer->peer_send_queue);
        }
        
        /* if nothing else to do unregister for send event notifications */
        if(NULL == peer->peer_send_msg) {
            opal_event_del(&peer->peer_send_event);
        }
        break;
        }
    default:
        opal_output(0, "[%lu,%lu,%lu]-[%lu,%lu,%lu] mca_oob_tcp_peer_send_handler: invalid connection state (%d)",
            ORTE_NAME_ARGS(orte_process_info.my_name),
            ORTE_NAME_ARGS(&(peer->peer_name)),
            peer->peer_state);
        opal_event_del(&peer->peer_send_event);
        break;
    }
    OPAL_THREAD_UNLOCK(&peer->peer_lock);
}
Example #25
static void mca_oob_ud_peer_msg_timeout (int fd, short event, void *ctx)
{
    mca_oob_ud_peer_t *peer = (mca_oob_ud_peer_t *) ctx;
    mca_oob_ud_msg_t  *msg;

    OPAL_THREAD_LOCK(&peer->peer_lock);

    /* read the head of the in-flight list only while holding the peer lock */
    msg = (mca_oob_ud_msg_t *) opal_list_get_first (&peer->peer_flying_messages);

    if (false == peer->peer_timer.active) {
        /* the timer was cancelled while this callback was already queued;
           release the lock before bailing out */
        OPAL_THREAD_UNLOCK(&peer->peer_lock);
        return;
    }

    peer->peer_timer.active = false;

    OPAL_OUTPUT_VERBOSE((10, mca_oob_base_output, "%s oob:ud:peer_msg_timeout timeout sending to peer "
                         "%s. first message = %" PRIu64 " which has length %d" , ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(&peer->peer_name), msg->hdr->msg_id, msg->wr.sg_list[0].length));

    if (peer->peer_timer.tries == 0) {
        opal_list_item_t *item;


        while (NULL != (item = opal_list_remove_first (&peer->peer_flying_messages))) {
            msg = (mca_oob_ud_msg_t *) item;

            mca_oob_ud_msg_status_update (msg, MCA_OOB_UD_MSG_STATUS_TIMEOUT);
            if (msg->req) {
                mca_oob_ud_req_complete (msg->req, ORTE_ERR_TIMEOUT);
            }
        }

        OPAL_THREAD_UNLOCK(&peer->peer_lock);
        mca_oob_ud_peer_lost (peer);
        return;
    }

    peer->peer_timer.tries--;
    mca_oob_ud_peer_post_all (peer);
    mca_oob_ud_peer_start_timer (peer);

    OPAL_THREAD_UNLOCK(&peer->peer_lock);
}
Example #26
File: orte_wait.c Project: bringhurst/ompi
int
orte_wait_finalize(void)
{
    opal_list_item_t* item;

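    /* take and drop the mutex once, presumably so that any thread still
       inside a wait/callback critical section drains out before teardown */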
    OPAL_THREAD_LOCK(&mutex);
    OPAL_THREAD_UNLOCK(&mutex);

    while (NULL != (item = opal_list_remove_first(&pending_pids))) {
        OBJ_RELEASE(item);
    }
    while (NULL != (item = opal_list_remove_first(&registered_cb))) {
        OBJ_RELEASE(item);
    }

    OBJ_DESTRUCT(&mutex);
    OBJ_DESTRUCT(&registered_cb);
    OBJ_DESTRUCT(&pending_pids);

    return ORTE_SUCCESS;
}
Example #27
/*
 * Look for an existing MX process instance based on the associated
 * ompi_proc_t instance.
 */
static mca_btl_mx_proc_t* mca_btl_mx_proc_lookup_ompi(ompi_proc_t* ompi_proc)
{
    mca_btl_mx_proc_t* mx_proc;

    OPAL_THREAD_LOCK(&mca_btl_mx_component.mx_lock);

    for( mx_proc = (mca_btl_mx_proc_t*)opal_list_get_first(&mca_btl_mx_component.mx_procs);
         mx_proc != (mca_btl_mx_proc_t*)opal_list_get_end(&mca_btl_mx_component.mx_procs);
         mx_proc  = (mca_btl_mx_proc_t*)opal_list_get_next(mx_proc) ) {

        if(mx_proc->proc_ompi == ompi_proc) {
            OPAL_THREAD_UNLOCK(&mca_btl_mx_component.mx_lock);
            return mx_proc;
        }

    }

    OPAL_THREAD_UNLOCK(&mca_btl_mx_component.mx_lock);

    return NULL;
}
Example #28
static inline int dereg_mem(mca_mpool_base_registration_t *reg)
{
    mca_mpool_grdma_module_t *mpool_grdma = (mca_mpool_grdma_module_t *) reg->mpool;
    int rc;

    if(!(reg->flags & MCA_MPOOL_FLAGS_CACHE_BYPASS))
        reg->mpool->rcache->rcache_delete(reg->mpool->rcache, reg);

    /* Drop the rcache lock before deregistering the memory */
    OPAL_THREAD_UNLOCK(&reg->mpool->rcache->lock);
    rc = mpool_grdma->resources.deregister_mem(mpool_grdma->resources.reg_data,
                                               reg);
    OPAL_THREAD_LOCK(&reg->mpool->rcache->lock);

    if (OPAL_LIKELY(OMPI_SUCCESS == rc)) {
        OMPI_FREE_LIST_RETURN(&mpool_grdma->reg_list,
                              (ompi_free_list_item_t *) reg);
    }

    return rc;
}
Example #29
/*
 * The free call marks the final stage in a request life-cycle. Starting from
 * this point the request is completed at both the PML and user level, and can
 * be reused for other p2p communications. Therefore, in the case of the OB1
 * PML it should be added to the free request list.
 */
static int mca_pml_ob1_send_request_free(struct ompi_request_t** request)
{
    mca_pml_ob1_send_request_t* sendreq = *(mca_pml_ob1_send_request_t**)request;
    
    assert( false == sendreq->req_send.req_base.req_free_called );

    OPAL_THREAD_LOCK(&ompi_request_lock);
    sendreq->req_send.req_base.req_free_called = true;

    PERUSE_TRACE_COMM_EVENT( PERUSE_COMM_REQ_NOTIFY,
                             &(sendreq->req_send.req_base), PERUSE_SEND );

    if( true == sendreq->req_send.req_base.req_pml_complete ) {
        /* make the buffer defined when the request is completed,
           and before releasing the objects. */
        MEMCHECKER(
            memchecker_call(&opal_memchecker_base_mem_defined,
                            sendreq->req_send.req_base.req_addr,
                            sendreq->req_send.req_base.req_count,
                            sendreq->req_send.req_base.req_datatype);
        );
        /* (reconstructed ending - the listing is truncated here) the request
           is also done at the PML level, so hand it back to the free list */
        MCA_PML_OB1_SEND_REQUEST_RETURN( sendreq );
    }
    OPAL_THREAD_UNLOCK(&ompi_request_lock);

    *request = MPI_REQUEST_NULL;
    return OMPI_SUCCESS;
}
Example #30
/* Get a context to use for communication.
 * If TLS is supported, it will use the cached endpoint.
 * If not, it will invoke the normal round-robin assignment. */
mca_btl_ofi_context_t *get_ofi_context(mca_btl_ofi_module_t *btl)
{
#if OPAL_HAVE_THREAD_LOCAL
    /* With TLS, we cache the context we use. */
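    /* `my_context` is assumed to be a file-scope opal_thread_local pointer
       declared elsewhere in this file (its declaration is not shown here) */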
    static volatile int64_t cur_num = 0;

    if (OPAL_UNLIKELY(my_context == NULL)) {
        OPAL_THREAD_LOCK(&btl->module_lock);

        my_context = &btl->contexts[cur_num];
        cur_num = (cur_num + 1) % btl->num_contexts;

        OPAL_THREAD_UNLOCK(&btl->module_lock);
    }

    assert (my_context);
    return my_context;
#else
    return get_ofi_context_rr(btl);
#endif
}