Example #1
static inline bool mca_rcache_rgpusm_deregister_lru (mca_rcache_base_module_t *rcache) {
    mca_rcache_rgpusm_module_t *rcache_rgpusm = (mca_rcache_rgpusm_module_t *) rcache;
    mca_rcache_base_registration_t *old_reg;
    int rc;

    /* Remove the registration from the cache and list before
       deregistering the memory */
    old_reg = (mca_rcache_base_registration_t*)
        opal_list_remove_first (&rcache_rgpusm->lru_list);
    if (NULL == old_reg) {
        opal_output_verbose(10, mca_rcache_rgpusm_component.output,
                            "RGPUSM: The LRU list is empty. There is nothing to deregister");
        return false;
    }

    mca_rcache_base_vma_delete (rcache_rgpusm->vma_module, old_reg);

    /* Drop the rcache lock while we deregister the memory */
    OPAL_THREAD_UNLOCK(&rcache->lock);
    assert(old_reg->ref_count == 0);
    rc = cuda_closememhandle (NULL, old_reg);
    OPAL_THREAD_LOCK(&rcache->lock);

    /* This introduces a potential leak of registrations: if
       the deregistration fails, we no longer hold a reference
       to the registration. Is this possible? */
    if (OPAL_SUCCESS != rc) {
        opal_output_verbose(10, mca_rcache_rgpusm_component.output,
                            "RGPUSM: Failed to deregister the memory addr=%p, size=%d",
                            old_reg->base, (int)(old_reg->bound - old_reg->base + 1));
        return false;
    }

    opal_free_list_return (&rcache_rgpusm->reg_list,
                           (opal_free_list_item_t*)old_reg);
    rcache_rgpusm->stat_evicted++;

    return true;
}
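The pattern worth noting here is that the registration is unlinked from the LRU list and the VMA tree while the rcache lock is held, and the lock is then dropped only for the slow CUDA deregistration call. Below is a minimal sketch of that lock-drop idiom in plain pthreads; the names (entry_t, slow_deregister, evict_one) are illustrative stand-ins, not Open MPI API.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct entry { struct entry *next; } entry_t;

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static entry_t *lru_head;

/* Stand-in for the driver call (e.g. cuda_closememhandle). */
static int slow_deregister(entry_t *e) { (void) e; return 0; }

static bool evict_one(void)
{
    pthread_mutex_lock(&cache_lock);
    entry_t *victim = lru_head;
    if (NULL == victim) {
        pthread_mutex_unlock(&cache_lock);
        return false;                 /* LRU list is empty */
    }
    lru_head = victim->next;          /* unlink while locked */

    /* Drop the lock: the entry is now unreachable by other threads,
       so the slow call can run without blocking the cache. */
    pthread_mutex_unlock(&cache_lock);
    int rc = slow_deregister(victim);
    pthread_mutex_lock(&cache_lock);

    /* ... return victim to a free list, bump eviction stats ... */
    pthread_mutex_unlock(&cache_lock);
    return 0 == rc;
}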
Example #2
oshmem_proc_t * oshmem_proc_find(const orte_process_name_t * name)
{
    oshmem_proc_t *proc, *rproc = NULL;
    orte_ns_cmp_bitmask_t mask;

    /* return the proc-struct which matches this jobid+process id */
    mask = ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
    OPAL_THREAD_LOCK(&oshmem_proc_lock);
    for (proc = (oshmem_proc_t*) opal_list_get_first(&oshmem_proc_list);
            proc != (oshmem_proc_t*) opal_list_get_end(&oshmem_proc_list);
            proc = (oshmem_proc_t*) opal_list_get_next(proc)) {
        if (OPAL_EQUAL
                == orte_util_compare_name_fields(mask,
                                                 (orte_process_name_t*)&proc->super.proc_name,
                                                 name)) {
            rproc = proc;
            break;
        }
    }
    OPAL_THREAD_UNLOCK(&oshmem_proc_lock);

    return rproc;
}
Example #3
/*
 *  Request completed - free buffer and decrement pending count
 */
int mca_pml_base_bsend_request_fini(ompi_request_t* request)
{
    mca_pml_base_send_request_t* sendreq = (mca_pml_base_send_request_t*)request;
    if(sendreq->req_bytes_packed == 0 ||
       sendreq->req_addr == NULL ||
       sendreq->req_addr == sendreq->req_base.req_addr)
        return OMPI_SUCCESS;

    /* remove from list of pending requests */
    OPAL_THREAD_LOCK(&mca_pml_bsend_mutex);

    /* free buffer */
    mca_pml_bsend_allocator->alc_free(mca_pml_bsend_allocator, (void *)sendreq->req_addr);
    sendreq->req_addr = sendreq->req_base.req_addr;

    /* decrement count of buffered requests */
    if(--mca_pml_bsend_count == 0)
        opal_condition_signal(&mca_pml_bsend_condition);

    OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
    return OMPI_SUCCESS;
}
static int
mca_pml_cm_recv_request_free(struct ompi_request_t** request)
{
    mca_pml_cm_request_t* recvreq = *(mca_pml_cm_request_t**)request; 
    
    assert( false == recvreq->req_free_called );
    
    OPAL_THREAD_LOCK(&ompi_request_lock);
    recvreq->req_free_called = true;
    if( true == recvreq->req_pml_complete ) {
        if( MCA_PML_CM_REQUEST_RECV_THIN == recvreq->req_pml_type ) {
            MCA_PML_CM_THIN_RECV_REQUEST_RETURN((mca_pml_cm_thin_recv_request_t*)recvreq );
        } else {
            MCA_PML_CM_HVY_RECV_REQUEST_RETURN((mca_pml_cm_hvy_recv_request_t*)recvreq );
        }
    }

    OPAL_THREAD_UNLOCK(&ompi_request_lock);

    *request = MPI_REQUEST_NULL;
    return OMPI_SUCCESS;
} 
/* This function must be called with the rcache lock held */
static void do_unregistration_gc(struct mca_mpool_base_module_t *mpool)
{
    mca_mpool_rdma_module_t *mpool_rdma = (mca_mpool_rdma_module_t*)mpool;
    mca_mpool_base_registration_t *reg;

    do {
        /* Remove registration from garbage collection list
           before deregistering it */
        reg = (mca_mpool_base_registration_t *)
            opal_list_remove_first(&mpool_rdma->gc_list);
        mpool->rcache->rcache_delete(mpool->rcache, reg);

        /* Drop the rcache lock before calling dereg_mem as there
           may be memory allocations */
        OPAL_THREAD_UNLOCK(&mpool->rcache->lock);
        dereg_mem(mpool, reg);
        OPAL_THREAD_LOCK(&mpool->rcache->lock);

        OMPI_FREE_LIST_RETURN(&mpool_rdma->reg_list,
                (ompi_free_list_item_t*)reg);
    } while(!opal_list_is_empty(&mpool_rdma->gc_list));
}
Example #6
/* progress an OSC request */
static int ompi_osc_rdma_req_comm_complete (ompi_request_t *request)
{
    ompi_osc_rdma_request_t *rdma_request = (ompi_osc_rdma_request_t *) request->req_complete_cb_data;
    ompi_osc_rdma_module_t *module = rdma_request->module;

    OPAL_OUTPUT_VERBOSE((10, ompi_osc_base_framework.framework_output,
                         "ompi_osc_rdma_req_comm_complete called tag = %d",
                         request->req_status.MPI_TAG));

    mark_outgoing_completion (module);

    OPAL_THREAD_LOCK(&ompi_request_lock);
    if (0 == --rdma_request->outstanding_requests) {
        ompi_osc_rdma_request_complete (rdma_request, request->req_status.MPI_ERROR);
    }
    OPAL_THREAD_UNLOCK(&ompi_request_lock);

    /* put this request on the garbage collection list */
    osc_rdma_gc_add_request (request);

    return OMPI_SUCCESS;
}
Example #7
int NBC_Start(NBC_Handle *handle) {
  int res;

  /* bozo case */
  if ((ompi_request_t *)handle == &ompi_request_empty) {
    return OMPI_SUCCESS;
  }

  /* kick off first round */
  handle->super.req_state = OMPI_REQUEST_ACTIVE;
  handle->super.req_status.MPI_ERROR = OMPI_SUCCESS;
  res = NBC_Start_round(handle);
  if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
    return res;
  }

  OPAL_THREAD_LOCK(&mca_coll_libnbc_component.lock);
  opal_list_append(&mca_coll_libnbc_component.active_requests, &(handle->super.super.super));
  OPAL_THREAD_UNLOCK(&mca_coll_libnbc_component.lock);

  return OMPI_SUCCESS;
}
OMPI_DECLSPEC void
mca_io_base_request_progress_add(void)
{
#if OMPI_ENABLE_PROGRESS_THREADS
    /* if we don't have a progress thread, make us have a progress
       thread */
    if (! thread_running) {
        OPAL_THREAD_LOCK(&progress_mutex);
        if (! thread_running) {
            thread_running = true;
            opal_thread_start(&progress_thread);
        }
        OPAL_THREAD_UNLOCK(&progress_mutex);
    }
#endif /* OMPI_ENABLE_PROGRESS_THREADS */

    OPAL_THREAD_ADD32(&mca_io_base_request_num_pending, 1);

#if OMPI_ENABLE_PROGRESS_THREADS
    opal_condition_signal(&progress_cond);
#endif /* OMPI_ENABLE_PROGRESS_THREADS */
}
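The progress-thread start above is a double-checked lock: a cheap unlocked test, then a re-test under the mutex so the thread is created exactly once. Here is a self-contained sketch of the same idiom with pthreads and C11 atomics (the atomic flag avoids the data race a plain bool would have); all names are hypothetical.

#include <pthread.h>
#include <stdatomic.h>

static atomic_bool thread_running;
static pthread_mutex_t progress_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t progress_thread;

static void *progress_loop(void *arg) { (void) arg; /* poll until shutdown */ return NULL; }

static void ensure_progress_thread(void)
{
    if (!atomic_load(&thread_running)) {          /* cheap unlocked check */
        pthread_mutex_lock(&progress_mutex);
        if (!atomic_load(&thread_running)) {      /* re-check under the lock */
            pthread_create(&progress_thread, NULL, progress_loop, NULL);
            atomic_store(&thread_running, true);
        }
        pthread_mutex_unlock(&progress_mutex);
    }
}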
Example #9
int 
orte_wait_kill(int sig)
{
    opal_list_item_t* item;

    OPAL_THREAD_LOCK(&mutex);
    do_waitall(0);
    while (NULL != (item = opal_list_remove_first(&registered_cb))) {
        registered_cb_item_t *cb = (registered_cb_item_t*)item;
        pending_pids_item_t *pending = find_pending_pid(cb->pid,false);
        if(NULL == pending) {
            int status;
            kill(cb->pid, sig);
            waitpid(cb->pid,&status,0);
        } else {
            OBJ_RELEASE(pending);
        }
        OBJ_RELEASE(item);
    } 
    OPAL_THREAD_UNLOCK(&mutex);
    return ORTE_SUCCESS;
}
int
ompi_osc_pt2pt_component_isend(void *buf,
                               size_t count,
                               struct ompi_datatype_t *datatype,
                               int dest,
                               int tag,
                               struct ompi_communicator_t *comm,
                               ompi_request_t **request,
                               ompi_request_complete_fn_t callback,
                               void *cbdata)
{
    int ret;
    bool missed_callback;
    ompi_request_complete_fn_t tmp;

    ret = MCA_PML_CALL(isend(buf, count, datatype,
                             dest, tag, MCA_PML_BASE_SEND_STANDARD, comm, request));
    if (OMPI_SUCCESS != ret) return ret;

    /* lock the giant request mutex to update the callback data so
       that the PML can't mark the request as complete while we're
       updating the callback data, which means we can
       deterministically ensure the callback is only fired once and
       that we didn't miss it.  */
    OPAL_THREAD_LOCK(&ompi_request_lock);
    (*request)->req_complete_cb = callback;
    (*request)->req_complete_cb_data = cbdata;
    missed_callback = (*request)->req_complete;
    OPAL_THREAD_UNLOCK(&ompi_request_lock);

    if (missed_callback) {
        tmp = (*request)->req_complete_cb;
        (*request)->req_complete_cb = NULL;
        tmp(*request);
    }

    return OMPI_SUCCESS;
}
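The lock around the callback installation closes a classic race: the request may complete before, during, or after req_complete_cb is set. Reading req_complete under the same lock tells the caller deterministically whether the completion path already ran (and therefore missed the callback), so the callback fires exactly once. A reduced sketch of that handshake with hypothetical names, using pthreads:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct req {
    bool complete;              /* set by the completion path */
    void (*cb)(struct req *);   /* must fire exactly once */
} req_t;

static pthread_mutex_t request_lock = PTHREAD_MUTEX_INITIALIZER;

/* Completion path: marks the request done and fires the callback
   if one is already installed. */
static void complete_request(req_t *r)
{
    pthread_mutex_lock(&request_lock);
    r->complete = true;
    void (*cb)(req_t *) = r->cb;
    r->cb = NULL;
    pthread_mutex_unlock(&request_lock);
    if (NULL != cb) cb(r);
}

/* Registration path, mirroring the isend code above: install the
   callback under the lock, then fire it ourselves if completion
   already happened and missed it. */
static void attach_callback(req_t *r, void (*cb)(req_t *))
{
    bool missed;
    pthread_mutex_lock(&request_lock);
    r->cb = cb;
    missed = r->complete;
    if (missed) r->cb = NULL;   /* we will fire it here instead */
    pthread_mutex_unlock(&request_lock);
    if (missed) cb(r);
}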
/* Caller MUST hold the matching lock before calling */
static inline int mca_oob_ud_find_recv (opal_list_t *list, const orte_process_name_t name,
                                        const int tag, mca_oob_ud_req_t **req)
{
    opal_list_item_t *item;
    int rc = ORTE_ERR_NOT_FOUND;

    *req = NULL;

    OPAL_THREAD_LOCK(&mca_oob_ud_component.ud_match_lock);

    for (item = opal_list_get_first (list) ; item != opal_list_get_end (list) ;
         item = opal_list_get_next (item)) {
        mca_oob_ud_req_t *recv_req = (mca_oob_ud_req_t *) item;

        OPAL_OUTPUT_VERBOSE((15, mca_oob_base_output, "%s oob:ud:find_recv matching against "
                             "peer: %s, tag: %d", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_NAME_PRINT(&recv_req->req_origin), recv_req->req_tag));

        if (OPAL_EQUAL == opal_dss.compare (&name, &recv_req->req_origin, ORTE_NAME) &&
            tag == recv_req->req_tag) {
            *req = recv_req;
            rc = ORTE_SUCCESS;
            break;
        }
    }

    OPAL_OUTPUT_VERBOSE((15, mca_oob_base_output, "%s oob:ud:find_recv %sfound",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), ORTE_SUCCESS != rc ? "not " : ""));


    OPAL_THREAD_UNLOCK(&mca_oob_ud_component.ud_match_lock);

    if (ORTE_SUCCESS == rc) {
        mca_oob_ud_req_append_to_list (*req, NULL);
    }

    return rc;
}
Example #12
bool mca_btl_tcp2_endpoint_accept(mca_btl_base_endpoint_t* btl_endpoint,
                                 struct sockaddr* addr, int sd)
{
    mca_btl_tcp_proc_t *endpoint_proc = btl_endpoint->endpoint_proc;
    const orte_process_name_t *this_proc = &(ompi_proc_local()->proc_name);
    int cmpval;

    if(NULL == btl_endpoint->endpoint_addr) {
        return false;
    }

    OPAL_THREAD_LOCK(&btl_endpoint->endpoint_recv_lock);
    OPAL_THREAD_LOCK(&btl_endpoint->endpoint_send_lock);

    cmpval = ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
                                    &endpoint_proc->proc_ompi->proc_name,
                                    this_proc);
    if((btl_endpoint->endpoint_sd < 0) ||
       (btl_endpoint->endpoint_state != MCA_BTL_TCP_CONNECTED &&
        cmpval < 0)) {
        mca_btl_tcp2_endpoint_close(btl_endpoint);
        btl_endpoint->endpoint_sd = sd;
        if(mca_btl_tcp2_endpoint_send_connect_ack(btl_endpoint) != OMPI_SUCCESS) {
            mca_btl_tcp2_endpoint_close(btl_endpoint);
            OPAL_THREAD_UNLOCK(&btl_endpoint->endpoint_send_lock);
            OPAL_THREAD_UNLOCK(&btl_endpoint->endpoint_recv_lock);
            return false;
        }
        mca_btl_tcp_endpoint_event_init(btl_endpoint);
        /* NOT NEEDED if we remove the PERSISTENT flag when we create the
         * first recv_event.
         */
        opal_event_add(&btl_endpoint->endpoint_recv_event, 0);  /* TODO */
        mca_btl_tcp_endpoint_connected(btl_endpoint);
#if OPAL_ENABLE_DEBUG && WANT_PEER_DUMP
        mca_btl_tcp2_endpoint_dump(btl_endpoint, "accepted");
#endif
        OPAL_THREAD_UNLOCK(&btl_endpoint->endpoint_send_lock);
        OPAL_THREAD_UNLOCK(&btl_endpoint->endpoint_recv_lock);
        return true;
    }
    OPAL_THREAD_UNLOCK(&btl_endpoint->endpoint_send_lock);
    OPAL_THREAD_UNLOCK(&btl_endpoint->endpoint_recv_lock);
    return false;
}
Example #13
static void orte_pls_base_cmd_ack(int status, orte_process_name_t* sender,
                                  orte_buffer_t* buffer, orte_rml_tag_t tag,
                                  void* cbdata)
{
    int ret;
    
    OPAL_THREAD_LOCK(&orte_pls_base.orted_cmd_lock);
    
    orted_cmd_num_active--;
    if (orted_cmd_num_active == 0) {
        opal_condition_signal(&orte_pls_base.orted_cmd_cond);
    } else {
        ret = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, ORTE_RML_TAG_PLS_ORTED_ACK,
                                      ORTE_RML_NON_PERSISTENT, orte_pls_base_cmd_ack, NULL);
        if (ret != ORTE_SUCCESS) {
            ORTE_ERROR_LOG(ret);
            return;
        }
    }
    
    OPAL_THREAD_UNLOCK(&orte_pls_base.orted_cmd_lock);
    return;
}
Example #14
int
orte_wait_finalize(void)
{
    opal_list_item_t *item;

    OPAL_THREAD_LOCK(&mutex);
    opal_event_del(&handler);

    /* clear out the lists */
    while (NULL != (item = opal_list_remove_first(&pending_pids))) {
        OBJ_RELEASE(item);
    }
    while (NULL != (item = opal_list_remove_first(&registered_cb))) {
        OBJ_RELEASE(item);
    }
    OPAL_THREAD_UNLOCK(&mutex);

    OBJ_DESTRUCT(&mutex);
    OBJ_DESTRUCT(&pending_pids);
    OBJ_DESTRUCT(&registered_cb);

    return ORTE_SUCCESS;
}
Example #15
static inline int32_t
create_send_tag(ompi_osc_pt2pt_module_t *module)
{
#if OMPI_HAVE_THREAD_SUPPORT && OPAL_HAVE_ATOMIC_CMPSET_32
    int32_t newval, oldval;
    do {
        oldval = module->p2p_tag_counter;
        newval = (oldval + 1) % mca_pml.pml_max_tag;
    } while (0 == opal_atomic_cmpset_32(&module->p2p_tag_counter, oldval, newval));
    return newval;
#elif OMPI_HAVE_THREAD_SUPPORT 
    int32_t ret;
    /* no compare and swap - have to lock the module */
    OPAL_THREAD_LOCK(&module->p2p_lock);
    module->p2p_tag_counter = (module->p2p_tag_counter + 1) % mca_pml.pml_max_tag;
    ret = module->p2p_tag_counter;
    OPAL_THREAD_UNLOCK(&module->p2p_lock);
    return ret;
#else
    module->p2p_tag_counter = (module->p2p_tag_counter + 1) % mca_pml.pml_max_tag;
    return module->p2p_tag_counter;
#endif
}
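The first branch of create_send_tag is a lock-free increment with wraparound: retry the compare-and-swap until no other thread raced in between the read and the update. The same loop in portable C11 atomics (MAX_TAG is an illustrative bound; the real one comes from mca_pml.pml_max_tag):

#include <stdatomic.h>
#include <stdint.h>

#define MAX_TAG 32767   /* illustrative; not the real PML limit */

static _Atomic int32_t tag_counter;

static int32_t next_tag(void)
{
    int32_t oldval = atomic_load(&tag_counter);
    int32_t newval;
    do {
        newval = (oldval + 1) % MAX_TAG;
        /* on failure, oldval is reloaded with the current value */
    } while (!atomic_compare_exchange_weak(&tag_counter, &oldval, newval));
    return newval;
}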
int
ompi_osc_pt2pt_module_wait(ompi_win_t *win)
{
    ompi_group_t *group;
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);

    OPAL_THREAD_LOCK(&module->p2p_lock);
    while (0 != (module->p2p_num_pending_in) ||
           0 != (module->p2p_num_complete_msgs)) {
        opal_condition_wait(&module->p2p_cond, &module->p2p_lock);
    }

    group = module->p2p_pw_group;
    module->p2p_pw_group = NULL;
    OPAL_THREAD_UNLOCK(&module->p2p_lock);

    ompi_win_remove_mode(win, OMPI_WIN_EXPOSE_EPOCH | OMPI_WIN_POSTED);

    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);

    return OMPI_SUCCESS;
}
Example #17
int
mca_io_romio314_file_get_info (ompi_file_t *fh,
                            opal_info_t ** info_used)
{
    int ret;
    mca_io_romio314_data_t *data;

// An opal_info_t isn't a full ompi_info_t, so if we're using an MPI call
// below with an MPI_Info, we need to create an equivalent MPI_Info. This
// isn't ideal, but it only happens in a few places.
    ompi_info_t *ompi_info;
    ompi_info = OBJ_NEW(ompi_info_t);
    if (!ompi_info) { return(MPI_ERR_NO_MEM); }

    data = (mca_io_romio314_data_t *) fh->f_io_selected_data;
    OPAL_THREAD_LOCK (&mca_io_romio314_mutex);
    ret = ROMIO_PREFIX(MPI_File_get_info) (data->romio_fh, &ompi_info);
    OPAL_THREAD_UNLOCK (&mca_io_romio314_mutex);

    opal_info_dup (&(ompi_info->super), info_used);
    ompi_info_free(&ompi_info);
    return ret;
}
Example #18
int
orte_wait_cb(pid_t wpid, orte_wait_fn_t callback, void *data)
{
    opal_process_handle_t* handle;

    if (wpid <= 0) return ORTE_ERR_NOT_IMPLEMENTED;
    if (NULL == callback) return ORTE_ERR_BAD_PARAM;

    OPAL_THREAD_LOCK(&mutex);
    handle = find_pending_pid(wpid, false);
    if( handle != NULL ) {
        opal_list_remove_item( &pending_pids, (opal_list_item_t*)handle );
        OBJ_RELEASE(handle);
        OPAL_THREAD_UNLOCK(&mutex);
        return ORTE_SUCCESS;
    }
    handle = find_pending_cb( wpid, true );
    handle->pid = wpid;
    handle->callback = callback;
    handle->data = data;

    if( false == RegisterWaitForSingleObject( &handle->registered_handle, (HANDLE)handle->pid, 
                                              trigger_process_detection, (void*)handle, INFINITE,
                                              WT_EXECUTEONLYONCE | WT_EXECUTELONGFUNCTION) ) {
        DWORD errcode = GetLastError();
        char* localbuf = NULL;

        FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
                       NULL, errcode, 0, (LPTSTR)&localbuf, 1024, NULL );
        opal_output( 0, "Failed to initialize the process callback for pid %lu error %s\n",
                        (unsigned long)(handle->pid), localbuf );
        LocalFree( localbuf );
    }

    OPAL_THREAD_UNLOCK(&mutex);

    return ORTE_SUCCESS;
}
Example #19
File: proc.c Project: IanYXXL/A1
static ompi_proc_t *
ompi_proc_find_and_add(const ompi_process_name_t * name, bool* isnew)
{
    ompi_proc_t *proc, *rproc = NULL;
    ompi_rte_cmp_bitmask_t mask;
    
    /* return the proc-struct which matches this jobid+process id */
    mask = OMPI_RTE_CMP_JOBID | OMPI_RTE_CMP_VPID;
    OPAL_THREAD_LOCK(&ompi_proc_lock);
    for(proc =  (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
        proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
        proc =  (ompi_proc_t*)opal_list_get_next(proc)) {
        if (OPAL_EQUAL == ompi_rte_compare_name_fields(mask, &proc->proc_name, name)) {
            rproc = proc;
            *isnew = false;
            break;
        }
    }
    
    /* if we didn't find this proc in the list, create a new
     * proc_t and append it to the list
     */
    if (NULL == rproc) {
        *isnew = true;
        rproc = OBJ_NEW(ompi_proc_t);
        if (NULL != rproc) {
            opal_list_append(&ompi_proc_list, (opal_list_item_t*)rproc);
            rproc->proc_name = *name;
        }
        /* caller had better fill in the rest of the proc, or there's
         going to be pain later... */
    }
    
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
    
    return rproc;
}
Example #20
static int register_cache_bypass(mca_mpool_base_module_t *mpool,
        void *addr, size_t size, uint32_t flags,
        mca_mpool_base_registration_t **reg)
{
    mca_mpool_rdma_module_t *mpool_rdma = (mca_mpool_rdma_module_t*)mpool;
    mca_mpool_base_registration_t *rdma_reg;
    ompi_free_list_item_t *item;
    unsigned char *base, *bound;
    int rc;

    base = down_align_addr(addr, mca_mpool_base_page_size_log);
    bound = up_align_addr( (void*) ((char*) addr + size - 1),
             mca_mpool_base_page_size_log);
    OMPI_FREE_LIST_GET(&mpool_rdma->reg_list, item, rc);
    if(OMPI_SUCCESS != rc) {
        OPAL_THREAD_UNLOCK(&mpool->rcache->lock);
        return rc;
    }
    rdma_reg = (mca_mpool_base_registration_t*)item;

    rdma_reg->mpool = mpool;
    rdma_reg->base = base;
    rdma_reg->bound = bound;
    rdma_reg->flags = flags;

    rc = mpool_rdma->resources.register_mem(mpool_rdma->resources.reg_data,
            base, bound - base + 1, rdma_reg);

    if(rc != OMPI_SUCCESS) {
        OMPI_FREE_LIST_RETURN(&mpool_rdma->reg_list, item);
        return rc;
    }

    *reg = rdma_reg;
    (*reg)->ref_count++;
    return OMPI_SUCCESS;
}
int mca_oob_tcp_recv_cancel(
    orte_process_name_t* name, 
    int tag)
{
    int matched = 0;
    opal_list_item_t *item, *next;

    /* wait for any previously matched messages to be processed */
    OPAL_THREAD_LOCK(&mca_oob_tcp_component.tcp_match_lock);
#if OMPI_ENABLE_PROGRESS_THREADS
    if(opal_event_progress_thread() == false) {
        while(mca_oob_tcp_component.tcp_match_count) {
            opal_condition_wait(
                &mca_oob_tcp_component.tcp_match_cond, 
                &mca_oob_tcp_component.tcp_match_lock);
        }
    }
#endif

    /* remove any matching posted receives */
    for(item =  opal_list_get_first(&mca_oob_tcp_component.tcp_msg_post);
        item != opal_list_get_end(&mca_oob_tcp_component.tcp_msg_post);
        item =  next) {
        mca_oob_tcp_msg_t* msg = (mca_oob_tcp_msg_t*)item;
        next = opal_list_get_next(item);

        if (OPAL_EQUAL == opal_dss.compare(name, &msg->msg_peer, ORTE_NAME)) {
            if (msg->msg_hdr.msg_tag == tag) {
                opal_list_remove_item(&mca_oob_tcp_component.tcp_msg_post, &msg->super.super);
                MCA_OOB_TCP_MSG_RETURN(msg);
                matched++;
            }
        }
    }
    OPAL_THREAD_UNLOCK(&mca_oob_tcp_component.tcp_match_lock);
    return (matched > 0) ? ORTE_SUCCESS : ORTE_ERR_NOT_FOUND;
}
Example #22
/* eager get */
static void mca_btl_ugni_callback_eager_get_progress_pending (struct mca_btl_base_module_t *btl, struct mca_btl_base_endpoint_t *endpoint,
                                                              struct mca_btl_base_descriptor_t *desc, int rc)
{
    mca_btl_ugni_module_t *ugni_module = (mca_btl_ugni_module_t *) btl;
    mca_btl_ugni_base_frag_t *pending_frag, *frag = (mca_btl_ugni_base_frag_t *) desc;

    OPAL_THREAD_LOCK(&ugni_module->eager_get_pending_lock);
    pending_frag = (mca_btl_ugni_base_frag_t *) opal_list_remove_first (&ugni_module->eager_get_pending);
    OPAL_THREAD_UNLOCK(&ugni_module->eager_get_pending_lock);

    if (NULL != pending_frag) {
        /* copy the relevant data out of the pending fragment */
        frag->endpoint = pending_frag->endpoint;

        /* start the next eager get using this fragment */
        (void) mca_btl_ugni_start_eager_get (frag->endpoint, pending_frag->hdr.eager_ex, frag);

        /* return the temporary fragment */
        mca_btl_ugni_frag_return (pending_frag);
    } else {
        /* not needed anymore */
        mca_btl_ugni_frag_return (frag);
    }
}
Example #23
int mca_io_ompio_file_get_position_shared (ompi_file_t *fp,
                                           OMPI_MPI_OFFSET_TYPE * offset)
{
    int ret = OMPI_SUCCESS;
    mca_io_ompio_data_t *data;
    mca_io_ompio_file_t *fh;
    mca_sharedfp_base_module_t * shared_fp_base_module;

    data = (mca_io_ompio_data_t *) fp->f_io_selected_data;
    fh = &data->ompio_fh;

    /*get the shared fp module associated with this file*/
    shared_fp_base_module = fh->f_sharedfp;
    if ( NULL == shared_fp_base_module ){
        opal_output(0, "No shared file pointer component found for this communicator. Cannot execute\n");
        return OMPI_ERROR;
    }
    OPAL_THREAD_LOCK(&fp->f_lock);
    ret = shared_fp_base_module->sharedfp_get_position(fh,offset);
    *offset = *offset / fh->f_etype_size;
    OPAL_THREAD_UNLOCK(&fp->f_lock);

    return ret;
}
Example #24
/*
 * Front-end function called by the C MPI API functions to set an
 * attribute.
 */
int ompi_attr_set_c(ompi_attribute_type_t type, void *object,
                    opal_hash_table_t **attr_hash,
                    int key, void *attribute, bool predefined)
{
    int ret;
    attribute_value_t *new_attr = OBJ_NEW(attribute_value_t);
    if (NULL == new_attr) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    OPAL_THREAD_LOCK(&attribute_lock);

    new_attr->av_value = attribute;
    new_attr->av_set_from = OMPI_ATTRIBUTE_C;
    ret = set_value(type, object, attr_hash, key, new_attr, predefined);
    if (OMPI_SUCCESS != ret) {
        OBJ_RELEASE(new_attr);
    }

    opal_atomic_wmb();
    OPAL_THREAD_UNLOCK(&attribute_lock);

    return ret;
}
Example #25
int mca_io_ompio_file_write_at_all_begin (ompi_file_t *fh,
					  OMPI_MPI_OFFSET_TYPE offset,
					  const void *buf,
					  int count,
					  struct ompi_datatype_t *datatype)
{
    int ret = OMPI_SUCCESS;
    mca_common_ompio_data_t *data=NULL;
    ompio_file_t *fp=NULL;

    data = (mca_common_ompio_data_t *) fh->f_io_selected_data;
    fp = &data->ompio_fh;

    if ( true == fp->f_split_coll_in_use ) {
	printf("Only one split collective I/O operation allowed per file handle at any given point in time!\n");
	return MPI_ERR_REQUEST;
    }
    OPAL_THREAD_LOCK(&fh->f_lock);
    ret = mca_common_ompio_file_iwrite_at_all ( fp, offset, buf, count, datatype, &fp->f_split_coll_req );
    OPAL_THREAD_UNLOCK(&fh->f_lock);
    fp->f_split_coll_in_use = true;

    return ret;
}
int mca_mpool_rdma_release_memory(struct mca_mpool_base_module_t *mpool,
        void *base, size_t size)
{
    mca_mpool_rdma_module_t *mpool_rdma = (mca_mpool_rdma_module_t*)mpool;
    mca_mpool_base_registration_t *reg;
    mca_mpool_base_registration_t *regs[RDMA_MPOOL_NREGS];
    int reg_cnt, i, err = 0;

    OPAL_THREAD_LOCK(&mpool->rcache->lock);
    do {
        reg_cnt = mpool->rcache->rcache_find_all(mpool->rcache, base, size,
                regs, RDMA_MPOOL_NREGS);

        for(i = 0; i < reg_cnt; i++) {
            reg = regs[i];

            reg->flags |= MCA_MPOOL_FLAGS_INVALID;
            if(reg->ref_count) {
                /* The memory is being freed, but a registration that is
                 * still in use covers it. This can happen even in a correct
                 * program, but it may also be a user error. We can't tell.
                 * Mark the registration as invalid. It will not be used any
                 * more and will be unregistered once its ref_count drops to
                 * zero */
                err++; /* tell caller that something was wrong */
                continue;
            }

            opal_list_remove_item(&mpool_rdma->mru_list,(opal_list_item_t*)reg);
            opal_list_append(&mpool_rdma->gc_list, (opal_list_item_t*)reg);
        }
    } while(reg_cnt == RDMA_MPOOL_NREGS);

    OPAL_THREAD_UNLOCK(&mpool->rcache->lock);

    return err ? OMPI_ERROR : OMPI_SUCCESS;
}
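The MCA_MPOOL_FLAGS_INVALID dance above defers cleanup: an in-use registration cannot be torn down at free() time, so it is flagged and unregistered later when its last reference drops (the real code parks it on a garbage-collection list first). A compact sketch of that deferred-invalidation idea, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

typedef struct reg {
    int  ref_count;
    bool invalid;               /* set when the backing memory is freed */
} reg_t;

/* Stand-in for the actual deregistration call. */
static void unregister_now(reg_t *r) { printf("unregister %p\n", (void *) r); }

/* Memory is being freed while 'r' may still be in use: flag it and
   let the last holder trigger the real deregistration. */
static void invalidate(reg_t *r)
{
    r->invalid = true;
    if (0 == r->ref_count) {
        unregister_now(r);      /* nobody is using it, clean up now */
    }
}

/* A user of the registration drops its reference. */
static void release(reg_t *r)
{
    if (0 == --r->ref_count && r->invalid) {
        unregister_now(r);      /* deferred cleanup happens here */
    }
}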
Example #27
int mca_io_ompio_file_write_ordered_end (ompi_file_t *fp,
					 const void *buf,
					 ompi_status_public_t *status)
{
    int ret = OMPI_SUCCESS;
    mca_common_ompio_data_t *data;
    ompio_file_t *fh;
    mca_sharedfp_base_module_t * shared_fp_base_module;

    data = (mca_common_ompio_data_t *) fp->f_io_selected_data;
    fh = &data->ompio_fh;

    /*get the shared fp module associated with this file*/
    shared_fp_base_module = fh->f_sharedfp;
    if ( NULL == shared_fp_base_module ){
        opal_output(0, "No shared file pointer component found for this communicator. Cannot execute\n");
	return OMPI_ERROR;
    }
    OPAL_THREAD_LOCK(&fp->f_lock);
    ret = shared_fp_base_module->sharedfp_write_ordered_end(fh,buf,status);
    OPAL_THREAD_UNLOCK(&fp->f_lock);

    return ret;
}
Example #28
/*
 * Get a value from an info
 */
int ompi_info_get (ompi_info_t *info, const char *key, int valuelen,
                   char *value, int *flag)
{
    ompi_info_entry_t *search;
    int value_length;

    OPAL_THREAD_LOCK(info->i_lock);
    search = info_find_key (info, key);
    if (NULL == search){
        *flag = 0;
    } else {
        /*
         * We have found the element, so we can return the value
         * Set the flag, value_length and value
         */
         *flag = 1;
         value_length = strlen(search->ie_value);
         /*
          * If the stored value is shorter than valuelen, then
          * we can copy the entire value out. Else, we have to
          * copy ONLY valuelen bytes out
          */
          if (value_length < valuelen ) {
               strcpy(value, search->ie_value);
          } else {
               opal_strncpy(value, search->ie_value, valuelen);
               if (MPI_MAX_INFO_VAL == valuelen) {
                   value[valuelen-1] = 0;
               } else {
                   value[valuelen] = 0;
               }
          }
    }
    OPAL_THREAD_UNLOCK(info->i_lock);
    return MPI_SUCCESS;
}
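The copy logic above implements the MPI_Info_get contract: valuelen counts the characters available in value excluding the terminating NUL, so a longer stored value is truncated to exactly valuelen characters (with the special case that a buffer of exactly MPI_MAX_INFO_VAL bytes is terminated at valuelen-1). A minimal sketch of the base rule; the helper name is hypothetical:

#include <string.h>

/* Copy 'stored' into 'value', truncating to 'valuelen' characters.
   Assumes the caller provides valuelen + 1 bytes, per MPI_Info_get. */
static void copy_info_value(char *value, int valuelen, const char *stored)
{
    size_t len = strlen(stored);
    if (len < (size_t) valuelen) {
        strcpy(value, stored);                  /* fits entirely */
    } else {
        memcpy(value, stored, (size_t) valuelen);
        value[valuelen] = '\0';                 /* explicit terminator */
    }
}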
Example #29
void mca_pml_ob1_process_pending_rdma(void)
{
    mca_pml_ob1_rdma_frag_t* frag;
    int32_t i, rc, s = (int32_t)opal_list_get_size(&mca_pml_ob1.rdma_pending);

    for(i = 0; i < s; i++) {
        OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
        frag = (mca_pml_ob1_rdma_frag_t*)
            opal_list_remove_first(&mca_pml_ob1.rdma_pending);
        OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
        if(NULL == frag)
            break;

        frag->retries++;

        if(frag->rdma_state == MCA_PML_OB1_RDMA_PUT) {
            rc = mca_pml_ob1_send_request_put_frag(frag);
        } else {
            rc = mca_pml_ob1_recv_request_get_frag(frag);
        }
        if(OMPI_ERR_OUT_OF_RESOURCE == rc)
            break;
    }
}
Example #30
void mca_btl_openib_proc_destruct(mca_btl_openib_proc_t* ib_proc)
{
    /* remove from list of all proc instances */
    OPAL_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
    opal_list_remove_item(&mca_btl_openib_component.ib_procs, &ib_proc->super);
    OPAL_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);

    /* release resources */
    if(NULL != ib_proc->proc_endpoints) {
        free(ib_proc->proc_endpoints);
    }
    if (NULL != ib_proc->proc_ports) {
        int i, j;
        for (i = 0; i < ib_proc->proc_port_count; ++i) {
            for (j = 0; j < ib_proc->proc_ports[i].pm_cpc_data_count; ++j) {
                if (NULL != ib_proc->proc_ports[i].pm_cpc_data[j].cbm_modex_message) {
                    free(ib_proc->proc_ports[i].pm_cpc_data[j].cbm_modex_message);
                }
            }
        }
        free(ib_proc->proc_ports);
    }
    OBJ_DESTRUCT(&ib_proc->proc_lock);
}