orte_proc_t* orte_rmaps_base_setup_proc(orte_job_t *jdata,
                                        orte_node_t *node,
                                        orte_app_idx_t idx)
{
    orte_proc_t *proc;
    int rc;

    proc = OBJ_NEW(orte_proc_t);
    /* set the jobid */
    proc->name.jobid = jdata->jobid;
    /* flag the proc as ready for launch */
    proc->state = ORTE_PROC_STATE_INIT;
    proc->app_idx = idx;

    OBJ_RETAIN(node);  /* maintain accounting on object */    
    proc->node = node;
    proc->nodename = node->name;
    node->num_procs++;
    if (node->slots_inuse < node->slots) {
        node->slots_inuse += orte_rmaps_base.cpus_per_rank;
    }
    if (0 > (rc = opal_pointer_array_add(node->procs, (void*)proc))) {
        ORTE_ERROR_LOG(rc);
        OBJ_RELEASE(proc);
        return NULL;
    }
    /* retain the proc struct so that we correctly track its release */
    OBJ_RETAIN(proc);

    return proc;
}
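Every example on this page leans on the same OPAL object-lifetime contract: OBJ_NEW constructs an object with a reference count of one, OBJ_RETAIN adds a reference for each additional owner, and OBJ_RELEASE drops one, running the destructor when the count reaches zero. A minimal sketch of that contract (the my_obj_t class is hypothetical; the macros are the real OPAL ones):

#include "opal/class/opal_object.h"

/* A hypothetical OPAL class, declared only for this illustration. */
typedef struct { opal_object_t super; } my_obj_t;
OBJ_CLASS_INSTANCE(my_obj_t, opal_object_t, NULL, NULL);

static void lifetime_sketch(void)
{
    my_obj_t *obj = OBJ_NEW(my_obj_t);  /* constructed, refcount == 1  */
    OBJ_RETAIN(obj);                    /* second owner, refcount == 2 */
    OBJ_RELEASE(obj);                   /* refcount == 1, still alive  */
    OBJ_RELEASE(obj);                   /* refcount == 0, destructor runs */
}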
Example #2
int orte_rmaps_base_add_proc_to_map(orte_job_map_t *map, orte_node_t *node,
                                    bool oversubscribed, orte_proc_t *proc)
{
    orte_std_cntr_t i;
    orte_node_t *node_from_map;
    int rc;
    
    /* see if this node has already been assigned to the map - if
     * not, then add the pointer to the pointer array
     */
    for (i=0; i < map->nodes->size; i++) {
        if (NULL == (node_from_map = (orte_node_t*)opal_pointer_array_get_item(map->nodes, i))) {
            continue;
        }
        if (node_from_map->index == node->index) {
            /* we have this node in the array */
            goto PROCESS;
        }
    }
    /* if we get here, then this node isn't already in the map - add it */
    OPAL_OUTPUT_VERBOSE((5, orte_rmaps_base.rmaps_output,
                         "%s rmaps:base: adding node %s to map",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         (NULL == node->name) ? "NULL" : node->name));
    
    if (ORTE_SUCCESS > (rc = opal_pointer_array_add(map->nodes, (void*)node))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    OBJ_RETAIN(node);  /* maintain accounting on object */
    ++map->num_nodes;
    
PROCESS:
    /* add the proc to this node's local processes - it is assumed
     * that the proc isn't already there as this would be an error
     * in the mapper
     */
    OPAL_OUTPUT_VERBOSE((5, orte_rmaps_base.rmaps_output,
                         "%s rmaps:base: mapping proc for job %s to node %s whose daemon is %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_JOBID_PRINT(proc->name.jobid),
                         (NULL == node->name) ? "NULL" : node->name,
                         (NULL == node->daemon) ? "NULL" : ORTE_NAME_PRINT(&(node->daemon->name))));
    
    if (0 > (rc = opal_pointer_array_add(node->procs, (void*)proc))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* retain the proc struct so that we correctly track its release */
    OBJ_RETAIN(proc);
    ++node->num_procs;

    /* update the oversubscribed state of the node */
    node->oversubscribed = oversubscribed;
    
    return ORTE_SUCCESS;
}
Example #3
/*
 * Back end to MPI_FILE_OPEN
 */
int ompi_file_open(struct ompi_communicator_t *comm, const char *filename,
                   int amode, struct ompi_info_t *info, ompi_file_t **fh)
{
    int ret;
    ompi_file_t *file;

    file = OBJ_NEW(ompi_file_t);
    if (NULL == file) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }


    /* Save the params */

    file->f_comm = comm;
    OBJ_RETAIN(comm);

    if (MPI_INFO_NULL != info) {
        if(NULL == file->f_info) {
            file->f_info = OBJ_NEW(ompi_info_t);
        }
        if (OMPI_SUCCESS != (ret = ompi_info_dup(info, &file->f_info))) {
            OBJ_RELEASE(file);
            return ret;
        }
    } else {
        file->f_info = MPI_INFO_NULL;
        OBJ_RETAIN(MPI_INFO_NULL);
    }

    file->f_amode = amode;
    file->f_filename = strdup(filename);
    if (NULL == file->f_filename) {
        OBJ_RELEASE(file);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* Create the mutex */
    OBJ_CONSTRUCT(&file->f_mutex, opal_mutex_t);

    /* Select a module and actually open the file */

    if (OMPI_SUCCESS != (ret = mca_io_base_file_select(file, NULL))) {
        OBJ_RELEASE(file);
        return ret;
    }

    /* All done */

    *fh = file;
    return OMPI_SUCCESS;
}
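For context, the MPI_FILE_OPEN binding is a thin wrapper around this back end. A simplified, hypothetical sketch of the call path (the real binding in ompi/mpi/c/file_open.c adds parameter checking and errhandler invocation):

/* Hypothetical, simplified caller of the back end above. */
int MPI_File_open(MPI_Comm comm, const char *filename, int amode,
                  MPI_Info info, MPI_File *fh)
{
    int rc = ompi_file_open(comm, filename, amode, info, fh);
    return (OMPI_SUCCESS == rc) ? MPI_SUCCESS : rc;
}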
Example #4
File: win.c  Project: Benguang/ompi
static int alloc_window(struct ompi_communicator_t *comm, ompi_info_t *info, int flavor, ompi_win_t **win_out)
{
    ompi_win_t *win;
    ompi_group_t *group;
    int acc_ops, flag, ret;

    /* create the object */
    win = OBJ_NEW(ompi_win_t);
    if (NULL == win) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    ret = ompi_info_get_value_enum (info, "accumulate_ops", &acc_ops,
                                    OMPI_WIN_ACCUMULATE_OPS_SAME_OP_NO_OP,
                                    ompi_win_accumulate_ops, &flag);
    if (OMPI_SUCCESS != ret) {
        OBJ_RELEASE(win);
        return ret;
    }

    win->w_acc_ops = acc_ops;
    win->w_flavor = flavor;

    /* setup data that is independent of osc component */
    group = comm->c_local_group;
    OBJ_RETAIN(group);
    win->w_group = group;

    *win_out = win;

    return OMPI_SUCCESS;
}
Example #5
int ompi_grequest_start(
    MPI_Grequest_query_function *gquery_fn,
    MPI_Grequest_free_function *gfree_fn,
    MPI_Grequest_cancel_function *gcancel_fn,
    void* gstate,
    ompi_request_t** request)
{
    ompi_grequest_t *greq = OBJ_NEW(ompi_grequest_t);
    if(greq == NULL) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    /* We call RETAIN here specifically to increase the refcount to 2.
       See comment before the destructor for an explanation. */
    OBJ_RETAIN(greq);

    greq->greq_base.req_state = OMPI_REQUEST_ACTIVE;
    greq->greq_state = gstate;
    greq->greq_query.c_query = gquery_fn;
    greq->greq_free.c_free = gfree_fn;
    greq->greq_cancel.c_cancel = gcancel_fn; 
    greq->greq_base.req_status = ompi_status_empty;

    *request = &greq->greq_base;
    return OMPI_SUCCESS;
}
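The deliberate bump to a refcount of 2 splits ownership between the user and the library. A sketch of the assumed pairing (the release sites follow the destructor comment the code cites, but treat the specifics as assumptions):

/* Assumed ownership split for the generalized request:
 *   reference 1 -- held through the user's MPI_Request handle; dropped
 *                  by ompi_request_free when the user completes or
 *                  frees the request.
 *   reference 2 -- held by the library; dropped in
 *                  ompi_grequest_complete when the user signals
 *                  completion via MPI_Grequest_complete.
 * The destructor runs only after both references are gone. */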
Example #6
int MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler)
{
    MPI_Errhandler tmp;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN,
                                          FUNC_NAME);
        } else if (NULL == errhandler) {
            return OMPI_ERRHANDLER_INVOKE(win, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
    }

    /* On 64-bit environments we have to make sure the read of the
       error_handler is atomic. */
    do {
        tmp = win->error_handler;
    } while (!OPAL_ATOMIC_CMPSET_PTR(&(win->error_handler), tmp, tmp));

    /* Retain the errhandler, corresponding to object refcount
       decrease in errhandler_free.c. */
    OBJ_RETAIN(win->error_handler);
    *errhandler = win->error_handler;

    /* All done */
    return MPI_SUCCESS;
}
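The do/while above is a small compare-and-swap idiom for reading a pointer atomically. A standalone sketch of the same trick, using only the OPAL macro that appears in the snippet:

/* Sketch: snapshot '*slot' atomically.  The CAS swaps tmp for tmp and
 * therefore changes nothing; it can only succeed while 'tmp' still
 * equals the live value, so the loop exits with a consistent read even
 * if another thread is concurrently replacing the pointer. */
static void *atomic_read_ptr(void *volatile *slot)
{
    void *tmp;
    do {
        tmp = *slot;
    } while (!OPAL_ATOMIC_CMPSET_PTR(slot, tmp, tmp));
    return tmp;
}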
Example #7
int
ompi_osc_pt2pt_module_post(ompi_group_t *group,
                           int assert,
                           ompi_win_t *win)
{
    int i;
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);

    OBJ_RETAIN(group);
    ompi_group_increment_proc_count(group);

    OPAL_THREAD_LOCK(&(module->p2p_lock));
    assert(NULL == module->p2p_pw_group);
    module->p2p_pw_group = group;    

    /* Set our mode to expose w/ post */
    ompi_win_remove_mode(win, OMPI_WIN_FENCE);
    ompi_win_append_mode(win, OMPI_WIN_EXPOSE_EPOCH | OMPI_WIN_POSTED);

    /* track how many complete counters we're still waiting on */
    module->p2p_num_complete_msgs +=
        ompi_group_size(module->p2p_pw_group);
    OPAL_THREAD_UNLOCK(&(module->p2p_lock));

    /* send a hello counter to everyone in group */
    for (i = 0 ; i < ompi_group_size(module->p2p_pw_group) ; ++i) {
        ompi_osc_pt2pt_control_send(module, 
                                    ompi_group_peer_lookup(group, i),
                                    OMPI_OSC_PT2PT_HDR_POST, 1, 0);
    }

    return OMPI_SUCCESS;
}
Example #8
int orte_iof_base_callback_create(
    const orte_process_name_t* proc,
    int tag,
    orte_iof_base_callback_fn_t cbfunc,
    void *cbdata)
{
    orte_iof_base_callback_t* cb = OBJ_NEW(orte_iof_base_callback_t);
    orte_iof_base_endpoint_t* endpoint;
    if(NULL == cb)
        return ORTE_ERR_OUT_OF_RESOURCE;

    OPAL_THREAD_LOCK(&orte_iof_base.iof_lock);
    if((endpoint = orte_iof_base_endpoint_lookup(proc,ORTE_IOF_SINK,tag)) == NULL) {
        endpoint = OBJ_NEW(orte_iof_base_endpoint_t);
        if(NULL == endpoint) {
            OPAL_THREAD_UNLOCK(&orte_iof_base.iof_lock);
            return ORTE_ERR_OUT_OF_RESOURCE;
        }
        endpoint->ep_origin = *proc;
        endpoint->ep_mode = ORTE_IOF_SINK;
        endpoint->ep_tag = tag;
        endpoint->ep_fd = -1;
        opal_list_append(&orte_iof_base.iof_endpoints, &endpoint->super);
    } else {
        OBJ_RETAIN(endpoint);
    }
    cb->cb_func = cbfunc;
    cb->cb_data = cbdata;
    opal_list_append(&endpoint->ep_callbacks, (opal_list_item_t*)cb);
    OPAL_THREAD_UNLOCK(&orte_iof_base.iof_lock);
    return ORTE_SUCCESS;
}
Example #9
File: win.c  Project: anandhis/ompi
int
ompi_win_group(ompi_win_t *win, ompi_group_t **group) {
    OBJ_RETAIN(win->w_group);
    *group = win->w_group;

    return OMPI_SUCCESS;
}
Example #10
File: win.c  Project: anandhis/ompi
int
ompi_win_init(void)
{
    int ret;

    assert (sizeof (ompi_predefined_win_t) >= sizeof (ompi_win_t));

    /* setup window Fortran array */
    OBJ_CONSTRUCT(&ompi_mpi_windows, opal_pointer_array_t);
    if( OPAL_SUCCESS != opal_pointer_array_init(&ompi_mpi_windows, 4,
                                                OMPI_FORTRAN_HANDLE_MAX, 16) ) {
        return OMPI_ERROR;
    }

    /* Setup MPI_WIN_NULL */
    OBJ_CONSTRUCT(&ompi_mpi_win_null.win, ompi_win_t);
    ompi_mpi_win_null.win.w_flags = OMPI_WIN_INVALID;
    ompi_mpi_win_null.win.w_group = &ompi_mpi_group_null.group;
    OBJ_RETAIN(&ompi_mpi_group_null);
    ompi_win_set_name(&ompi_mpi_win_null.win, "MPI_WIN_NULL");
    opal_pointer_array_set_item(&ompi_mpi_windows, 0, &ompi_mpi_win_null.win);

    ret = mca_base_var_enum_create ("accumulate_ops", accumulate_ops_values, &ompi_win_accumulate_ops);
    if (OPAL_SUCCESS != ret) {
        return ret;
    }

    ret = mca_base_var_enum_create_flag ("accumulate_order", accumulate_order_flags, &ompi_win_accumulate_order);
    if (OPAL_SUCCESS != ret) {
        return ret;
    }

    return OMPI_SUCCESS;
}
Example #11
static int link_items(repository_item_t *src, repository_item_t *depend)
{
  dependency_item_t *di;

  /* Bozo check */

  if (NULL == src || NULL == depend) {
    return OPAL_ERR_BAD_PARAM;
  }

  /* Make a new dependency item */

  di = OBJ_NEW(dependency_item_t);
  if (NULL == di) {
    return OPAL_ERR_OUT_OF_RESOURCE;
  }

  /* Initialize the new dependency item */

  di->di_repository_entry = depend;

  /* Add it to the dependency list on the source repository entry */

  opal_list_append(&src->ri_dependencies, (opal_list_item_t *) di);

  /* Increment the refcount in the dependency */

  OBJ_RETAIN(depend);

  /* All done */

  return OPAL_SUCCESS;
}
Example #12
int MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler) 
{
    MPI_Errhandler tmp;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN,
                                          FUNC_NAME);
        } else if (NULL == errhandler ||
                   MPI_ERRHANDLER_NULL == errhandler ||
                   (OMPI_ERRHANDLER_TYPE_WIN != errhandler->eh_mpi_object_type && 
                    OMPI_ERRHANDLER_TYPE_PREDEFINED != errhandler->eh_mpi_object_type) ) {
            return OMPI_ERRHANDLER_INVOKE(win, MPI_ERR_ARG, FUNC_NAME);
        }
    }

    /* Prepare the new error handler */
    OBJ_RETAIN(errhandler);

    /* Ditch the old errhandler, and decrement its refcount.  On
       64-bit environments we have to make sure the read of the
       error_handler is atomic. */
    do {
        tmp = win->error_handler;
    } while (!OPAL_ATOMIC_CMPSET(&(win->error_handler), tmp, errhandler));
    OBJ_RELEASE(tmp);

    /* All done */
    return MPI_SUCCESS;
}
Example #13
int MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler)
{
    MPI_Errhandler tmp;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN,
                                          FUNC_NAME);
        } else if (NULL == errhandler ||
                   MPI_ERRHANDLER_NULL == errhandler ||
                   (OMPI_ERRHANDLER_TYPE_WIN != errhandler->eh_mpi_object_type &&
                    OMPI_ERRHANDLER_TYPE_PREDEFINED != errhandler->eh_mpi_object_type) ) {
            return OMPI_ERRHANDLER_INVOKE(win, MPI_ERR_ARG, FUNC_NAME);
        }
    }

    /* Prepare the new error handler */
    OBJ_RETAIN(errhandler);

    OPAL_THREAD_LOCK(&win->w_lock);
    /* Ditch the old errhandler, and decrement its refcount. */
    tmp = win->error_handler;
    win->error_handler = errhandler;
    OBJ_RELEASE(tmp);
    OPAL_THREAD_UNLOCK(&win->w_lock);

    /* All done */
    return MPI_SUCCESS;
}
Example #14
/*
 * Initialize module on the communicator
 */
static int mca_coll_hcoll_module_enable(mca_coll_base_module_t *module,
                                        struct ompi_communicator_t *comm)
{
    mca_coll_hcoll_module_t *hcoll_module = (mca_coll_hcoll_module_t*) module;
    hcoll_module->comm = comm;
    if (OMPI_SUCCESS != __save_coll_handlers(hcoll_module)){
        HCOL_ERROR("coll_hcol: __save_coll_handlers failed");
        return OMPI_ERROR;
    }

    hcoll_set_runtime_tag_offset(-100,mca_pml.pml_max_tag);


    hcoll_module->hcoll_context =
        hcoll_create_context((rte_grp_handle_t)comm);
    if (NULL == hcoll_module->hcoll_context){
        HCOL_VERBOSE(1,"hcoll_create_context returned NULL");
        return OMPI_ERROR;
    }

    if (comm != &ompi_mpi_comm_world.comm){
        mca_coll_hcoll_module_list_item_wrapper_t *mw =
            OBJ_NEW(mca_coll_hcoll_module_list_item_wrapper_t);
        mw->module = hcoll_module;
        OBJ_RETAIN(hcoll_module->comm);
        opal_list_append(&mca_coll_hcoll_component.active_modules,
                         (opal_list_item_t*)mw);
    }

    return OMPI_SUCCESS;
}
Example #15
/**
 * JOB
 */
int orte_dt_copy_job(orte_job_t **dest, orte_job_t *src, opal_data_type_t type)
{
    (*dest) = src;
    OBJ_RETAIN(src);
    
    return ORTE_SUCCESS;
}
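The "copy" here is intentionally shallow: handing back the same pointer with a bumped refcount is the entire copy operation, so the destination owes a matching release. A hypothetical caller:

#include <assert.h>

/* Hypothetical usage, assuming an existing orte_job_t *src_job. */
static void copy_job_example(orte_job_t *src_job)
{
    orte_job_t *copy = NULL;
    if (ORTE_SUCCESS == orte_dt_copy_job(&copy, src_job, ORTE_JOB)) {
        assert(copy == src_job);  /* shallow: same object, refcount +1 */
        /* ... use the copy ... */
        OBJ_RELEASE(copy);        /* drop the reference the copy took */
    }
}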
Example #16
/*
 *  Initializes the mpool module.
 */
void mca_mpool_grdma_module_init(mca_mpool_grdma_module_t* mpool, mca_mpool_grdma_pool_t *pool)
{
    OBJ_RETAIN(pool);
    mpool->pool = pool;

    mpool->super.mpool_component = &mca_mpool_grdma_component.super;
    mpool->super.mpool_base = NULL; /* no base .. */
    mpool->super.mpool_alloc = mca_mpool_grdma_alloc;
    mpool->super.mpool_realloc = mca_mpool_grdma_realloc;
    mpool->super.mpool_free = mca_mpool_grdma_free;
    mpool->super.mpool_register = mca_mpool_grdma_register;
    mpool->super.mpool_find = mca_mpool_grdma_find;
    mpool->super.mpool_deregister = mca_mpool_grdma_deregister;
    mpool->super.mpool_release_memory = mca_mpool_grdma_release_memory;
    mpool->super.mpool_finalize = mca_mpool_grdma_finalize;
    mpool->super.mpool_ft_event = mca_mpool_grdma_ft_event;
    mpool->super.flags = MCA_MPOOL_FLAGS_MPI_ALLOC_MEM;
    mpool->super.rcache = pool->rcache;

    mpool->stat_cache_hit = mpool->stat_cache_miss = mpool->stat_evicted = 0;
    mpool->stat_cache_found = mpool->stat_cache_notfound = 0;

    OBJ_CONSTRUCT(&mpool->reg_list, ompi_free_list_t);
    ompi_free_list_init_new(&mpool->reg_list, mpool->resources.sizeof_reg,
                            opal_cache_line_size,
                            OBJ_CLASS(mca_mpool_base_registration_t), 
                            0, opal_cache_line_size, 0, -1, 32, NULL);
}
Example #17
int MPI_File_get_errhandler( MPI_File file, MPI_Errhandler *errhandler)
{
    /* Error checking */

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        /* Note that MPI-2:9.7 (p265 in the ps; 261 in the pdf) explicitly
           says that you are allowed to set the error handler on
           MPI_FILE_NULL */

        if (NULL == file) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_FILE,
                                          "MPI_File_get_errhandler");
        } else if (NULL == errhandler) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          "MPI_File_get_errhandler");
        }
    }

    /* Retain the errhandler, corresponding to object refcount
       decrease in errhandler_free.c. */
    OBJ_RETAIN(file->error_handler);
    *errhandler = file->error_handler;

    /* All done */

    return MPI_SUCCESS;
}
Example #18
int
mca_io_romio314_file_close (ompi_file_t *fh)
{
    int ret;
    mca_io_romio314_data_t *data;

    /* If we've already started MPI_Finalize by this point, then just
       give up (because ROMIO's file close routine calls MPI_Barrier,
       which we obviously can't do if we've started MPI_Finalize).
       The user didn't close the file, so they should expect
       unexpected behavior. */
    if (ompi_mpi_finalized) {
        return OMPI_SUCCESS;
    }

    /* Because ROMIO expects the MPI library to provide error handler
     * management routines but it doesn't ever participate in
     * MPI_File_close, we have to somehow inform the MPI library that
     * we no longer hold a reference to any user defined error
     * handler.  We do this by setting the errhandler at this point to
     * MPI_ERRORS_RETURN. */
    if (fh->error_handler != &ompi_mpi_errors_return.eh) {
        OBJ_RELEASE(fh->error_handler);
        fh->error_handler = &ompi_mpi_errors_return.eh;
        OBJ_RETAIN(fh->error_handler);
    }

    data = (mca_io_romio314_data_t *) fh->f_io_selected_data;

    OPAL_THREAD_LOCK (&mca_io_romio314_mutex);
    ret = ROMIO_PREFIX(MPI_File_close) (&data->romio_fh);
    OPAL_THREAD_UNLOCK (&mca_io_romio314_mutex);

    return ret;
}
Example #19
int MPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler) 
{
  /* Error checking */

  if (MPI_PARAM_CHECK) {
    OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
    if (ompi_comm_invalid(comm)) {
      return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                    FUNC_NAME);
    } else if (NULL == errhandler ||
               MPI_ERRHANDLER_NULL == errhandler ||
               ( OMPI_ERRHANDLER_TYPE_COMM != errhandler->eh_mpi_object_type &&
		 OMPI_ERRHANDLER_TYPE_PREDEFINED != errhandler->eh_mpi_object_type) ) {
      return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                    FUNC_NAME);
    }
  }

  /* Ditch the old errhandler, and decrement its refcount */

  OBJ_RELEASE(comm->error_handler);

  /* We have a valid comm and errhandler, so increment its refcount */

  comm->error_handler = errhandler;
  OBJ_RETAIN(comm->error_handler);

  /* All done */
  
  return MPI_SUCCESS;
}
Example #20
int ompi_group_incl_bmap(ompi_group_t* group, int n, const int *ranks,
                         ompi_group_t **new_group)
{
    /* local variables */
    int my_group_rank,i,bit_set;
    ompi_group_t *group_pointer, *new_group_pointer;

    group_pointer = (ompi_group_t *)group;

    if ( 0 == n ) {
        *new_group = MPI_GROUP_EMPTY;
        OBJ_RETAIN(MPI_GROUP_EMPTY);
        return OMPI_SUCCESS;
    }

    new_group_pointer = ompi_group_allocate_bmap(group->grp_proc_count, n);
    if( NULL == new_group_pointer ) {
        return MPI_ERR_GROUP;
    }
    /* Initialize the bit array to zeros */
    for (i=0 ; i<new_group_pointer->sparse_data.grp_bitmap.grp_bitmap_array_len ; i++) {
        new_group_pointer->
            sparse_data.grp_bitmap.grp_bitmap_array[i] = 0;
    }

    /* set the bits */
    for (i=0 ; i<n ; i++) {
        bit_set = ranks[i] % BSIZE;
        new_group_pointer->
            sparse_data.grp_bitmap.grp_bitmap_array[(int)(ranks[i]/BSIZE)] |= (1 << bit_set);
    }

    new_group_pointer -> grp_parent_group_ptr = group_pointer;

    OBJ_RETAIN(new_group_pointer -> grp_parent_group_ptr);
    ompi_group_increment_proc_count(new_group_pointer -> grp_parent_group_ptr);

    ompi_group_increment_proc_count(new_group_pointer);
    my_group_rank=group_pointer->grp_my_rank;

    ompi_group_translate_ranks (group_pointer,1,&my_group_rank,
                                new_group_pointer,&new_group_pointer->grp_my_rank);

    *new_group = (MPI_Group)new_group_pointer;

    return OMPI_SUCCESS;
}
Example #21
int
ompi_win_group(ompi_win_t *win, ompi_group_t **group) {
    OBJ_RETAIN(win->w_group);
    ompi_group_increment_proc_count(win->w_group);
    *group = win->w_group;

    return OMPI_SUCCESS;
}
Example #22
/*
 * Constructor
 */
static void file_constructor(ompi_file_t *file)
{
    /* Initialize the MPI_FILE_OPEN params */

    file->f_comm = NULL;
    file->f_filename = NULL;
    file->f_amode = 0;
    file->f_info = NULL;

    /* Initialize flags */

    file->f_flags = 0;

    /* Initialize the fortran <--> C translation index */

    file->f_f_to_c_index = opal_pointer_array_add(&ompi_file_f_to_c_table, 
                                                  file);

    /* Initialize the error handler.  Per MPI-2:9.7 (p265), the
       default error handler on file handles is the error handler on
       MPI_FILE_NULL, which starts out as MPI_ERRORS_RETURN (but can
       be changed by invoking MPI_FILE_SET_ERRHANDLER on
       MPI_FILE_NULL). */

    file->errhandler_type = OMPI_ERRHANDLER_TYPE_FILE;
    if (file != &ompi_mpi_file_null.file) {
        file->error_handler = ompi_mpi_file_null.file.error_handler;
    } else {
        file->error_handler = &ompi_mpi_errors_return.eh;
    }
    OBJ_RETAIN(file->error_handler);

    /* Initialize the module */

    file->f_io_version = MCA_IO_BASE_V_NONE;
    memset(&(file->f_io_selected_module), 0, 
           sizeof(file->f_io_selected_module));
    file->f_io_selected_data = NULL;

    /* If the user doesn't want us to ever free it, then add an extra
       RETAIN here */

    if (ompi_debug_no_free_handles) {
        OBJ_RETAIN(&(file->super));
    }
}
Example #23
File: orterun.c  Project: 00datman/ompi
static void completed(int index, orte_job_t *jdata, int ret, void *cbdata)
{
    orte_submit_status_t *completest = (orte_submit_status_t*)cbdata;
    completest->status = ret;
    ORTE_UPDATE_EXIT_STATUS(ret);
    OBJ_RETAIN(jdata);
    completest->jdata = jdata;
    completest->active = false;
}
Example #24
int ompi_osc_rdma_lock_atomic (int lock_type, int target, int assert, ompi_win_t *win)
{
    ompi_osc_rdma_module_t *module = GET_MODULE(win);
    ompi_osc_rdma_peer_t *peer = ompi_osc_rdma_module_peer (module, target);
    ompi_osc_rdma_sync_t *lock;
    int ret = OMPI_SUCCESS;

    OSC_RDMA_VERBOSE(MCA_BASE_VERBOSE_TRACE, "lock: %d, %d, %d, %s", lock_type, target, assert, win->w_name);

    if (module->no_locks) {
        OSC_RDMA_VERBOSE(MCA_BASE_VERBOSE_INFO, "attempted to lock with no_locks set");
        return OMPI_ERR_RMA_SYNC;
    }

    if (module->all_sync.epoch_active && (OMPI_OSC_RDMA_SYNC_TYPE_LOCK != module->all_sync.type || MPI_LOCK_EXCLUSIVE == lock_type)) {
        /* impossible to get an exclusive lock while holding a global shared lock or in an active
         * target access epoch */
        return OMPI_ERR_RMA_SYNC;
    }

    /* clear the global sync object (in case MPI_Win_fence was called) */
    module->all_sync.type = OMPI_OSC_RDMA_SYNC_TYPE_NONE;

    /* create lock item */
    lock = ompi_osc_rdma_sync_allocate (module);
    if (OPAL_UNLIKELY(NULL == lock)) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    lock->type = OMPI_OSC_RDMA_SYNC_TYPE_LOCK;
    lock->sync.lock.target = target;
    lock->sync.lock.type = lock_type;
    lock->sync.lock.assert = assert;

    lock->peer_list.peer = peer;
    lock->num_peers = 1;
    OBJ_RETAIN(peer);

    if (0 == (assert & MPI_MODE_NOCHECK)) {
        ret = ompi_osc_rdma_lock_atomic_internal (module, peer, lock);
    }

    if (OPAL_LIKELY(OMPI_SUCCESS == ret)) {
        ++module->passive_target_access_epoch;

        opal_atomic_wmb ();

        OPAL_THREAD_SCOPED_LOCK(&module->lock, ompi_osc_rdma_module_lock_insert (module, lock));
    } else {
        OBJ_RELEASE(lock);
    }

    OSC_RDMA_VERBOSE(MCA_BASE_VERBOSE_TRACE, "lock %d complete", target);

    return ret;
}
Example #25
File: group_init.c  Project: AT95/ompi
/*
 * increment the reference count of the proc structures
 */
void ompi_group_increment_proc_count(ompi_group_t *group)
{
    ompi_proc_t * proc_pointer;
    for (int proc = 0 ; proc < group->grp_proc_count ; ++proc) {
	proc_pointer = ompi_group_peer_lookup_existing (group, proc);
	if (proc_pointer) {
	    OBJ_RETAIN(proc_pointer);
	}
    }
}
Example #26
/*
 * increment the reference count of the proc structures
 */
void ompi_group_increment_proc_count(ompi_group_t *group)
{
    int proc;
    ompi_proc_t * proc_pointer;
    for (proc = 0; proc < group->grp_proc_count; proc++) {
        proc_pointer = ompi_group_peer_lookup(group,proc);
        OBJ_RETAIN(proc_pointer);
    }

    return;
}
Example #27
static int workflow_add_send_workflow(opal_buffer_t *buf, char **aggregator)
{
    orte_process_name_t wf_agg;
    orte_rml_recv_cb_t *xfer = NULL;
    int rc;
    int node_index;

    for (node_index = 0; node_index < opal_argv_count(aggregator); node_index++) {

        xfer = OBJ_NEW(orte_rml_recv_cb_t);
        if (NULL == xfer) {
            return ORCM_ERR_OUT_OF_RESOURCE;
        }

        workflow_output_setup(xfer);

        OBJ_RETAIN(buf);

        if (ORCM_SUCCESS != (rc = orcm_cfgi_base_get_hostname_proc(aggregator[node_index],
                                                                   &wf_agg))) {
            orcm_octl_error("node-notfound", aggregator[node_index]);
            orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORCM_RML_TAG_ANALYTICS);
            SAFE_RELEASE(xfer);
            return rc;
        }

        rc = workflow_send_buffer(&wf_agg, buf, xfer);
        if (ORCM_SUCCESS != rc) {
            orcm_octl_error("connection-fail");
            orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORCM_RML_TAG_ANALYTICS);
            SAFE_RELEASE(xfer);
            return rc;
        }

        /* unpack workflow id */
        ORCM_WAIT_FOR_COMPLETION(xfer->active, ORCM_OCTL_WAIT_TIMEOUT, &rc);
        if (ORCM_SUCCESS != rc) {
            orcm_octl_error("connection-fail");
            orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORCM_RML_TAG_ANALYTICS);
            SAFE_RELEASE(xfer);
            return rc;
        }

        rc = workflow_add_unpack_buffer(xfer);
        if (ORCM_SUCCESS != rc) {
            orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORCM_RML_TAG_ANALYTICS);
            SAFE_RELEASE(xfer);
            return rc;
        }
        orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORCM_RML_TAG_ANALYTICS);
        SAFE_RELEASE(xfer);
    }
    return ORCM_SUCCESS;
}
Example #28
static mca_pml_yalla_convertor_t *mca_pml_yalla_get_recv_convertor(void *buf, size_t count,
                                                                   ompi_datatype_t *datatype)
{
    mca_pml_yalla_convertor_t *convertor = PML_YALLA_FREELIST_GET(&ompi_pml_yalla.convs);

    convertor->datatype = datatype;
    OBJ_RETAIN(datatype);
    opal_convertor_copy_and_prepare_for_recv(ompi_proc_local_proc->proc_convertor,
                                             &datatype->super, count, buf, 0,
                                             &convertor->convertor);
    return convertor;
}
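The datatype retain keeps the type description alive for as long as the receive may still touch it; the matching release belongs on the convertor's cleanup path. A sketch of the assumed counterpart:

/* Assumed counterpart, not verbatim from pml_yalla: when the receive
 * completes and the convertor is recycled, the reference taken above
 * is dropped, roughly:
 *
 *     OBJ_RELEASE(convertor->datatype);
 *     PML_YALLA_FREELIST_RETURN(&ompi_pml_yalla.convs, convertor);
 *
 * (the freelist-return macro name is a guess, mirroring
 * PML_YALLA_FREELIST_GET above). */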
Example #29
int ompi_group_incl_strided(ompi_group_t* group, int n, int *ranks, 
			    ompi_group_t **new_group) 
{
    /* local variables */
    int my_group_rank,stride;
    ompi_group_t *group_pointer, *new_group_pointer;
    
    group_pointer = (ompi_group_t *)group;
    
    if ( 0 == n ) {
	*new_group = MPI_GROUP_EMPTY;
	OBJ_RETAIN(MPI_GROUP_EMPTY);
	return OMPI_SUCCESS;
    }

    stride = check_stride(ranks,n);
    new_group_pointer = ompi_group_allocate_strided();
    if( NULL == new_group_pointer ) {
        return MPI_ERR_GROUP;
    }
    new_group_pointer -> grp_parent_group_ptr = group_pointer;

    OBJ_RETAIN(new_group_pointer -> grp_parent_group_ptr);
    ompi_group_increment_proc_count(new_group_pointer -> grp_parent_group_ptr);

    new_group_pointer -> sparse_data.grp_strided.grp_strided_stride = stride;
    new_group_pointer -> sparse_data.grp_strided.grp_strided_offset = ranks[0];
    new_group_pointer -> sparse_data.grp_strided.grp_strided_last_element = ranks[n-1];
    new_group_pointer -> grp_proc_count = n;
          
    ompi_group_increment_proc_count(new_group_pointer);
    my_group_rank = group_pointer->grp_my_rank;
    ompi_group_translate_ranks (new_group_pointer->grp_parent_group_ptr,1,&my_group_rank,
				new_group_pointer,&new_group_pointer->grp_my_rank);

    *new_group = (MPI_Group)new_group_pointer;

     return OMPI_SUCCESS;
}
Example #30
File: win.c  Project: anandhis/ompi
static int alloc_window(struct ompi_communicator_t *comm, opal_info_t *info, int flavor, ompi_win_t **win_out)
{
    ompi_win_t *win;
    ompi_group_t *group;
    int acc_ops, acc_order, flag, ret;

    /* create the object */
    win = OBJ_NEW(ompi_win_t);
    if (NULL == win) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    ret = opal_info_get_value_enum (info, "accumulate_ops", &acc_ops,
                                    OMPI_WIN_ACCUMULATE_OPS_SAME_OP_NO_OP,
                                    ompi_win_accumulate_ops, &flag);
    if (OMPI_SUCCESS != ret) {
        OBJ_RELEASE(win);
        return ret;
    }

    win->w_acc_ops = (ompi_win_accumulate_ops_t)acc_ops;

    ret = opal_info_get_value_enum (info, "accumulate_order", &acc_order,
                                    OMPI_WIN_ACC_ORDER_RAR | OMPI_WIN_ACC_ORDER_WAR |
                                    OMPI_WIN_ACC_ORDER_RAW | OMPI_WIN_ACC_ORDER_WAW,
                                    &(ompi_win_accumulate_order->super), &flag);
    if (OMPI_SUCCESS != ret) {
        OBJ_RELEASE(win);
        return ret;
    }

    win->w_acc_order = acc_order;

    win->w_flavor = flavor;

    /* setup data that is independent of osc component */
    group = comm->c_local_group;
    OBJ_RETAIN(group);
    win->w_group = group;

    /* Copy the info for the info layer */
    win->super.s_info = OBJ_NEW(opal_info_t);
    if (info) {
        opal_info_dup(info, &(win->super.s_info));
    }

    *win_out = win;

    return OMPI_SUCCESS;
}