Example #1
int MPI_Free_mem(void *baseptr)
{
    OPAL_CR_ENTER_LIBRARY();

    /* Per these threads:

         http://www.open-mpi.org/community/lists/devel/2007/07/1977.php 
         http://www.open-mpi.org/community/lists/devel/2007/07/1979.php         

       If you call MPI_ALLOC_MEM with a size of 0, you get NULL
       back.  So don't consider a NULL==baseptr an error. */
    if (NULL != baseptr && OMPI_SUCCESS != mca_mpool_base_free(baseptr)) {
        OPAL_CR_EXIT_LIBRARY();
        return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_NO_MEM, FUNC_NAME);
    }

    OPAL_CR_EXIT_LIBRARY();
    return MPI_SUCCESS;
}
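
As the comment above notes, MPI_ALLOC_MEM with a size of 0 returns NULL, and MPI_Free_mem treats a NULL pointer as a no-op rather than an error. A minimal caller-side sketch of that round trip, using only the standard MPI API:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    void *buf;

    MPI_Init(&argc, &argv);

    /* A zero-byte request yields NULL, per the threads cited above. */
    MPI_Alloc_mem(0, MPI_INFO_NULL, &buf);
    printf("size 0 -> %p\n", buf);

    /* Freeing that NULL succeeds instead of raising MPI_ERR_NO_MEM. */
    MPI_Free_mem(buf);

    /* The normal non-zero path. */
    MPI_Alloc_mem(1024, MPI_INFO_NULL, &buf);
    MPI_Free_mem(buf);

    MPI_Finalize();
    return 0;
}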
Example #2
int OMPI_CR_Quiesce_start(MPI_Comm commP, MPI_Info *info)
{
    int ret = MPI_SUCCESS;
    MPI_Comm comm = MPI_COMM_WORLD; /* The provided comm is currently ignored */
    orte_snapc_base_request_op_t *datum = NULL;
    int my_rank;

    /* argument checking */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
    }

    /*
     * Setup the data structure for the operation
     */
    datum = OBJ_NEW(orte_snapc_base_request_op_t);
    datum->event = ORTE_SNAPC_OP_QUIESCE_START;
    datum->is_active = true;

    MPI_Comm_rank(comm, &my_rank);
    if( 0 == my_rank ) {
        datum->leader = ORTE_PROC_MY_NAME->vpid;
    } else {
        datum->leader = -1; /* Unknown from non-root ranks */
    }

    /*
     * All processes must make this call before the operation can start
     */
    MPI_Barrier(comm);

    /*
     * Leader sends the request
     */
    OPAL_CR_ENTER_LIBRARY();
    ret = orte_snapc.request_op(datum);
    /*ret = ompi_crcp_base_quiesce_start(info);*/
    if( OMPI_SUCCESS != ret ) {
        OBJ_RELEASE(datum);
        OPAL_CR_EXIT_LIBRARY();
        /* Return here: datum has been released, so falling through to
           the cleanup below would touch freed memory. */
        return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OTHER, FUNC_NAME);
    }

    OPAL_CR_EXIT_LIBRARY();

    datum->is_active = false;
    OBJ_RELEASE(datum);

    /*
     * (Old) info logic
     */
    /*ompi_info_set((ompi_info_t*)*info, "target", cur_datum.target_dir);*/

    return ret;
}
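
OMPI_CR_Quiesce_start is collective in effect: the internal MPI_Barrier means every rank must reach the call before rank 0 (the leader) issues the SNAPC request. A hedged caller-side sketch, assuming Open MPI's CR extension also exposes a matching OMPI_CR_Quiesce_end with the same signature and that the prototypes live in mpi-ext.h:

#include <mpi.h>
#include <mpi-ext.h>   /* assumed home of the OMPI_CR_* prototypes */

static void quiesced_region(void)
{
    MPI_Info info = MPI_INFO_NULL;

    /* All ranks must enter; the leader (rank 0) sends the request. */
    OMPI_CR_Quiesce_start(MPI_COMM_WORLD, &info);

    /* ... an externally driven checkpoint can run here ... */

    /* Assumed counterpart that releases the quiesced state. */
    OMPI_CR_Quiesce_end(MPI_COMM_WORLD, &info);
}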
Example #3
int MPI_Buffer_attach(void *buffer, int size)
{
    int ret = OMPI_SUCCESS;

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == buffer || size < 0) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();
    ret = mca_pml_base_bsend_attach(buffer, size);

    OPAL_CR_EXIT_LIBRARY();
    return ret;
}
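
mca_pml_base_bsend_attach is the engine behind MPI's buffered-send mode. A standard-API usage sketch: size the buffer with MPI_Pack_size plus MPI_BSEND_OVERHEAD, attach it, and detach (which waits for buffered traffic to drain) before freeing:

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, payload = 42;
    void *buf, *old;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Room for one int plus the per-message envelope overhead. */
    MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &size);
    size += MPI_BSEND_OVERHEAD;
    buf = malloc(size);
    MPI_Buffer_attach(buf, size);

    /* A buffered self-send completes locally, then is received below. */
    MPI_Bsend(&payload, 1, MPI_INT, rank, 0, MPI_COMM_WORLD);
    MPI_Recv(&payload, 1, MPI_INT, rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    /* Detach blocks until buffered messages drain, then returns the buffer. */
    MPI_Buffer_detach(&old, &size);
    free(old);

    MPI_Finalize();
    return 0;
}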
Example #4
int MPI_Lookup_name(const char *service_name, MPI_Info info, char *port_name)
{
    char *tmp;

    if ( MPI_PARAM_CHECK ) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ( NULL == port_name ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
        if ( NULL == service_name ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
        if (NULL == info || ompi_info_is_freed(info)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INFO,
                                          FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();

    /*
     * MPI-2 defines no info keys for this function, so we do not
     * parse the info object at the moment.
     */

    /*
     * If multiple entries are found, this implementation currently
     * uses the first one.
     */
    tmp = (char *) ompi_pubsub.lookup(service_name, info);
    if ( NULL == tmp ) {
        OPAL_CR_EXIT_LIBRARY();
        return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_NAME,
                                      FUNC_NAME);
    }

    strncpy ( port_name, tmp, MPI_MAX_PORT_NAME );

    OPAL_CR_EXIT_LIBRARY();
    return MPI_SUCCESS;
}
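
MPI_Lookup_name is the client half of the MPI-2 name service: a server publishes a port under a service name and clients resolve it. A minimal pairing sketch (the service name "my-service" is illustrative, and both sides must reach a common name server, e.g. by running under the same mpirun or an ompi-server):

#include <mpi.h>

static void server_side(void)
{
    char port[MPI_MAX_PORT_NAME];

    MPI_Open_port(MPI_INFO_NULL, port);
    MPI_Publish_name("my-service", MPI_INFO_NULL, port);
    /* ... MPI_Comm_accept(port, ...) would follow here ... */
}

static void client_side(void)
{
    char port[MPI_MAX_PORT_NAME];

    /* Resolves the published name back into a connectable port string. */
    MPI_Lookup_name("my-service", MPI_INFO_NULL, port);
    /* ... MPI_Comm_connect(port, ...) would follow here ... */
}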
Example #5
int OMPI_CR_Restart(char *handle, int seq, MPI_Info *info)
{
    int ret = MPI_SUCCESS;
    MPI_Comm comm = MPI_COMM_WORLD;
    orte_snapc_base_request_op_t *datum = NULL;

    /* argument checking */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
    }

    /*
     * Setup the data structure for the operation
     */
    datum = OBJ_NEW(orte_snapc_base_request_op_t);
    datum->event = ORTE_SNAPC_OP_RESTART;
    datum->is_active = true;

    /*
     * Restart is not collective, so the caller is the leader
     */
    datum->leader = OMPI_PROC_MY_NAME->vpid;
    datum->seq_num = seq;
    datum->global_handle = strdup(handle);

    /*
     * Leader sends the request
     */
    OPAL_CR_ENTER_LIBRARY();
    ret = orte_snapc.request_op(datum);
    if( OMPI_SUCCESS != ret ) {
        OBJ_RELEASE(datum);
        OPAL_CR_EXIT_LIBRARY();
        return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OTHER, FUNC_NAME);
    }
    OPAL_CR_EXIT_LIBRARY();

    datum->is_active = false;
    OBJ_RELEASE(datum);

    /********** If successful, should never reach this point (JJH) ******/

    return ret;
}
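
A hedged usage sketch for the restart entry point: the handle and sequence number identify an existing global snapshot (both values below are hypothetical), and per the (JJH) note the call only returns if the restart request fails, since on success the process image is replaced:

#include <mpi.h>
#include <mpi-ext.h>   /* assumed home of the OMPI_CR_* prototypes */
#include <stdio.h>

static void restart_from_snapshot(void)
{
    MPI_Info info = MPI_INFO_NULL;
    /* Hypothetical handle/sequence from an earlier checkpoint run. */
    char handle[] = "ompi_global_snapshot_1234";

    int ret = OMPI_CR_Restart(handle, 0, &info);
    if (MPI_SUCCESS != ret) {
        fprintf(stderr, "restart request failed: %d\n", ret);
    }
}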
Example #6
int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr)
{

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (size < 0 || NULL == baseptr) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        } else if (NULL == info || ompi_info_is_freed(info)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INFO,
                                          FUNC_NAME);
        }
    }
    
    /* Per these threads:

         http://www.open-mpi.org/community/lists/devel/2007/07/1977.php 
         http://www.open-mpi.org/community/lists/devel/2007/07/1979.php         

       If you call MPI_ALLOC_MEM with a size of 0, you get NULL
       back. */
    if (0 == size) {
        *((void **) baseptr) = NULL;
        return MPI_SUCCESS;
    }

    OPAL_CR_ENTER_LIBRARY();

    *((void **) baseptr) = mca_mpool_base_alloc((size_t) size, info);
    OPAL_CR_EXIT_LIBRARY();
    if (NULL == *((void **) baseptr)) {
        return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_NO_MEM, 
                                      FUNC_NAME);
    }

    /* All done */
    return MPI_SUCCESS;
}
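
The main consumer of MPI_Alloc_mem is one-sided communication, since mca_mpool_base_alloc can hand back registered ("pinned") memory that RMA transports prefer. A standard-API sketch pairing it with a window:

#include <mpi.h>

int main(int argc, char **argv)
{
    double *base;
    MPI_Win win;

    MPI_Init(&argc, &argv);

    /* Allocation goes through the mpool, so it may be pre-registered. */
    MPI_Alloc_mem(100 * sizeof(double), MPI_INFO_NULL, &base);
    MPI_Win_create(base, 100 * sizeof(double), sizeof(double),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    /* ... MPI_Put / MPI_Get epochs go here ... */

    MPI_Win_free(&win);
    MPI_Free_mem(base);
    MPI_Finalize();
    return 0;
}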
Example #7
int MPI_Group_free(MPI_Group *group)
{
    int ret;

    /* check to make sure we don't free GROUP_NULL.  Note that we *do*
       allow freeing GROUP_EMPTY after much debate in the OMPI core
       group.  The final thread about this, and the decision to
       support freeing GROUP_EMPTY can be found here:

       http://www.open-mpi.org/community/lists/devel/2007/12/2750.php

       The short version: other MPI implementations allow it (LAM/MPI,
       CT6, MPICH2), probably mainly because the Intel MPI test suite
       expects it, and there are now several years' worth of expected
       behavior allowing it.  Rather than have to explain every time
       why OMPI is the only one that completely adheres to the standard
       / fails the Intel tests, it seemed easier to just let this one
       slide.  It's not really that important, after all! */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ((NULL == group) ||
            (MPI_GROUP_NULL == *group) || (NULL == *group) ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_GROUP,
                                          FUNC_NAME);
        }

    }

    OPAL_CR_ENTER_LIBRARY();

    ret = ompi_group_free(group);

    /* Leave the library before the check so the error path does not
       skip OPAL_CR_EXIT_LIBRARY. */
    OPAL_CR_EXIT_LIBRARY();
    OMPI_ERRHANDLER_CHECK(ret, MPI_COMM_WORLD, ret, FUNC_NAME);

    return MPI_SUCCESS;
}
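
A caller-side sketch of both cases the parameter check above distinguishes: freeing a real group (which resets the handle to MPI_GROUP_NULL) is fully portable, while freeing MPI_GROUP_EMPTY is merely tolerated by Open MPI per the thread cited above:

#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Group world_grp, empty;

    MPI_Init(&argc, &argv);

    /* The usual case: free a group obtained from a communicator. */
    MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
    MPI_Group_free(&world_grp);    /* world_grp is now MPI_GROUP_NULL */

    /* Accepted by Open MPI, but don't rely on it in portable code. */
    empty = MPI_GROUP_EMPTY;
    MPI_Group_free(&empty);

    MPI_Finalize();
    return 0;
}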
Example #8
int MPI_Unpublish_name(const char *service_name, MPI_Info info,
                       const char *port_name)
{
    int rc;
    char range[OPAL_MAX_INFO_VAL];
    int flag=0;
    opal_list_t pinfo;
    opal_value_t *rng;
    char **keys = NULL;

    if ( MPI_PARAM_CHECK ) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ( NULL == port_name ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
        if ( NULL == service_name ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
        if (NULL == info || ompi_info_is_freed(info)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INFO,
                                          FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();
    OBJ_CONSTRUCT(&pinfo, opal_list_t);

    /* OMPI supports an info key ("range") that scopes where
     * the given service name is unpublished */
    if (MPI_INFO_NULL != info) {
        ompi_info_get (info, "range", sizeof(range) - 1, range, &flag);
        if (flag) {
            if (0 == strcmp(range, "nspace")) {
                rng = OBJ_NEW(opal_value_t);
                rng->key = strdup(OPAL_PMIX_RANGE);
                rng->type = OPAL_INT;
                rng->data.integer = OPAL_PMIX_NAMESPACE;  // share only with procs in same nspace
                opal_list_append(&pinfo, &rng->super);
            } else if (0 == strcmp(range, "session")) {
                rng = OBJ_NEW(opal_value_t);
                rng->key = strdup(OPAL_PMIX_RANGE);
                rng->type = OPAL_INT;
                rng->data.integer = OPAL_PMIX_SESSION; // share only with procs in same session
                opal_list_append(&pinfo, &rng->super);
            } else {
                /* unrecognized scope */
                OPAL_LIST_DESTRUCT(&pinfo);
                OPAL_CR_EXIT_LIBRARY();
                return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                              FUNC_NAME);
            }
        }
    }

    /* unpublish the service_name */
    opal_argv_append_nosize(&keys, service_name);

    rc = opal_pmix.unpublish(keys, &pinfo);
    opal_argv_free(keys);
    OPAL_LIST_DESTRUCT(&pinfo);

    if ( OPAL_SUCCESS != rc ) {
        if (OPAL_ERR_NOT_FOUND == rc) {
            /* service couldn't be found */
            OPAL_CR_EXIT_LIBRARY();
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_SERVICE,
                                          FUNC_NAME);
        }
        if (OPAL_ERR_PERM == rc) {
            /* this process didn't own the specified service */
            OPAL_CR_EXIT_LIBRARY();
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ACCESS,
                                          FUNC_NAME);
        }

        /* none of the MPI-specific errors occurred - must be some
         * kind of internal error
         */
        OPAL_CR_EXIT_LIBRARY();
        return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INTERN,
                                      FUNC_NAME);
    }

    OPAL_CR_EXIT_LIBRARY();
    return MPI_SUCCESS;
}
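
A caller-side sketch of the Open MPI-specific "range" info key parsed above; "nspace" and "session" are the two values the code accepts, and anything else raises MPI_ERR_ARG:

#include <mpi.h>

static void unpublish_session_scoped(const char *service, const char *port)
{
    MPI_Info info;

    MPI_Info_create(&info);
    /* Restrict the unpublish to procs in the same session. */
    MPI_Info_set(info, "range", "session");
    MPI_Unpublish_name(service, info, port);
    MPI_Info_free(&info);
}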
Example #9
int MPI_Comm_join(int fd, MPI_Comm *intercomm) 
{
    int rc;
    uint32_t len, rlen, llen, lrlen;
    int send_first=0;
    char *rport;
    ompi_process_name_t rname, tmp_name;

    ompi_communicator_t *newcomp;
    char port_name[MPI_MAX_PORT_NAME];

    if ( MPI_PARAM_CHECK ) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ( NULL == intercomm ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, 
                                          FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();

    /* open a port using the specified tag */
    if (OMPI_SUCCESS != (rc = ompi_dpm.open_port(port_name, OMPI_COMM_JOIN_TAG))) {
        OPAL_CR_EXIT_LIBRARY();
        return rc;
    }
    
    /* send my process name */
    tmp_name = *OMPI_PROC_MY_NAME;
    OMPI_PROCESS_NAME_HTON(tmp_name);
    ompi_socket_send(fd, (char*) &tmp_name, sizeof(tmp_name));

    /* recv the remote name */
    ompi_socket_recv(fd, (char*) &rname, sizeof(rname));
    OMPI_PROCESS_NAME_NTOH(rname);

    /* compare the two to get send_first */
    if (OMPI_PROC_MY_NAME->jobid == rname.jobid) {
        if (OMPI_PROC_MY_NAME->vpid < rname.vpid) {
            send_first = true;
        } else if (OMPI_PROC_MY_NAME->vpid == rname.vpid) {
            /* joining to myself is not allowed */
            *intercomm = MPI_COMM_NULL;
            OPAL_CR_EXIT_LIBRARY();
            return MPI_ERR_INTERN;
        } else {
            send_first = false;
        }
    } else if (OMPI_PROC_MY_NAME->jobid < rname.jobid) {
        send_first = true;
    }

    /* sendrecv the port name through the socket connection.
       We still need to determine how to avoid a potential
       deadlock here. */
    llen   = (uint32_t)(strlen(port_name)+1);
    len    = htonl(llen);
    
    ompi_socket_send( fd, (char *) &len, sizeof(uint32_t));
    ompi_socket_recv (fd, (char *) &rlen, sizeof(uint32_t));

    lrlen  = ntohl(rlen);
    rport = (char *) malloc (lrlen);
    if ( NULL == rport ) {
        *intercomm = MPI_COMM_NULL;
        OPAL_CR_EXIT_LIBRARY();
        return MPI_ERR_INTERN;
    }

    /* Assumption: socket_send should not block, even if the socket
       is not configured to be non-blocking, because the message
       lengths are so short. */
    ompi_socket_send (fd, port_name, llen);
    ompi_socket_recv (fd, rport, lrlen);
    
    /* use the port we received to connect/accept */
    rc = ompi_dpm.connect_accept(MPI_COMM_SELF, 0, rport, send_first, &newcomp);

    free(rport);

    *intercomm = newcomp;
    OPAL_CR_EXIT_LIBRARY();
    OMPI_ERRHANDLER_RETURN (rc, MPI_COMM_SELF, rc, FUNC_NAME);
}
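
Caller-side sketch: each peer already holds one end of a connected TCP socket (obtained however the application likes, e.g. socket()/connect()/accept()) and passes its descriptor to MPI_Comm_join, which turns the pair into an intercommunicator:

#include <mpi.h>

static void join_over_socket(int fd)
{
    MPI_Comm inter;
    int remote_size;

    MPI_Comm_join(fd, &inter);

    /* Exactly one process sits on the remote side of the result. */
    MPI_Comm_remote_size(inter, &remote_size);

    /* ... point-to-point traffic over `inter` ... */

    MPI_Comm_disconnect(&inter);
}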
Example #10
int MPI_Lookup_name(const char *service_name, MPI_Info info, char *port_name)
{
    char range[OPAL_MAX_INFO_VAL];
    int flag=0, ret;
    opal_value_t *rng;
    opal_list_t results, pinfo;
    opal_pmix_pdata_t *pdat;

    if ( MPI_PARAM_CHECK ) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ( NULL == port_name ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
        if ( NULL == service_name ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
        if (NULL == info || ompi_info_is_freed(info)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INFO,
                                          FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();

    OBJ_CONSTRUCT(&pinfo, opal_list_t);

    /* OMPI supports an info key ("range") that scopes where
     * the given service name is looked up */
    if (MPI_INFO_NULL != info) {
        ompi_info_get (info, "range", sizeof(range) - 1, range, &flag);
        if (flag) {
            if (0 == strcmp(range, "nspace")) {
                rng = OBJ_NEW(opal_value_t);
                rng->key = strdup(OPAL_PMIX_RANGE);
                rng->type = OPAL_INT;
                rng->data.integer = OPAL_PMIX_NAMESPACE;  // share only with procs in same nspace
                opal_list_append(&pinfo, &rng->super);
            } else if (0 == strcmp(range, "session")) {
                rng = OBJ_NEW(opal_value_t);
                rng->key = strdup(OPAL_PMIX_RANGE);
                rng->type = OPAL_INT;
                rng->data.integer = OPAL_PMIX_SESSION; // share only with procs in same session
                opal_list_append(&pinfo, &rng->super);
            } else {
                /* unrecognized scope */
                OPAL_LIST_DESTRUCT(&pinfo);
                OPAL_CR_EXIT_LIBRARY();
                return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                              FUNC_NAME);
            }
        }
    }

    /* collect the findings */
    OBJ_CONSTRUCT(&results, opal_list_t);
    pdat = OBJ_NEW(opal_pmix_pdata_t);
    pdat->value.key = strdup(service_name);
    opal_list_append(&results, &pdat->super);

    ret = opal_pmix.lookup(&results, &pinfo);
    OPAL_LIST_DESTRUCT(&pinfo);
    if (OPAL_SUCCESS != ret ||
        OPAL_STRING != pdat->value.type ||
        NULL == pdat->value.data.string) {
        OPAL_LIST_DESTRUCT(&results);
        OPAL_CR_EXIT_LIBRARY();
        return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_NAME,
                                      FUNC_NAME);
    }

    strncpy ( port_name, pdat->value.data.string, MPI_MAX_PORT_NAME );
    OPAL_LIST_DESTRUCT(&results);

    OPAL_CR_EXIT_LIBRARY();
    return MPI_SUCCESS;
}
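
The port string that MPI_Lookup_name copies out feeds directly into MPI_Comm_connect; a minimal client sketch (the service name is illustrative):

#include <mpi.h>

static void connect_to_service(MPI_Comm *server)
{
    char port[MPI_MAX_PORT_NAME];

    MPI_Lookup_name("my-service", MPI_INFO_NULL, port);
    /* root 0 of the single-process connecting group on MPI_COMM_SELF */
    MPI_Comm_connect(port, MPI_INFO_NULL, 0, MPI_COMM_SELF, server);
}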