Ejemplo n.º 1
0
int
ompi_osc_portals4_get_info(struct ompi_win_t *win, struct ompi_info_t **info_used)
{
    ompi_osc_portals4_module_t *module =
        (ompi_osc_portals4_module_t*) win->w_osc_module;
    ompi_info_t *info;
    const char *ordering;

    /* Build a fresh info object describing this window's settings;
     * ownership is transferred to the caller via *info_used. */
    info = OBJ_NEW(ompi_info_t);
    if (NULL == info) {
        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
    }

    /* "no_locks" reports whether passive-target locking is disabled
     * for this window. */
    ompi_info_set(info, "no_locks",
                  (LOCK_ILLEGAL == module->state.lock) ? "true" : "false");

    /* Full accumulate ordering is only reported when this window's
     * atomic limit reaches the component's matching atomic maximum. */
    ordering = (module->atomic_max < mca_osc_portals4_component.matching_atomic_max)
        ? "none" : "rar,war,raw,waw";
    ompi_info_set(info, "accumulate_ordering", ordering);

    *info_used = info;

    return OMPI_SUCCESS;
}
Ejemplo n.º 2
0
/*
 * Store an enum-valued setting in an info object: translate the integer
 * value to its string form through var_enum, then delegate to
 * ompi_info_set.  If the value is not a member of the enum, the
 * translation error code is returned unchanged.
 */
int ompi_info_set_value_enum (ompi_info_t *info, const char *key, int value,
                              mca_base_var_enum_t *var_enum)
{
    const char *str = NULL;
    int rc;

    rc = var_enum->string_from_value (var_enum, value, &str);

    return (OPAL_SUCCESS == rc) ? ompi_info_set (info, key, str) : rc;
}
Ejemplo n.º 3
0
/**
 *   MPI_Info_set - Set a (key, value) pair in an 'MPI_Info' object
 *
 *   @param key null-terminated character string of the index key
 *   @param value null-terminated character string of the value
 *   @param info info object (handle)
 *
 *   @retval MPI_SUCCESS
 *   @retval MPI_ERR_ARG
 *   @retval MPI_ERR_INFO_KEY
 *   @retval MPI_ERR_INFO_VALUE
 *   @retval MPI_ERR_INFO_NOKEY
 *   @retval MPI_ERR_NO_MEM
 *
 *   MPI_Info_set adds the (key,value) pair to info, and overrides
 *   the value if for the same key a previous value was set. key and
 *   value must be NULL terminated strings in C. In Fortran, leading
 *   and trailing spaces in key and value are stripped. If either
 *   key or value is greater than the allowed maxima, MPI_ERR_INFO_KEY
 *   and MPI_ERR_INFO_VALUE are raised
 */
int MPI_Info_set(MPI_Info info, const char *key, const char *value)
{
    int err;
    int key_length;
    int value_length;

    /*
     * Error conditions are
     *   - info is NULL
     *   - No storage space available for the new value
     *   - Key length exceeded MPI_MAX_KEY_VAL
     *   - value length exceeded MPI_MAX_KEY_VAL
     */

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        /* The info handle itself must be valid: non-NULL, not the
         * MPI_INFO_NULL sentinel, and not already freed. */
        if (NULL == info || MPI_INFO_NULL == info ||
            ompi_info_is_freed(info)) {
            return OMPI_ERRHANDLER_INVOKE (MPI_COMM_WORLD, MPI_ERR_INFO,
                                           FUNC_NAME);
        }

        /* A key must be non-NULL, non-empty, and shorter than
         * MPI_MAX_INFO_KEY (the limit leaves room for the NUL). */
        key_length = (key) ? (int)strlen (key) : 0;
        if ((NULL == key) || (0 == key_length) ||
            (MPI_MAX_INFO_KEY <= key_length)) {
            return OMPI_ERRHANDLER_INVOKE (MPI_COMM_WORLD, MPI_ERR_INFO_KEY,
                                           FUNC_NAME);
        }

        /* Same constraints for the value, bounded by MPI_MAX_INFO_VAL. */
        value_length = (value) ? (int)strlen (value) : 0;
        if ((NULL == value) || (0 == value_length) ||
            (MPI_MAX_INFO_VAL <= value_length)) {
            return OMPI_ERRHANDLER_INVOKE (MPI_COMM_WORLD, MPI_ERR_INFO_VALUE,
                                           FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();

    /*
     * If all is right with the arguments, then call the back-end
     * allocator.
     */

    err = ompi_info_set (info, key, value);
    OMPI_ERRHANDLER_RETURN(err, MPI_COMM_WORLD, err, FUNC_NAME);
}
Ejemplo n.º 4
0
/*
 * Duplicate an info: copy every (key, value) pair from info into
 * *newinfo.  The destination object is expected to exist already
 * (presumably created by the caller -- this routine only transfers the
 * entries).  The source list is locked for the duration of the walk.
 */
int ompi_info_dup (ompi_info_t *info, ompi_info_t **newinfo)
{
    opal_list_item_t *item;

    OPAL_THREAD_LOCK(info->i_lock);
    item = opal_list_get_first(&(info->super));
    while (item != opal_list_get_end(&(info->super))) {
        ompi_info_entry_t *entry = (ompi_info_entry_t *) item;
        int rc = ompi_info_set(*newinfo, entry->ie_key, entry->ie_value);
        if (MPI_SUCCESS != rc) {
            /* Release the source lock before propagating the failure. */
            OPAL_THREAD_UNLOCK(info->i_lock);
            return rc;
        }
        item = opal_list_get_next(entry);
    }
    OPAL_THREAD_UNLOCK(info->i_lock);
    return MPI_SUCCESS;
}
Ejemplo n.º 5
0
/*
 * This function is called during ompi_init and initializes the
 * fortran to C translation table. It also fills in the values
 * for the MPI_INFO_GET_ENV object.
 *
 * Returns OMPI_SUCCESS, or OMPI_ERROR if the fortran translation
 * table cannot be initialized.
 */
int ompi_info_init(void)
{
    char val[OPAL_MAXHOSTNAMELEN];
    char *cptr;

    /* initialize table */

    OBJ_CONSTRUCT(&ompi_info_f_to_c_table, opal_pointer_array_t);
    if( OPAL_SUCCESS != opal_pointer_array_init(&ompi_info_f_to_c_table, 0,
                                                OMPI_FORTRAN_HANDLE_MAX, 64) ) {
        return OMPI_ERROR;
    }

    /* Create MPI_INFO_NULL; its fortran index must be 0 because it is
     * the first object registered in the translation table. */
    OBJ_CONSTRUCT(&ompi_mpi_info_null.info, ompi_info_t);
    assert(ompi_mpi_info_null.info.i_f_to_c_index == 0);

    /* Create MPI_INFO_ENV; registered second, so its index must be 1. */
    OBJ_CONSTRUCT(&ompi_mpi_info_env.info, ompi_info_t);
    assert(ompi_mpi_info_env.info.i_f_to_c_index == 1);

    /* fill the env info object */

    /* command for this app_context */
    if (NULL != (cptr = getenv("OMPI_COMMAND"))) {
        ompi_info_set(&ompi_mpi_info_env.info, "command", cptr);
    }

    /* space-separated list of argv for this command */
    if (NULL != (cptr = getenv("OMPI_ARGV"))) {
        ompi_info_set(&ompi_mpi_info_env.info, "argv", cptr);
    }

    /* max procs for the entire job */
    if (NULL != (cptr = getenv("OMPI_MCA_orte_ess_num_procs"))) {
        ompi_info_set(&ompi_mpi_info_env.info, "maxprocs", cptr);
        /* Open MPI does not support the "soft" option, so set it to maxprocs */
        ompi_info_set(&ompi_mpi_info_env.info, "soft", cptr);
    }

    /* local host name.  gethostname() can fail, and POSIX allows it to
     * leave the buffer unterminated on truncation, so check the return
     * value and force NUL-termination before publishing the name;
     * previously the result was used unchecked, potentially exposing
     * an uninitialized buffer. */
    if (0 == gethostname(val, sizeof(val))) {
        val[sizeof(val) - 1] = '\0';
        ompi_info_set(&ompi_mpi_info_env.info, "host", val);
    }

    /* architecture name: prefer the MCA-provided value, fall back to
     * uname(2) where available. */
    if (NULL != (cptr = getenv("OMPI_MCA_orte_cpu_type"))) {
        ompi_info_set(&ompi_mpi_info_env.info, "arch", cptr);
    }
#ifdef HAVE_SYS_UTSNAME_H
    else {
        struct utsname sysname;
        /* only publish "arch" if uname() actually succeeded */
        if (0 == uname(&sysname)) {
            cptr = sysname.machine;
            ompi_info_set(&ompi_mpi_info_env.info, "arch", cptr);
        }
    }
#endif

    /* initial working dir of this process - only set when
     * run by mpiexec as we otherwise have no reliable way
     * of determining the value
     */
    if (NULL != (cptr = getenv("OMPI_MCA_initial_wdir"))) {
        ompi_info_set(&ompi_mpi_info_env.info, "wdir", cptr);
    }

    /* provide the REQUESTED thread level - may be different
     * than the ACTUAL thread level you get.
     * ugly, but have to do a switch to find the string representation */
    switch (ompi_mpi_thread_requested) {
    case MPI_THREAD_SINGLE:
        ompi_info_set(&ompi_mpi_info_env.info, "thread_level", "MPI_THREAD_SINGLE");
        break;
    case MPI_THREAD_FUNNELED:
        ompi_info_set(&ompi_mpi_info_env.info, "thread_level", "MPI_THREAD_FUNNELED");
        break;
    case MPI_THREAD_SERIALIZED:
        ompi_info_set(&ompi_mpi_info_env.info, "thread_level", "MPI_THREAD_SERIALIZED");
        break;
    case MPI_THREAD_MULTIPLE:
        ompi_info_set(&ompi_mpi_info_env.info, "thread_level", "MPI_THREAD_MULTIPLE");
        break;
    default:
        /* do nothing - don't know the value */
        break;
    }

    /**** now some OMPI-specific values that other MPIs may not provide ****/

    /* the number of app_contexts in this job */
    if (NULL != (cptr = getenv("OMPI_NUM_APP_CTX"))) {
        ompi_info_set(&ompi_mpi_info_env.info, "ompi_num_apps", cptr);
    }

    /* space-separated list of first MPI rank of each app_context */
    if (NULL != (cptr = getenv("OMPI_FIRST_RANKS"))) {
        ompi_info_set(&ompi_mpi_info_env.info, "ompi_first_rank", cptr);
    }

    /* space-separated list of num procs for each app_context */
    if (NULL != (cptr = getenv("OMPI_APP_CTX_NUM_PROCS"))) {
        ompi_info_set(&ompi_mpi_info_env.info, "ompi_np", cptr);
    }

    /* location of the directory containing any prepositioned files
     * the user may have requested
     */
    if (NULL != (cptr = getenv("OMPI_FILE_LOCATION"))) {
        ompi_info_set(&ompi_mpi_info_env.info, "ompi_positioned_file_dir", cptr);
    }

    /* All done */

    return OMPI_SUCCESS;
}