Example no. 1
int smpi_coll_tuned_reduce_ompi_binary( void *sendbuf, void *recvbuf,
                                         int count, MPI_Datatype datatype,
                                         MPI_Op  op, int root,
                                         MPI_Comm  comm)
{
    uint32_t segsize;
    int segcount = count;
    size_t typelng;

    /**
     * Determine number of segments and number of elements
     * sent per operation
     */
    typelng = smpi_datatype_size(datatype);

    /* Binary_32K: fixed 32 KiB segment size */
    segsize = 32 * 1024;

    XBT_DEBUG("coll:tuned:reduce_intra_binary rank %d ss %5u",
              smpi_comm_rank(comm), segsize);

    COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount );

    return smpi_coll_tuned_ompi_reduce_generic( sendbuf, recvbuf, count, datatype, 
                                           op, root, comm, 
                                           ompi_coll_tuned_topo_build_tree(2, comm, root), 
                                           segcount, 0);
}
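
To make the segmentation step concrete, here is a small, self-contained sketch of what the segment-count computation is expected to do; the helper compute_segcount below is illustrative and is not the actual COLL_TUNED_COMPUTED_SEGCOUNT macro. With the 32 KiB segment size chosen above and an 8-byte datatype such as MPI_DOUBLE, each pipelined segment carries 4096 elements.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative helper (an assumption, not the real macro): cap the number of
 * elements per segment so that one segment occupies roughly `segsize` bytes. */
static int compute_segcount(uint32_t segsize, size_t typelng, int count)
{
    int segcount = count;                      /* default: one single segment */
    if (segsize >= typelng && (size_t)segsize < typelng * (size_t)count) {
        segcount = (int)(segsize / typelng);   /* elements that fit in one segment */
    }
    return segcount;
}

int main(void)
{
    /* e.g. 1000000 MPI_DOUBLE elements (8 bytes each) with a 32 KiB segment:
     * 32768 / 8 = 4096 elements per segment, i.e. ~245 pipelined segments */
    printf("segcount = %d\n", compute_segcount(32 * 1024, 8, 1000000));
    return 0;
}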
Example no. 2
/*
 * Init module on the communicator
 */
const struct mca_coll_base_module_1_0_0_t *
ompi_coll_tuned_module_init(struct ompi_communicator_t *comm)
{
    int size, rank;
    struct mca_coll_base_comm_t *data;
    /* module function table returned to the caller; its selection is not shown
     * in this excerpt, so it is declared (and NULL-initialised) here to keep
     * the fragment self-contained */
    const struct mca_coll_base_module_1_0_0_t *to_use = NULL;
    /* fanout parameters */
    int rc = 0;
    int i;


    OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:module_init called."));

    /* This routine will become more complex and might have to be */
    /* broken into more sections/function calls */

    /* Order of operations:
     * alloc memory for the non-blocking requests (in case we fall through)
     * add decision rules if using dynamic rules
     *     compact the rules using communicator size info etc.
     * build first-guess cached topologies (might depend on the rules from above)
     *
     * then attach everything to the communicator and return the base module function pointers
     */

    /* Allocate the data that hangs off the communicator */

    if (OMPI_COMM_IS_INTER(comm)) {
        size = ompi_comm_remote_size(comm);
    } else {
        size = ompi_comm_size(comm);
    }


    /*
     * we still malloc the data as it is used by the TUNED modules;
     * if we don't allocate it and fall back to a BASIC module routine,
     * it confuses debuggers. We place any special info after the default data.
     *
     * BUT on very large systems we might not be able to allocate all this
     * memory, so we check an MCA parameter to see whether we should allocate
     * it at all.
     *
     * The default limit is set very high.
     */

    /* if we are within the memory/size limit, allow preallocated data */
    if (size<=ompi_coll_tuned_preallocate_memory_comm_size_limit) {
        data = (mca_coll_base_comm_t*)malloc(sizeof(struct mca_coll_base_comm_t) +
                                             (sizeof(ompi_request_t *) * size * 2));
  
        if (NULL == data) {
            return NULL;
        }
        data->mcct_reqs = (ompi_request_t **) (data + 1);
        data->mcct_num_reqs = size * 2;
    }
    else {
        data = (mca_coll_base_comm_t*)malloc(sizeof(struct mca_coll_base_comm_t)); 
  
        if (NULL == data) {
            return NULL;
        }
        data->mcct_reqs = (ompi_request_t **) NULL;
        data->mcct_num_reqs = 0;
    }


    /*
     * If using dynamic rules, and this is MPI_COMM_WORLD, and a parameter file
     * is requested, then this affects how much storage space is needed.
     * (This is a basic version of what will go into V2.)
     */


    size = ompi_comm_size(comm);  /* find size so we can (A) decide whether to access the file directly */
                                  /* (B) get our very own customised ompi_coll_com_rule_t ptr, */
                                  /*     which only has rules in it for our comm size */

    rank = ompi_comm_rank(comm);  /* find rank, as only MCW rank 0 opens any tuned conf files */
                                  /* actually, if they are below a threshold, they all open it; */
                                  /* we would have to build a collective in here, but just for MCW, */
                                  /* and we have to make sure we have the same rules everywhere :( */

    /* if using dynamic rules, make sure all overrides are NULL before we accidentally override anything */
    if (ompi_coll_tuned_use_dynamic_rules) {
        /* base rules */
        data->all_base_rules = (ompi_coll_alg_rule_t*) NULL;

        /* each collective rule for my com size */
        for (i=0;i<COLLCOUNT;i++) {
            data->com_rules[i] = (ompi_coll_com_rule_t*) NULL;
        }
    }

    /* next dynamic state, recheck all forced rules as well */
    /* warning, we should check to make sure this is really an INTRA comm here... */
    if (ompi_coll_tuned_use_dynamic_rules) {
        ompi_coll_tuned_forced_getvalues         (ompi_coll_tuned_forced_params[ALLREDUCE], &(data->user_forced[ALLREDUCE]));
        ompi_coll_tuned_forced_getvalues         (ompi_coll_tuned_forced_params[ALLTOALL],  &(data->user_forced[ALLTOALL]));
        ompi_coll_tuned_forced_getvalues         (ompi_coll_tuned_forced_params[ALLGATHER],  &(data->user_forced[ALLGATHER]));
        /*         ompi_coll_tuned_forced_getvalues (ompi_coll_tuned_forced_params[ALLTOALLV], &(data->user_forced[ALLTOALLV])); */
        ompi_coll_tuned_forced_getvalues_barrier (ompi_coll_tuned_forced_params[BARRIER],   &(data->user_forced[BARRIER]));
        ompi_coll_tuned_forced_getvalues         (ompi_coll_tuned_forced_params[BCAST],     &(data->user_forced[BCAST]));
        ompi_coll_tuned_forced_getvalues         (ompi_coll_tuned_forced_params[REDUCE],    &(data->user_forced[REDUCE]));
    }


    if (&ompi_mpi_comm_world==comm) {

        if (ompi_coll_tuned_use_dynamic_rules) {

            OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:module_init MCW & Dynamic"));

            if (ompi_coll_tuned_dynamic_rules_filename) {
                OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:module_init Opening [%s]", 
                             ompi_coll_tuned_dynamic_rules_filename));
                rc = ompi_coll_tuned_read_rules_config_file (ompi_coll_tuned_dynamic_rules_filename,
                                                             &(data->all_base_rules), COLLCOUNT);
                if (rc>=0) {
                    OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:module_init Read %d valid rules\n", rc));
                    /* at this point we all have a base set of rules */
                    /* now we can get our customized communicator sized rule set, for each collective */
                    for (i=0;i<COLLCOUNT;i++) {
                        data->com_rules[i] = ompi_coll_tuned_get_com_rule_ptr (data->all_base_rules, i, size);
                    }
                }
                else { /* failed to read the config file, so make sure it's NULL... */
                    data->all_base_rules = (ompi_coll_alg_rule_t*) NULL;
                }


            } /* end if a config filename exists */

        } /* end if dynamic_rules */

    } /* end if MCW */
  
    /* ok: if using dynamic rules, we are not MCW, we are just any rank, and a base set of rules exists... reference them */
    /* order of evaluation is important here: if we are MCW, ompi_mpi_comm_world.c_coll_selected_data is still NULL... */
    if ((ompi_coll_tuned_use_dynamic_rules)&&(!(&ompi_mpi_comm_world==comm))&&
        ((ompi_mpi_comm_world.c_coll_selected_data)->all_base_rules)) {

        OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:module_init NOT MCW & Dynamic"));

        /* this will, erm, fail if MCW doesn't exist, which it should! */
        data->all_base_rules = (ompi_mpi_comm_world.c_coll_selected_data)->all_base_rules;

        /* at this point we all have a base set of rules, if they exist at all */
        /* now we can get our customized communicator sized rule set, for each collective */
        for (i=0;i<COLLCOUNT;i++) {
            data->com_rules[i] = ompi_coll_tuned_get_com_rule_ptr (data->all_base_rules, i, size);
        }
    }

    /*
     * now for the cached topo functions:
     * guess the initial topologies, using rank 0 as root
     */

    /* general n-fanout tree */
    data->cached_ntree = ompi_coll_tuned_topo_build_tree (ompi_coll_tuned_init_tree_fanout, comm, 0); 
    data->cached_ntree_root = 0;
    data->cached_ntree_fanout = ompi_coll_tuned_init_tree_fanout;

    /* binary tree */
    data->cached_bintree = ompi_coll_tuned_topo_build_tree (2, comm, 0); 
    data->cached_bintree_root = 0;

    /* binomial tree */
    data->cached_bmtree = ompi_coll_tuned_topo_build_bmtree (comm, 0);
    data->cached_bmtree_root = 0;

    /*
     * chains (fanout followed by pipelines)
     * are more difficult, as the fanout really depends on the message size [sometimes]:
     * as the size gets larger, the fan-out gets smaller [usually]
     *
     * we will probably change how we cache this later; for now, a mid-size fanout
     * GEF
     */
    data->cached_chain = ompi_coll_tuned_topo_build_chain (ompi_coll_tuned_init_chain_fanout, comm, 0);
    data->cached_chain_root = 0;
    data->cached_chain_fanout = ompi_coll_tuned_init_chain_fanout;

    /* standard pipeline */
    data->cached_pipeline = ompi_coll_tuned_topo_build_chain (1, comm, 0);
    data->cached_pipeline_root = 0;

    /* All done */

    comm->c_coll_selected_data = data;

    OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:module_init Tuned is in use"));
    return to_use;
}
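
As a side note on the allocation pattern near the top of module_init: the request-pointer array is carved out of the same malloc as the bookkeeping struct, so `data + 1` points at the first slot of the array. The following standalone sketch reproduces just that layout; the names comm_data_t, fake_request_t and alloc_comm_data are hypothetical and exist only for illustration.

#include <stdio.h>
#include <stdlib.h>

typedef struct fake_request { int id; } fake_request_t;   /* stand-in for ompi_request_t */

typedef struct comm_data {
    fake_request_t **reqs;       /* points just past this struct when preallocated */
    int              num_reqs;
} comm_data_t;

/* Hypothetical helper mirroring the preallocation decision above:
 * small communicators get struct + 2*size request slots in one allocation,
 * very large ones skip the preallocation entirely. */
static comm_data_t *alloc_comm_data(int comm_size, int prealloc_limit)
{
    comm_data_t *data;

    if (comm_size <= prealloc_limit) {
        data = malloc(sizeof(*data) + sizeof(fake_request_t *) * comm_size * 2);
        if (NULL == data) {
            return NULL;
        }
        data->reqs     = (fake_request_t **)(data + 1);  /* array lives right after the struct */
        data->num_reqs = comm_size * 2;
    } else {
        data = malloc(sizeof(*data));
        if (NULL == data) {
            return NULL;
        }
        data->reqs     = NULL;
        data->num_reqs = 0;
    }
    return data;
}

int main(void)
{
    comm_data_t *d = alloc_comm_data(64, 1024);
    if (d != NULL) {
        printf("preallocated %d request slots\n", d->num_reqs);
        free(d);
    }
    return 0;
}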