Example #1
static void
mca_coll_hierarch_checkfor_sm ( struct ompi_communicator_t *comm, int *color,  int *ncount )
{
    int i, size;
    int lncount=0;
    struct ompi_proc_t** procs=NULL;
    struct ompi_proc_t* my_proc=NULL;


    *color = -1;
    size = ompi_comm_size(comm);
    my_proc = ompi_proc_local();
    procs = comm->c_local_group->grp_proc_pointers;
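    /* Scan the local group: count the peers from our job that live on this
       node, and remember the index of the first one as the split color. */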
    for ( i = 0 ; i < size ; i++) {
        if ( OMPI_CAST_RTE_NAME(&procs[i]->super.proc_name)->jobid ==
             OMPI_CAST_RTE_NAME(&my_proc->super.proc_name)->jobid &&
             OPAL_PROC_ON_LOCAL_NODE(procs[i]->super.proc_flags) ) {
            lncount++;
            if ( *color == -1 ) {
                *color = i;
            }
        }
    }

    /* decrease ncount by one (this process does not count itself) so that
       the subsequent allreduce/allgather operations work */
    lncount--;
    *ncount = lncount;
    return;
}
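
The pattern above (scan the local group, count the peers that share this node and job, and use the index of the first such peer as the split color) can be reduced to a small standalone sketch. The peer_t type, its fields, and the sample values below are hypothetical stand-ins for the ompi_proc_t bookkeeping, not Open MPI API:

#include <stdio.h>

/* Hypothetical stand-in for the per-process bookkeeping that ompi_proc_t
 * provides (jobid and the "on my node" flag). */
typedef struct {
    int jobid;
    int on_local_node;
} peer_t;

/* Compute a split color (index of the first on-node peer from the same job)
 * and the number of *other* on-node peers, mirroring
 * mca_coll_hierarch_checkfor_sm(). */
static void checkfor_sm_sketch(const peer_t *peers, int size, int my_jobid,
                               int *color, int *ncount)
{
    int lncount = 0;

    *color = -1;
    for (int i = 0; i < size; i++) {
        if (peers[i].jobid == my_jobid && peers[i].on_local_node) {
            lncount++;
            if (-1 == *color) {
                *color = i;        /* first local peer defines the color */
            }
        }
    }
    *ncount = lncount - 1;         /* exclude ourselves, as the original does */
}

int main(void)
{
    peer_t peers[] = { {7, 1}, {7, 0}, {7, 1}, {7, 1} };
    int color, ncount;

    checkfor_sm_sketch(peers, 4, 7, &color, &ncount);
    printf("color=%d ncount=%d\n", color, ncount);   /* color=0 ncount=2 */
    return 0;
}
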
Example #2
/*
 * Local helper function to build an array of all the procs in a
 * communicator, excluding this process.
 *
 * Killing just the indicated peers must be implemented for
 * MPI_Abort() to work according to the standard's language for
 * a 'high-quality' implementation.
 *
 * It would be nifty if we could differentiate between the
 * abort scenarios (but we don't, currently):
 *      - MPI_Abort()
 *      - MPI_ERRORS_ARE_FATAL
 *      - Victim of MPI_Abort()
 */
static void try_kill_peers(ompi_communicator_t *comm,
                           int errcode)
{
    int nprocs;
    ompi_process_name_t *procs;

    nprocs = ompi_comm_size(comm);
    /* ompi_comm_remote_size() returns 0 if not an intercomm, so
       this is safe */
    nprocs += ompi_comm_remote_size(comm);

    procs = (ompi_process_name_t*) calloc(nprocs, sizeof(ompi_process_name_t));
    if (NULL == procs) {
        /* quick clean orte and get out */
        ompi_rte_abort(errno, "Abort: unable to alloc memory to kill procs");
    }

    /* put all the local group procs in the abort list */
    int rank, i, count;
    rank = ompi_comm_rank(comm);
    for (count = i = 0; i < ompi_comm_size(comm); ++i) {
        if (rank == i) {
            /* Don't include this process in the array */
            --nprocs;
        } else {
            assert(count <= nprocs);
            procs[count++] =
                *OMPI_CAST_RTE_NAME(&ompi_group_get_proc_ptr(comm->c_remote_group, i)->super.proc_name);
        }
    }

    /* if requested, kill off remote group procs too */
    for (i = 0; i < ompi_comm_remote_size(comm); ++i) {
        assert(count <= nprocs);
        procs[count++] =
            *OMPI_CAST_RTE_NAME(&ompi_group_get_proc_ptr(comm->c_remote_group, i)->super.proc_name);
    }

    if (nprocs > 0) {
        ompi_rte_abort_peers(procs, nprocs, errcode);
    }

    /* We could fall through here if ompi_rte_abort_peers() fails, or
       if (nprocs == 0).  Either way, tidy up and let the caller
       handle it. */
    free(procs);
}
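
The heart of try_kill_peers() is the loop that builds an array of every rank in the communicator except the caller. A minimal standalone sketch of that exclusion loop, with plain ints standing in for ompi_process_name_t and no RTE calls, might look like this:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Build an array of peer ranks that excludes my_rank, mirroring the first
 * loop of try_kill_peers() (the real code stores RTE process names, not
 * ranks).  Returns the number of entries, or -1 on allocation failure. */
static int build_peer_list(int size, int my_rank, int **out)
{
    int *procs = calloc(size, sizeof(int));
    int nprocs = size, count = 0;

    if (NULL == procs) {
        return -1;
    }
    for (int i = 0; i < size; ++i) {
        if (i == my_rank) {
            --nprocs;               /* don't include this process */
        } else {
            assert(count <= nprocs);
            procs[count++] = i;
        }
    }
    *out = procs;
    return nprocs;
}

int main(void)
{
    int *peers = NULL;
    int n = build_peer_list(4, 2, &peers);

    for (int i = 0; i < n; ++i) {
        printf("%d ", peers[i]);    /* prints: 0 1 3 */
    }
    printf("\n");
    free(peers);
    return 0;
}
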
Example #3
static int bootstrap_comm(ompi_communicator_t *comm,
                          mca_coll_sm_module_t *module)
{
    int i;
    char *shortpath, *fullpath;
    mca_coll_sm_component_t *c = &mca_coll_sm_component;
    mca_coll_sm_comm_t *data = module->sm_comm_data;
    int comm_size = ompi_comm_size(comm);
    int num_segments = c->sm_comm_num_segments;
    int num_in_use = c->sm_comm_num_in_use_flags;
    int frag_size = c->sm_fragment_size;
    int control_size = c->sm_control_size;
    ompi_process_name_t *lowest_name = NULL;
    size_t size;
    ompi_proc_t *proc;

    /* Make the rendezvous filename for this communicator's shmem data
       segment.  The CID is not guaranteed to be unique among all
       procs on this node, so also pair it with the lowest ORTE
       process name in the communicator to form a unique filename. */
    proc = ompi_group_peer_lookup(comm->c_local_group, 0);
    lowest_name = OMPI_CAST_RTE_NAME(&proc->super.proc_name);
    for (i = 1; i < comm_size; ++i) {
        proc = ompi_group_peer_lookup(comm->c_local_group, i);
        if (ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
                                          OMPI_CAST_RTE_NAME(&proc->super.proc_name),
                                          lowest_name) < 0) {
            lowest_name = OMPI_CAST_RTE_NAME(&proc->super.proc_name);
        }
    }
    asprintf(&shortpath, "coll-sm-cid-%d-name-%s.mmap", comm->c_contextid,
             OMPI_NAME_PRINT(lowest_name));
    if (NULL == shortpath) {
        opal_output_verbose(10, ompi_coll_base_framework.framework_output,
                            "coll:sm:enable:bootstrap comm (%d/%s): asprintf failed",
                            comm->c_contextid, comm->c_name);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    fullpath = opal_os_path(false, ompi_process_info.job_session_dir,
                            shortpath, NULL);
    free(shortpath);
    if (NULL == fullpath) {
        opal_output_verbose(10, ompi_coll_base_framework.framework_output,
                            "coll:sm:enable:bootstrap comm (%d/%s): opal_os_path failed",
                            comm->c_contextid, comm->c_name);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* Calculate how much space we need in the per-communicator shmem
       data segment.  There are several values to add:

       - size of the barrier data (2 of these):
           - fan-in data (num_procs * control_size)
           - fan-out data (num_procs * control_size)
       - size of the "in use" buffers:
           - num_in_use_buffers * control_size
       - size of the message fragment area (one for each segment):
           - control (num_procs * control_size)
           - fragment data (num_procs * (frag_size))

       So it's:

           barrier: 2 * control_size + 2 * control_size
           in use:  num_in_use * control_size
           control: num_segments * (num_procs * control_size * 2)
           message: num_segments * (num_procs * frag_size)
     */

    size = 4 * control_size +
        (num_in_use * control_size) +
        (num_segments * (comm_size * control_size * 2)) +
        (num_segments * (comm_size * frag_size));
    opal_output_verbose(10, ompi_coll_base_framework.framework_output,
                        "coll:sm:enable:bootstrap comm (%d/%s): attaching to %" PRIsize_t " byte mmap: %s",
                        comm->c_contextid, comm->c_name, size, fullpath);
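    /* Rank 0 creates and attaches to the shared-memory segment, then sends
       its descriptor to every other rank; the other ranks receive the
       descriptor and attach to the existing segment. */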
    if (0 == ompi_comm_rank (comm)) {
        data->sm_bootstrap_meta = mca_common_sm_module_create_and_attach (size, fullpath, sizeof(mca_common_sm_seg_header_t), 8);
        if (NULL == data->sm_bootstrap_meta) {
            opal_output_verbose(10, ompi_coll_base_framework.framework_output,
                "coll:sm:enable:bootstrap comm (%d/%s): mca_common_sm_init_group failed",
                comm->c_contextid, comm->c_name);
            free(fullpath);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }

        for (int i = 1 ; i < ompi_comm_size (comm) ; ++i) {
            MCA_PML_CALL(send(&data->sm_bootstrap_meta->shmem_ds, sizeof (data->sm_bootstrap_meta->shmem_ds), MPI_BYTE,
                         i, MCA_COLL_BASE_TAG_BCAST, MCA_PML_BASE_SEND_STANDARD, comm));
        }
    } else {
        opal_shmem_ds_t shmem_ds;
        MCA_PML_CALL(recv(&shmem_ds, sizeof (shmem_ds), MPI_BYTE, 0, MCA_COLL_BASE_TAG_BCAST, comm, MPI_STATUS_IGNORE));
        data->sm_bootstrap_meta = mca_common_sm_module_attach (&shmem_ds, sizeof(mca_common_sm_seg_header_t), 8);
        if (NULL == data->sm_bootstrap_meta) {
            opal_output_verbose(10, ompi_coll_base_framework.framework_output,
                "coll:sm:enable:bootstrap comm (%d/%s): mca_common_sm_module_attach failed",
                comm->c_contextid, comm->c_name);
            free(fullpath);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
    }

    /* All done */
    free(fullpath);
    return OMPI_SUCCESS;
}
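
For reference, the segment-size arithmetic above can be isolated into a tiny standalone helper; the parameter values passed in main() are made up for illustration and are not Open MPI defaults:

#include <stdio.h>

/* Mirror of the size computation in bootstrap_comm(); the values passed in
 * main() are arbitrary examples, not Open MPI defaults. */
static size_t sm_segment_size(size_t comm_size, size_t num_segments,
                              size_t num_in_use, size_t frag_size,
                              size_t control_size)
{
    return 4 * control_size +                             /* fan-in/fan-out barrier */
           num_in_use * control_size +                    /* "in use" flags         */
           num_segments * comm_size * control_size * 2 +  /* per-segment control    */
           num_segments * comm_size * frag_size;          /* fragment data          */
}

int main(void)
{
    printf("%zu bytes\n", sm_segment_size(8, 8, 2, 8192, 4096));
    return 0;
}
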
Example #4
int bcol_basesmuma_smcm_allgather_connection(
                                             mca_bcol_basesmuma_module_t *sm_bcol_module,
                                             mca_sbgp_base_module_t *module,
                                             opal_list_t *peer_list,
                                             bcol_basesmuma_smcm_proc_item_t ***back_files,
                                             ompi_communicator_t *comm,
                                             bcol_basesmuma_smcm_file_t input,
                                             char *base_fname,
                                             bool map_all)
{

    /* define local variables */

    int rc, i, fd;
    ptrdiff_t mem_offset;
    ompi_proc_t *proc_temp, *my_id;
    bcol_basesmuma_smcm_proc_item_t *temp;
    bcol_basesmuma_smcm_proc_item_t *item_ptr;
    bcol_basesmuma_smcm_proc_item_t **backing_files;
    struct file_info_t local_file;
    struct file_info_t *all_files=NULL;

    /* sanity check */
    if (strlen(input.file_name) > SM_BACKING_FILE_NAME_MAX_LEN-1) {
        opal_output (ompi_bcol_base_framework.framework_output, "backing file name too long:  %s len :: %d",
                     input.file_name, (int) strlen(input.file_name));
        return OMPI_ERR_BAD_PARAM;
    }

    backing_files = (bcol_basesmuma_smcm_proc_item_t **)
        calloc(module->group_size, sizeof(bcol_basesmuma_smcm_proc_item_t *));
    if (!backing_files) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* FIXME *back_files might already have been allocated,
     * so free it in order to avoid a memory leak */
    if (NULL != *back_files) {
        free (*back_files);
    }
    *back_files = backing_files;

    my_id = ompi_proc_local();

    /* Phase One:
       gather a list of processes that will participate in the allgather - I'm
       preparing this list from the sbgp-ing module that was passed into the function */

    /* fill in local file information */
    local_file.vpid  = ((orte_process_name_t*)&my_id->super.proc_name)->vpid;
    local_file.jobid = ((orte_process_name_t*)&my_id->super.proc_name)->jobid;
    local_file.file_size=input.size;
    local_file.size_ctl_structure=input.size_ctl_structure;
    local_file.data_seg_alignment=input.data_seg_alignment;

    strcpy (local_file.file_name, input.file_name);

    /* will exchange this data type as a string of characters -
     * this routine is first called before MPI_Init() completes
     * and before error handling is set up, so we can't use the
     * MPI data types to send this data */
    all_files = (struct file_info_t *) calloc(module->group_size,
                                              sizeof (struct file_info_t));
    if (!all_files) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* exchange data */
    rc = comm_allgather_pml(&local_file,all_files,sizeof(struct file_info_t), MPI_CHAR,
                            sm_bcol_module->super.sbgp_partner_module->my_index,
                            sm_bcol_module->super.sbgp_partner_module->group_size,
                            sm_bcol_module->super.sbgp_partner_module->group_list,
                            sm_bcol_module->super.sbgp_partner_module->group_comm);
    if( OMPI_SUCCESS != rc ) {
        opal_output (ompi_bcol_base_framework.framework_output, "failed in comm_allgather_pml.  Error code: %d", rc);
        goto Error;
    }

    /* Phase Four:
       loop through the receive buffer and unpack the data received from remote peers */

    for (i = 0; i < module->group_size; i++) {
        struct file_info_t *rem_file = all_files + i;

        /* check if this is my index or if the file is already mapped (set above). there
         * is no reason to look through the peer list again because no two members of
         * the group will have the same vpid/jobid pair. ignore this previously found
         * mapping if map_all was requested (NTH: not sure why exactly, since we re-map
         * an already mapped file) */
        if (sm_bcol_module->super.sbgp_partner_module->my_index == i) {
            continue;
        }

        proc_temp = ompi_comm_peer_lookup(comm,module->group_list[i]);

        OPAL_LIST_FOREACH(item_ptr, peer_list, bcol_basesmuma_smcm_proc_item_t) {
            /* if the vpid/jobid/filename combination already exists in the list,
               then do not map this peer's file --- because you already have */
            if (0 == ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
                                                  OMPI_CAST_RTE_NAME(&proc_temp->super.proc_name),
                                                  &item_ptr->peer) &&
                0 == strcmp (item_ptr->sm_file.file_name, rem_file->file_name)) {
                ++item_ptr->refcnt;
                /* record file data */
                backing_files[i] = item_ptr;
                break;
            }
        }

        if (!map_all && backing_files[i]) {
            continue;
        }

        temp = OBJ_NEW(bcol_basesmuma_smcm_proc_item_t);
        if (!temp) {
            rc = OMPI_ERR_OUT_OF_RESOURCE;
            goto Error;
        }

        temp->peer.vpid = rem_file->vpid;
        temp->peer.jobid = rem_file->jobid;

        temp->sm_file.file_name = strdup (rem_file->file_name);
        if (!temp->sm_file.file_name) {
            rc = OMPI_ERR_OUT_OF_RESOURCE;
            OBJ_RELEASE(temp);
            goto Error;
        }

        temp->sm_file.size = (size_t) rem_file->file_size;
        temp->sm_file.mpool_size = (size_t) rem_file->file_size;
        temp->sm_file.size_ctl_structure = (size_t) rem_file->size_ctl_structure;
        temp->sm_file.data_seg_alignment = (size_t) rem_file->data_seg_alignment;
        temp->refcnt = 1;

        /* Phase Five:
           If map_all == true, then we map every peer's file;
           otherwise we check whether this vpid/jobid/filename
           combination has already been mapped and, if so, do not
           mmap this peer's file again. */
        fd = open(temp->sm_file.file_name, O_RDWR, 0600);
        if (0 > fd) {
            opal_output (ompi_bcol_base_framework.framework_output, "SMCM Allgather failed to open sm backing file %s. errno = %d",
                         temp->sm_file.file_name, errno);
            rc = OMPI_ERROR;
            goto Error;
        }

        /* map the file */
        temp->sm_mmap = bcol_basesmuma_smcm_reg_mmap (NULL, fd, temp->sm_file.size,
                                                      temp->sm_file.size_ctl_structure,
                                                      temp->sm_file.data_seg_alignment,
                                                      temp->sm_file.file_name);
        close (fd);
        if (NULL == temp->sm_mmap) {
            opal_output (ompi_bcol_base_framework.framework_output, "mmapping failed to map remote peer's file");
            OBJ_RELEASE(temp);
            rc = OMPI_ERROR;
            goto Error;
        }

        /* compute memory offset */
        mem_offset = (ptrdiff_t) temp->sm_mmap->data_addr -
            (ptrdiff_t) temp->sm_mmap->map_seg;
        temp->sm_mmap->map_seg->seg_offset = mem_offset;
        temp->sm_mmap->map_seg->seg_size = temp->sm_file.size - mem_offset;
        /* more stuff to follow */

        /* append this peer's info, including shared memory map addr, onto the
           peer_list */

        /* record file data */
        backing_files[i] = (bcol_basesmuma_smcm_proc_item_t *) temp;

        opal_list_append(peer_list, (opal_list_item_t*) temp);
    }

    rc = OMPI_SUCCESS;

 Error:

    /* error clean-up and return */
    if (NULL != all_files) {
        free(all_files);
    }

    return rc;
}
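
The per-peer mapping step near the end of the loop (open the remote peer's backing file, map it, close the descriptor) boils down to open() plus mmap(). A simplified standalone sketch using plain POSIX calls, rather than the bcol_basesmuma_smcm_reg_mmap() wrapper, and a hypothetical file name, is:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Open a peer's shared-memory backing file and map it read/write.  This is
 * a simplified stand-in for bcol_basesmuma_smcm_reg_mmap(); control-structure
 * sizing and data-segment alignment are omitted. */
static void *map_peer_file(const char *file_name, size_t size)
{
    void *addr;
    int fd = open(file_name, O_RDWR, 0600);

    if (fd < 0) {
        fprintf(stderr, "failed to open %s: errno = %d\n", file_name, errno);
        return NULL;
    }
    addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd);                       /* the mapping survives the close */
    return (MAP_FAILED == addr) ? NULL : addr;
}

int main(void)
{
    /* hypothetical name and size; in the real code these come from the
       file_info_t records exchanged in the allgather */
    void *seg = map_peer_file("/tmp/example-backing-file.sm", 4096);
    printf("mapped at %p\n", seg);
    return 0;
}
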
Example #5
int mca_sharedfp_lockedfile_file_open (struct ompi_communicator_t *comm,
				       const char* filename,
				       int amode,
				       struct opal_info_t *info,
				       mca_io_ompio_file_t *fh)
{
    int err = MPI_SUCCESS;
    char * lockedfilename;
    int handle, rank;
    struct mca_sharedfp_lockedfile_data * module_data = NULL;
    struct mca_sharedfp_base_data_t* sh;
    mca_io_ompio_file_t * shfileHandle, *ompio_fh;
    mca_io_ompio_data_t *data;

    /*------------------------------------------------------------*/
    /*Open the same file again without shared file pointer support*/
    /*------------------------------------------------------------*/
    shfileHandle = (mca_io_ompio_file_t *) malloc(sizeof(mca_io_ompio_file_t));
    if (NULL == shfileHandle) {
        opal_output(0, "mca_sharedfp_lockedfile_file_open: Error, unable to malloc shared file handle\n");
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    err = mca_common_ompio_file_open(comm,filename,amode,info,shfileHandle,false);
    if ( OMPI_SUCCESS != err)  {
        opal_output(0, "mca_sharedfp_lockedfile_file_open: Error during file open\n");
        return err;
    }
    shfileHandle->f_fh = fh->f_fh;
    data = (mca_io_ompio_data_t *) fh->f_fh->f_io_selected_data;
    ompio_fh = &data->ompio_fh;

    err = mca_common_ompio_set_view (shfileHandle,
                                     ompio_fh->f_disp,
                                     ompio_fh->f_etype,
                                     ompio_fh->f_orig_filetype,
                                     ompio_fh->f_datarep,
                                     &(MPI_INFO_NULL->super));
    if (OMPI_SUCCESS != err) {
        opal_output(0, "mca_sharedfp_lockedfile_file_open: Error during set_view\n");
        free(shfileHandle);
        return err;
    }

    /*Memory is allocated here for the sh structure*/
    sh = (struct mca_sharedfp_base_data_t*)malloc(sizeof(struct mca_sharedfp_base_data_t));
    if ( NULL == sh){
        opal_output(0, "mca_sharedfp_lockedfile_file_open: Error, unable to malloc f_sharedfp_ptr struct\n");
        free (shfileHandle);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    /*Populate the sh file structure based on the implementation*/
    sh->sharedfh      = shfileHandle;			/* Shared file pointer*/
    sh->global_offset = 0;				/* Global Offset*/
    sh->comm          = comm; 				/* Communicator*/
    sh->selected_module_data = NULL;

    rank = ompi_comm_rank ( sh->comm);

    /*Open a new file which will maintain the pointer for this file open*/
    if ( mca_sharedfp_lockedfile_verbose ) {
        opal_output(ompi_sharedfp_base_framework.framework_output,
                    "mca_sharedfp_lockedfile_file_open: open locked file.\n");
    }


    module_data = (struct mca_sharedfp_lockedfile_data*)malloc(sizeof(struct mca_sharedfp_lockedfile_data));
    if ( NULL == module_data ) {
        opal_output(ompi_sharedfp_base_framework.framework_output,
                    "mca_sharedfp_lockedfile_file_open: Error, unable to malloc lockedfile_data struct\n");
        free (shfileHandle);
        free (sh);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    opal_jobid_t masterjobid;
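    /* Broadcast rank 0's jobid so every process builds the same lock-file
       name, even if the communicator spans more than one job. */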
    if ( 0 == comm->c_my_rank  ) {
        ompi_proc_t *masterproc = ompi_group_peer_lookup(comm->c_local_group, 0 );
        masterjobid = OMPI_CAST_RTE_NAME(&masterproc->super.proc_name)->jobid;
    }
    comm->c_coll->coll_bcast ( &masterjobid, 1, MPI_UNSIGNED, 0, comm, 
                               comm->c_coll->coll_bcast_module );
 
    size_t filenamelen = strlen(filename) + 16;
    lockedfilename = (char *) malloc(sizeof(char) * filenamelen);
    if (NULL == lockedfilename) {
        free(shfileHandle);
        free(sh);
        free(module_data);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    snprintf(lockedfilename, filenamelen, "%s-%u%s", filename, masterjobid, ".lock");
    module_data->filename = lockedfilename;

    /*-------------------------------------------------*/
    /*Open the lockedfile without shared file pointer  */
    /*-------------------------------------------------*/
    if ( 0 == rank ) {
        OMPI_MPI_OFFSET_TYPE position = 0;
        /* only rank 0 initializes the shared file pointer,
         * therefore there is no need to lock the file */
        handle = open ( lockedfilename, O_RDWR | O_CREAT, 0644 );
        if ( -1 != handle ) {
            write ( handle, &position, sizeof(OMPI_MPI_OFFSET_TYPE) );
            close ( handle );
        }
    }
    comm->c_coll->coll_barrier ( comm, comm->c_coll->coll_barrier_module );

    handle = open ( lockedfilename, O_RDWR, 0644  );
    if ( -1 == handle ) {
        opal_output(0, "[%d]mca_sharedfp_lockedfile_file_open: Error during file open\n", rank);
        free (shfileHandle);
        free (sh);
        free (lockedfilename);
        free (module_data);
        return OMPI_ERROR;
    }

    /*Store the new file handle*/
    module_data->handle = handle;
    /* Assign the lockedfile_data to sh->handle*/
    sh->selected_module_data   = module_data;
    /*remember the shared file handle*/
    fh->f_sharedfp_data = sh;

    comm->c_coll->coll_barrier ( comm, comm->c_coll->coll_barrier_module );

    return err;
}
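
The lock-file bootstrap at the end (rank 0 creates the file and seeds it with a zero offset, everyone synchronizes, then each rank opens it) can be sketched without any MPI or OMPIO types; the rank argument and the barrier callback below are placeholders for the communicator operations:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Create (rank 0 only) and then open the shared-pointer lock file, mirroring
 * the tail of mca_sharedfp_lockedfile_file_open().  'rank' and 'barrier' are
 * placeholders for the communicator rank and collective barrier. */
static int open_locked_file(const char *lockedfilename, int rank,
                            void (*barrier)(void))
{
    int handle;

    if (0 == rank) {
        int64_t position = 0;   /* stand-in for OMPI_MPI_OFFSET_TYPE */
        handle = open(lockedfilename, O_RDWR | O_CREAT, 0644);
        if (handle >= 0) {
            /* seed the shared file pointer with offset 0 */
            (void) write(handle, &position, sizeof(position));
            close(handle);
        }
    }
    barrier();                  /* make sure the file exists everywhere */

    return open(lockedfilename, O_RDWR, 0644);  /* -1 on error */
}

static void no_barrier(void)
{
    /* single-process placeholder for coll_barrier() */
}

int main(void)
{
    int fd = open_locked_file("/tmp/example-shared-pointer.lock", 0, no_barrier);

    printf("lock file fd: %d\n", fd);
    if (fd >= 0) {
        close(fd);
    }
    return 0;
}
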