Example #1
int ompio_io_ompio_file_read_at (mca_io_ompio_file_t *fh,
				 OMPI_MPI_OFFSET_TYPE offset,
				 void *buf,
				 int count,
				 struct ompi_datatype_t *datatype,
				 ompi_status_public_t * status)
{
    int ret = OMPI_SUCCESS;
    OMPI_MPI_OFFSET_TYPE prev_offset;

    ompio_io_ompio_file_get_position (fh, &prev_offset );

    ompi_io_ompio_set_explicit_offset (fh, offset);
    ret = ompio_io_ompio_file_read (fh,
				    buf,
				    count,
				    datatype,
				    status);

    // An explicit offset file operation is not supposed to modify
    // the internal file pointer, so reset the pointer
    // to the previous value.
    ompi_io_ompio_set_explicit_offset (fh, prev_offset);

    return ret;
}
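For context, a minimal sketch of how an application might drive this routine through the MPI layer; the filename "data.bin", the offset, and the buffer size are placeholders, not taken from the original source:

#include <mpi.h>

int main (int argc, char **argv)
{
    MPI_File fh;
    MPI_Status status;
    int buf[4];

    MPI_Init (&argc, &argv);

    /* "data.bin" is a placeholder filename for this sketch */
    MPI_File_open (MPI_COMM_WORLD, "data.bin", MPI_MODE_RDONLY,
                   MPI_INFO_NULL, &fh);

    /* Explicit-offset read: with the default file view the offset is
       in bytes, and the individual file pointer is left untouched,
       which is exactly the save/restore behavior implemented above. */
    MPI_File_read_at (fh, 16, buf, 4, MPI_INT, &status);

    MPI_File_close (&fh);
    MPI_Finalize ();
    return 0;
}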
Example #2
int ompio_io_ompio_file_iread_at_all (mca_io_ompio_file_t *fp,
				      OMPI_MPI_OFFSET_TYPE offset,
				      void *buf,
				      int count,
				      struct ompi_datatype_t *datatype,
				      ompi_request_t **request)
{
    int ret = OMPI_SUCCESS;
    OMPI_MPI_OFFSET_TYPE prev_offset;

    ompio_io_ompio_file_get_position (fp, &prev_offset );
    ompi_io_ompio_set_explicit_offset (fp, offset);

    if ( NULL != fp->f_fcoll->fcoll_file_iread_all ) {
	ret = fp->f_fcoll->fcoll_file_iread_all (fp,
						 buf,
						 count,
						 datatype,
						 request);
    }
    else {
	/* This fcoll component does not support non-blocking
	   collective I/O operations, so we fake it with
	   individual non-blocking I/O operations. */
	ret = ompio_io_ompio_file_iread ( fp, buf, count, datatype, request );
    }


    ompi_io_ompio_set_explicit_offset (fp, prev_offset);
    return ret;
}
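A usage sketch of the corresponding non-blocking collective call, assuming an MPI-3.1 library (filename and counts are placeholders); note that every rank of the communicator must make the call, whether or not the fcoll component provides fcoll_file_iread_all:

#include <mpi.h>

int main (int argc, char **argv)
{
    MPI_File fh;
    MPI_Request req;
    MPI_Status status;
    double buf[8];

    MPI_Init (&argc, &argv);
    MPI_File_open (MPI_COMM_WORLD, "data.bin", MPI_MODE_RDONLY,
                   MPI_INFO_NULL, &fh);

    /* Non-blocking collective read at an explicit offset; if the
       fcoll component has no fcoll_file_iread_all, the routine above
       falls back to an individual non-blocking read. */
    MPI_File_iread_at_all (fh, 0, buf, 8, MPI_DOUBLE, &req);

    /* ... computation overlapping the I/O ... */

    MPI_Wait (&req, &status);
    MPI_File_close (&fh);
    MPI_Finalize ();
    return 0;
}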
Example #3
int ompio_io_ompio_file_iread_at (mca_io_ompio_file_t *fh,
				  OMPI_MPI_OFFSET_TYPE offset,
				  void *buf,
				  int count,
				  struct ompi_datatype_t *datatype,
				  ompi_request_t **request)
{
    int ret = OMPI_SUCCESS;
    OMPI_MPI_OFFSET_TYPE prev_offset;
    ompio_io_ompio_file_get_position (fh, &prev_offset );

    ompi_io_ompio_set_explicit_offset (fh, offset);
    ret = ompio_io_ompio_file_iread (fh,
				    buf,
				    count,
				    datatype,
				    request);

    /* An explicit offset file operation is not supposed to modify
    ** the internal file pointer, so reset the pointer
    ** to the previous value.
    ** It is OK to reset the position already here, although
    ** the operation might still be pending/ongoing, since
    ** the entire array of <offset, length, memaddress> has
    ** already been constructed in the file_iread operation.
    */
    ompi_io_ompio_set_explicit_offset (fh, prev_offset);

    return ret;
}
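A sketch of the overlap pattern this routine enables (placeholders as before); the early offset restore noted in the comment above is safe because the <offset, length, memaddress> array is built before the call returns:

#include <mpi.h>

int main (int argc, char **argv)
{
    MPI_File fh;
    MPI_Request req;
    MPI_Status status;
    int buf[16];

    MPI_Init (&argc, &argv);
    MPI_File_open (MPI_COMM_WORLD, "data.bin", MPI_MODE_RDONLY,
                   MPI_INFO_NULL, &fh);

    /* Start the explicit-offset read, compute while it is in flight,
       then complete it. */
    MPI_File_iread_at (fh, 64, buf, 16, MPI_INT, &req);

    /* ... unrelated computation ... */

    MPI_Wait (&req, &status);
    MPI_File_close (&fh);
    MPI_Finalize ();
    return 0;
}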
Example #4
int ompio_io_ompio_file_read_at_all (mca_io_ompio_file_t *fh,
				     OMPI_MPI_OFFSET_TYPE offset,
				     void *buf,
				     int count,
				     struct ompi_datatype_t *datatype,
				     ompi_status_public_t * status)
{
    int ret = OMPI_SUCCESS;
    OMPI_MPI_OFFSET_TYPE prev_offset;
    ompio_io_ompio_file_get_position (fh, &prev_offset );

    ompi_io_ompio_set_explicit_offset (fh, offset);
    ret = fh->f_fcoll->fcoll_file_read_all (fh,
                                            buf,
                                            count,
                                            datatype,
                                            status);

    ompi_io_ompio_set_explicit_offset (fh, prev_offset);
    return ret;
}
int ompio_io_ompio_file_read_at_all_begin (mca_io_ompio_file_t *fh,
					   OMPI_MPI_OFFSET_TYPE offset,
					   void *buf,
					   int count,
					   struct ompi_datatype_t *datatype)
{
    int ret = OMPI_SUCCESS;
    OMPI_MPI_OFFSET_TYPE prev_offset;
    ompio_io_ompio_file_get_position (fh, &prev_offset );

    ompi_io_ompio_set_explicit_offset (fh, offset);
    ret = fh->f_fcoll->fcoll_file_read_all_begin (fh,
						  buf,
						  count,
						  datatype);
    
    /* It is OK to reset the position already here, although
    ** the operation might still be pending/ongoing, since
    ** the entire array of <offset, length, memaddress> has
    ** already been constructed in the file_read_all_begin operation.
    */
    ompi_io_ompio_set_explicit_offset (fh, prev_offset);
    return ret;
}
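The begin routine above is one half of a split collective; a minimal sketch of the matching begin/end pair at the MPI level (placeholders as before):

#include <mpi.h>

int main (int argc, char **argv)
{
    MPI_File fh;
    MPI_Status status;
    float buf[32];

    MPI_Init (&argc, &argv);
    MPI_File_open (MPI_COMM_WORLD, "data.bin", MPI_MODE_RDONLY,
                   MPI_INFO_NULL, &fh);

    /* Split collective: begin posts the transfer, end completes it.
       The buffer must not be touched between the two calls. */
    MPI_File_read_at_all_begin (fh, 0, buf, 32, MPI_FLOAT);
    /* ... computation not involving buf ... */
    MPI_File_read_at_all_end (fh, buf, &status);

    MPI_File_close (&fh);
    MPI_Finalize ();
    return 0;
}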
Example #6
int
mca_io_ompio_file_seek (ompi_file_t *fh,
                        OMPI_MPI_OFFSET_TYPE off,
                        int whence)
{
    int ret = OMPI_SUCCESS;
    mca_io_ompio_data_t *data;
    OMPI_MPI_OFFSET_TYPE offset, temp_offset;

    data = (mca_io_ompio_data_t *) fh->f_io_selected_data;

    offset = off * data->ompio_fh.f_etype_size;

    switch(whence) {
    case MPI_SEEK_SET:
        if (offset < 0) {
            return OMPI_ERROR;
        }
        break;
    case MPI_SEEK_CUR:
        offset += data->ompio_fh.f_position_in_file_view;
        offset += data->ompio_fh.f_disp;
        if (offset < 0) {
            return OMPI_ERROR;
        }
        break;
    case MPI_SEEK_END:
        ret = data->ompio_fh.f_fs->fs_file_get_size (&data->ompio_fh,
                                                     &temp_offset);
        offset += temp_offset;
        if (offset < 0 || OMPI_SUCCESS != ret) {
            return OMPI_ERROR;
        }
        break;
    default:
        return OMPI_ERROR;
    }

    ret = ompi_io_ompio_set_explicit_offset (&data->ompio_fh,
                                             offset/data->ompio_fh.f_etype_size);
    return ret;
}
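A sketch of the three whence modes handled by the seek routine above (filename and offsets are placeholders); a seek that resolves to a negative offset makes the routine return OMPI_ERROR:

#include <mpi.h>

int main (int argc, char **argv)
{
    MPI_File fh;
    MPI_Offset pos;

    MPI_Init (&argc, &argv);
    MPI_File_open (MPI_COMM_WORLD, "data.bin", MPI_MODE_RDONLY,
                   MPI_INFO_NULL, &fh);

    MPI_File_seek (fh, 128, MPI_SEEK_SET);   /* absolute position     */
    MPI_File_seek (fh, -64, MPI_SEEK_CUR);   /* relative to current   */
    MPI_File_seek (fh, 0,   MPI_SEEK_END);   /* relative to file end  */

    MPI_File_get_position (fh, &pos);        /* pos is in etype units */

    MPI_File_close (&fh);
    MPI_Finalize ();
    return 0;
}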
int
mca_io_ompio_file_read_at_all_begin (ompi_file_t *fh,
                                     OMPI_MPI_OFFSET_TYPE offset,
                                     void *buf,
                                     int count,
                                     struct ompi_datatype_t *datatype)
{
    int ret = OMPI_SUCCESS;
    mca_io_ompio_data_t *data;

    data = (mca_io_ompio_data_t *) fh->f_io_selected_data;

    ompi_io_ompio_set_explicit_offset (&data->ompio_fh, offset);

    ret = data->ompio_fh.f_fcoll->fcoll_file_read_all_begin (&data->ompio_fh,
                                                             buf,
                                                             count,
                                                             datatype);

    return ret;
}
int
mca_io_ompio_file_read_at (ompi_file_t *fh,
                           OMPI_MPI_OFFSET_TYPE offset,
                           void *buf,
                           int count,
                           struct ompi_datatype_t *datatype,
                           ompi_status_public_t * status)
{
    int ret = OMPI_SUCCESS;
    mca_io_ompio_data_t *data;

    data = (mca_io_ompio_data_t *) fh->f_io_selected_data;

    ompi_io_ompio_set_explicit_offset (&data->ompio_fh, offset);

    ret = mca_io_ompio_file_read (fh,
                                  buf,
                                  count,
                                  datatype,
                                  status);
    return ret;
}
int
mca_io_ompio_file_iwrite_at (ompi_file_t *fh,
                             OMPI_MPI_OFFSET_TYPE offset,
                             void *buf,
                             int count,
                             struct ompi_datatype_t *datatype,
                             ompi_request_t **request)
{
    int ret = OMPI_SUCCESS;
    mca_io_ompio_data_t *data;
        
    data = (mca_io_ompio_data_t *) fh->f_io_selected_data;

    ompi_io_ompio_set_explicit_offset (&data->ompio_fh, offset);

    ret = mca_io_ompio_file_iwrite (fh,
                                    buf,
                                    count,
                                    datatype,
                                    request);
    return ret;
}
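And a sketch for the non-blocking explicit-offset write wrapper above (placeholders as before); the buffer must stay valid until the request completes:

#include <mpi.h>

int main (int argc, char **argv)
{
    MPI_File fh;
    MPI_Request req;
    MPI_Status status;
    int buf[4] = { 1, 2, 3, 4 };

    MPI_Init (&argc, &argv);
    MPI_File_open (MPI_COMM_WORLD, "data.bin",
                   MPI_MODE_CREATE | MPI_MODE_WRONLY,
                   MPI_INFO_NULL, &fh);

    /* Non-blocking write at byte offset 0 of the default view. */
    MPI_File_iwrite_at (fh, 0, buf, 4, MPI_INT, &req);
    MPI_Wait (&req, &status);

    MPI_File_close (&fh);
    MPI_Finalize ();
    return 0;
}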
Example #10
int
ompio_io_ompio_file_open (ompi_communicator_t *comm,
                        const char *filename,
                        int amode,
                        ompi_info_t *info,
                        mca_io_ompio_file_t *ompio_fh, bool use_sharedfp)
{
    int ret = OMPI_SUCCESS;
    int remote_arch;


    ompio_fh->f_iov_type = MPI_DATATYPE_NULL;
    ompio_fh->f_comm     = MPI_COMM_NULL;

    if ( ((amode&MPI_MODE_RDONLY)?1:0) + ((amode&MPI_MODE_RDWR)?1:0) +
	 ((amode&MPI_MODE_WRONLY)?1:0) != 1 ) {
	return MPI_ERR_AMODE;
    }

    if ((amode & MPI_MODE_RDONLY) &&
        ((amode & MPI_MODE_CREATE) || (amode & MPI_MODE_EXCL))) {
	return  MPI_ERR_AMODE;
    }

    if ((amode & MPI_MODE_RDWR) && (amode & MPI_MODE_SEQUENTIAL)) {
	return MPI_ERR_AMODE;
    }

    ompio_fh->f_rank     = ompi_comm_rank (comm);
    ompio_fh->f_size     = ompi_comm_size (comm);
    remote_arch = opal_local_arch;
    ompio_fh->f_convertor = opal_convertor_create (remote_arch, 0);

    if ( true == use_sharedfp ) {
	ret = ompi_comm_dup (comm, &ompio_fh->f_comm);
	if ( OMPI_SUCCESS != ret )  {
	    goto fn_fail;
	}
    }
    else {
	/* No need to duplicate the communicator if the file_open is called
	   from the sharedfp component, since the comm used as an input
	   is already a dup of the user level comm. */
	ompio_fh->f_flags |= OMPIO_SHAREDFP_IS_SET;
	ompio_fh->f_comm = comm;
    }

    ompio_fh->f_fstype = NONE;
    ompio_fh->f_amode  = amode;
    ompio_fh->f_info   = info;
    ompio_fh->f_atomicity = 0;

    ompi_io_ompio_set_file_defaults (ompio_fh);
    ompio_fh->f_filename = filename;

    ompio_fh->f_split_coll_req    = NULL;
    ompio_fh->f_split_coll_in_use = false;

    /* Initialize the print queues here! */
    coll_write_time = (mca_io_ompio_print_queue *) malloc (sizeof(mca_io_ompio_print_queue));
    coll_read_time = (mca_io_ompio_print_queue *) malloc (sizeof(mca_io_ompio_print_queue));

    ompi_io_ompio_initialize_print_queue(coll_write_time);
    ompi_io_ompio_initialize_print_queue(coll_read_time);

    /* set some function pointers required for fcoll, fbtls and sharedfp modules*/
    ompio_fh->f_decode_datatype=ompi_io_ompio_decode_datatype;
    ompio_fh->f_generate_current_file_view=ompi_io_ompio_generate_current_file_view;

    ompio_fh->f_sort=ompi_io_ompio_sort;
    ompio_fh->f_sort_iovec=ompi_io_ompio_sort_iovec;

    ompio_fh->f_allgather_array=ompi_io_ompio_allgather_array;
    ompio_fh->f_allgatherv_array=ompi_io_ompio_allgatherv_array;
    ompio_fh->f_gather_array=ompi_io_ompio_gather_array;
    ompio_fh->f_gatherv_array=ompi_io_ompio_gatherv_array;

    ompio_fh->f_get_num_aggregators=mca_io_ompio_get_num_aggregators;
    ompio_fh->f_get_bytes_per_agg=mca_io_ompio_get_bytes_per_agg;
    ompio_fh->f_set_aggregator_props=ompi_io_ompio_set_aggregator_props;

    ompio_fh->f_full_print_queue=ompi_io_ompio_full_print_queue;
    ompio_fh->f_register_print_entry=ompi_io_ompio_register_print_entry;

    /* This fix is needed for data sieving to work with
       two-phase collective I/O */
     if ((amode & MPI_MODE_WRONLY)){
       amode -= MPI_MODE_WRONLY;
       amode += MPI_MODE_RDWR;
     }
     /*--------------------------------------------------*/


    if (OMPI_SUCCESS != (ret = mca_fs_base_file_select (ompio_fh,
                                                        NULL))) {
        opal_output(1, "mca_fs_base_file_select() failed\n");
        goto fn_fail;
    }
    if (OMPI_SUCCESS != (ret = mca_fbtl_base_file_select (ompio_fh,
                                                          NULL))) {
        opal_output(1, "mca_fbtl_base_file_select() failed\n");
        goto fn_fail;
    }

    if (OMPI_SUCCESS != (ret = mca_fcoll_base_file_select (ompio_fh,
                                                           NULL))) {
        opal_output(1, "mca_fcoll_base_file_select() failed\n");
        goto fn_fail;
    }

    ompio_fh->f_sharedfp_component = NULL; /*component*/
    ompio_fh->f_sharedfp           = NULL; /*module*/
    ompio_fh->f_sharedfp_data      = NULL; /*data*/

    if ( true == use_sharedfp ) {
	if (OMPI_SUCCESS != (ret = mca_sharedfp_base_file_select (ompio_fh, NULL))) {
	    opal_output ( ompi_io_base_framework.framework_output,
			  "mca_sharedfp_base_file_select() failed\n");
	    ompio_fh->f_sharedfp           = NULL; /*module*/
	    /* It is OK not to have a shared file pointer module as long as the shared
	    ** file pointer operations are not used. However, the first call to any
	    ** file_read/write_shared function will return an error code.
	    */
	}

	/* open the file once more for the shared file pointer if required.
	** Per default, the shared file pointer specific actions are however
	** only performed on first access of the shared file pointer, except
	** for the addproc sharedfp component.
	**
	** Lazy open does not work for the addproc sharedfp
	** component since it starts by spawning a process using MPI_Comm_spawn.
	** For this, the first operation has to be collective, which we
	** cannot guarantee outside of the MPI_File_open operation.
	*/
	if ( NULL != ompio_fh->f_sharedfp &&
	     true == use_sharedfp &&
	     (!mca_io_ompio_sharedfp_lazy_open ||
	      !strcmp (ompio_fh->f_sharedfp_component->mca_component_name,
		       "addproc")               )) {
	    ret = ompio_fh->f_sharedfp->sharedfp_file_open(comm,
							   filename,
							   amode,
							   info,
							   ompio_fh);

	    if ( OMPI_SUCCESS != ret ) {
		goto fn_fail;
	    }
	}
    }

     /*Determine topology information if set*/
    if (ompio_fh->f_comm->c_flags & OMPI_COMM_CART){
        ret = mca_io_ompio_cart_based_grouping(ompio_fh);
	if(OMPI_SUCCESS != ret ){
	    ret = MPI_ERR_FILE;
	}
    }

    ret = ompio_fh->f_fs->fs_file_open (comm,
					filename,
					amode,
					info,
					ompio_fh);




    if ( OMPI_SUCCESS != ret ) {
	ret = MPI_ERR_FILE;
        goto fn_fail;
    }


    /* If the file has been opened in append mode, move the internal
       file pointer of OMPIO to the very end of the file. */
    if ( ompio_fh->f_amode & MPI_MODE_APPEND ) {
        OMPI_MPI_OFFSET_TYPE current_size;

        ompio_fh->f_fs->fs_file_get_size( ompio_fh,
                                          &current_size);
        ompi_io_ompio_set_explicit_offset (ompio_fh, current_size);
    }



    return OMPI_SUCCESS;

 fn_fail:
    /* No need to free resources here, since the destructor
     * calls mca_io_ompio_file_close, which actually gets
     * rid of all allocated memory items. */

    return ret;
}
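The access-mode checks at the top of this open routine (and repeated in the variant that follows) can be read as one standalone predicate; the helper name amode_is_valid below is ours, introduced only for illustration:

#include <mpi.h>

/* Exactly one of RDONLY/RDWR/WRONLY must be set; RDONLY excludes
   CREATE and EXCL; RDWR excludes SEQUENTIAL.  Any violation makes
   the open routines above return MPI_ERR_AMODE. */
static int amode_is_valid (int amode)
{
    int n = ((amode & MPI_MODE_RDONLY) ? 1 : 0)
          + ((amode & MPI_MODE_RDWR)   ? 1 : 0)
          + ((amode & MPI_MODE_WRONLY) ? 1 : 0);

    if (1 != n) {
        return 0;
    }
    if ((amode & MPI_MODE_RDONLY) &&
        ((amode & MPI_MODE_CREATE) || (amode & MPI_MODE_EXCL))) {
        return 0;
    }
    if ((amode & MPI_MODE_RDWR) && (amode & MPI_MODE_SEQUENTIAL)) {
        return 0;
    }
    return 1;
}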
int
ompio_io_ompio_file_open (ompi_communicator_t *comm,
                          char *filename,
                          int amode,
                          ompi_info_t *info,
                          mca_io_ompio_file_t *ompio_fh, bool use_sharedfp)
{
    int ret = OMPI_SUCCESS;
    int remote_arch;


    if ( ((amode&MPI_MODE_RDONLY)?1:0) + ((amode&MPI_MODE_RDWR)?1:0) +
            ((amode&MPI_MODE_WRONLY)?1:0) != 1 ) {
        return MPI_ERR_AMODE;
    }

    if ((amode & MPI_MODE_RDONLY) &&
            ((amode & MPI_MODE_CREATE) || (amode & MPI_MODE_EXCL))) {
        return  MPI_ERR_AMODE;
    }

    if ((amode & MPI_MODE_RDWR) && (amode & MPI_MODE_SEQUENTIAL)) {
        return MPI_ERR_AMODE;
    }

    ompio_fh->f_iov_type = MPI_DATATYPE_NULL;
    ompio_fh->f_rank     = ompi_comm_rank (comm);
    ompio_fh->f_size     = ompi_comm_size (comm);
    remote_arch = opal_local_arch;
    ompio_fh->f_convertor = opal_convertor_create (remote_arch, 0);

    ret = ompi_comm_dup (comm, &ompio_fh->f_comm);
    if ( ret != OMPI_SUCCESS )  {
        goto fn_fail;
    }


    ompio_fh->f_fstype = NONE;
    ompio_fh->f_amode  = amode;
    ompio_fh->f_info   = info;
    ompio_fh->f_atomicity = 0;

    ompi_io_ompio_set_file_defaults (ompio_fh);
    ompio_fh->f_filename = filename;

    /* Initialize the print queues here! */
    coll_write_time = (print_queue *) malloc (sizeof(print_queue));
    coll_read_time = (print_queue *) malloc (sizeof(print_queue));

    ompi_io_ompio_initialize_print_queue(coll_write_time);
    ompi_io_ompio_initialize_print_queue(coll_read_time);

    /*
    if (MPI_INFO_NULL != info)  {
        ret = ompi_info_dup (info, &ompio_fh->f_info);
    if (OMPI_SUCCESS != ret) {
             goto fn_fail;
    }
    }
    */
    /* This fix is needed for data sieving to work with
       two-phase collective I/O */
    if ((amode & MPI_MODE_WRONLY)) {
        amode -= MPI_MODE_WRONLY;
        amode += MPI_MODE_RDWR;
    }
    /*--------------------------------------------------*/


    if (OMPI_SUCCESS != (ret = mca_fs_base_file_select (ompio_fh,
                               NULL))) {
        opal_output(1, "mca_fs_base_file_select() failed\n");
        goto fn_fail;
    }
    if (OMPI_SUCCESS != (ret = mca_fbtl_base_file_select (ompio_fh,
                               NULL))) {
        opal_output(1, "mca_fbtl_base_file_select() failed\n");
        goto fn_fail;
    }

    if (OMPI_SUCCESS != (ret = mca_fcoll_base_file_select (ompio_fh,
                               NULL))) {
        opal_output(1, "mca_fcoll_base_file_select() failed\n");
        goto fn_fail;
    }

    ompio_fh->f_sharedfp_component = NULL; /*component*/
    ompio_fh->f_sharedfp           = NULL; /*module*/
    ompio_fh->f_sharedfp_data      = NULL; /*data*/

    if (OMPI_SUCCESS != (ret = mca_sharedfp_base_file_select (ompio_fh, NULL))) {
        opal_output(1, "mca_sharedfp_base_file_select() failed\n");
        goto fn_fail;
    }

    /*Determine topology information if set*/
    if (ompio_fh->f_comm->c_flags & OMPI_COMM_CART) {
        ret = mca_io_ompio_cart_based_grouping(ompio_fh);
        if(OMPI_SUCCESS != ret ) {
            ret = MPI_ERR_FILE;
        }
    }

    ret = ompio_fh->f_fs->fs_file_open (comm,
                                        filename,
                                        amode,
                                        info,
                                        ompio_fh);




    if ( OMPI_SUCCESS != ret ) {
        ret = MPI_ERR_FILE;
        goto fn_fail;
    }

    /* open the file once more for the shared file pointer if required.
    ** Per default, the shared file pointer specific actions are however
    ** only performed on first access of the shared file pointer, except
    ** for the addproc sharedfp component.
    **
    ** Lazy open does not work for the addproc sharedfp
    ** component since it starts by spawning a process using MPI_Comm_spawn.
    ** For this, the first operation has to be collective, which we
    ** cannot guarantee outside of the MPI_File_open operation.
    */
    if ( true == use_sharedfp &&
            (!mca_io_ompio_sharedfp_lazy_open ||
             !strcmp (ompio_fh->f_sharedfp_component->mca_component_name,
                      "addproc")               )) {
        ret = ompio_fh->f_sharedfp->sharedfp_file_open(comm,
                filename,
                amode,
                info,
                ompio_fh);

        if ( OMPI_SUCCESS != ret ) {
            goto fn_fail;
        }
    }

    /* If the file has been opened in append mode, move the internal
       file pointer of OMPIO to the very end of the file. */
    if ( ompio_fh->f_amode & MPI_MODE_APPEND ) {
        OMPI_MPI_OFFSET_TYPE current_size;

        ompio_fh->f_fs->fs_file_get_size( ompio_fh,
                                          &current_size);
        ompi_io_ompio_set_explicit_offset (ompio_fh, current_size);
    }



    return OMPI_SUCCESS;

fn_fail:
    /* No need to free resources here, since the destructor
     * calls mca_io_ompio_file_close, which actually gets
     * rid of all allocated memory items. */

    return ret;
}
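Finally, a sketch of the append-mode behavior implemented at the end of both open variants (the filename is a placeholder): because the internal file pointer is moved to the current end of file, the write below lands after any existing data:

#include <mpi.h>

int main (int argc, char **argv)
{
    MPI_File fh;
    MPI_Status status;
    int marker = 42;

    MPI_Init (&argc, &argv);

    MPI_File_open (MPI_COMM_WORLD, "data.bin",
                   MPI_MODE_CREATE | MPI_MODE_WRONLY | MPI_MODE_APPEND,
                   MPI_INFO_NULL, &fh);

    /* The open above positioned the individual file pointer at EOF. */
    MPI_File_write (fh, &marker, 1, MPI_INT, &status);

    MPI_File_close (&fh);
    MPI_Finalize ();
    return 0;
}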