Example #1
void read() {

	MPI_Status status;
	int totalAtoms, atomPerProc, rcount;

	/* 256,000 atom positions, distributed evenly across the nprocs
	   processes. me, nprocs, posfile, posfh, pos, and PAD (the per-atom
	   stride of 3 floats) are file-scope definitions in the original
	   project. */
	totalAtoms = 256000;
	atomPerProc = totalAtoms / nprocs;
	pos = (float *) malloc(3 * atomPerProc * sizeof(float));
	MPI_Offset mpifo = (MPI_Offset) me * 3 * atomPerProc * sizeof(float);

	MPI_File_open(MPI_COMM_WORLD, posfile, MPI_MODE_RDONLY, MPI_INFO_NULL, &posfh);
	MPI_File_read_at_all(posfh, mpifo, pos, 3 * atomPerProc, MPI_FLOAT, &status);
	MPI_File_close(&posfh);

	/* check how many floats this rank actually received */
	MPI_Get_count(&status, MPI_FLOAT, &rcount);
	printf("%d: have read %d floats\n", me, rcount);

	for (int i = 0; i < 10; i++)
		printf("%d: read atom %d: %f %f %f\n", me, i,
		       pos[i*PAD+0], pos[i*PAD+1], pos[i*PAD+2]);
}
Example #2
int lemonReaderReadData(void *dest, MPI_Offset *nbytes, LemonReader *reader)
{
  MPI_Status status;
  int err;
  int read;

  if ((reader == (LemonReader*)NULL) || (dest == NULL))
  {
    /* reader may be NULL here, so don't dereference it for a rank number */
    fprintf(stderr, "[LEMON] lemonReaderReadData:\n"
                    "        NULL pointer or uninitialized reader provided.\n");
    return LEMON_ERR_PARAM;
  }

  err = MPI_File_read_at_all(*reader->fp, reader->off + reader->pos, dest, *nbytes, MPI_BYTE, &status);
  MPI_Barrier(reader->cartesian);

  if (err != MPI_SUCCESS)
  {
    fprintf(stderr, "[LEMON] Node %d reports in lemonReaderReadData:\n"
                    "        MPI_File_read_at_all returned error code %d.\n", reader->my_rank, err);
    return LEMON_ERR_READ;
  }

  MPI_Get_count(&status, MPI_BYTE, &read);
  *nbytes = (MPI_Offset)read;
  reader->pos += *nbytes;

  return LEMON_SUCCESS;
}
Example #3
int main(int argc, char ** argv) 
{
    MPI_Info info = MPI_INFO_NULL;
    MPI_File fh;
    MPI_Offset off=0;
    MPI_Status status;
    int errcode;
    int i, rank, errs=0, toterrs, buffer[BUFSIZE], buf2[BUFSIZE];

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Info_create(&info);
    MPI_Info_set(info, "romio_cb_write", "enable");
    MPI_Info_set(info, "cb_nodes", "1");

    for (i=0; i<BUFSIZE; i++) {
        buffer[i] = 10000+rank;
    }
    off = rank*sizeof(buffer);

    errcode = MPI_File_open(MPI_COMM_WORLD, argv[1], 
		MPI_MODE_WRONLY|MPI_MODE_CREATE, info, &fh);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_open");
    errcode = MPI_File_write_at_all(fh, off, buffer, BUFSIZE, 
		MPI_INT,  &status);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_write_at_all");
    errcode = MPI_File_close(&fh);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_close");

    errcode = MPI_File_open(MPI_COMM_WORLD, argv[1], 
		MPI_MODE_RDONLY, info, &fh);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_open");
    errcode = MPI_File_read_at_all(fh, off, buf2, BUFSIZE, 
		MPI_INT,  &status);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_read_at_all");
    errcode = MPI_File_close(&fh);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_close");

    for (i=0; i<BUFSIZE; i++) {
        if (buf2[i] != 10000+rank)
	    errs++;
    }
    MPI_Allreduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    if (rank == 0) {
	if( toterrs > 0) {
	    fprintf( stderr, "Found %d errors\n", toterrs );
	}
	else {
	    fprintf( stdout, " No Errors\n" );
	}
    }
    MPI_Info_free(&info);
    MPI_Finalize();

    return 0;
}
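handle_error is not included in this excerpt. A minimal sketch of such a helper, assuming it turns the MPI error code into a readable message and aborts (the body here is an assumption, not the test suite's actual definition), could be:

/* Hypothetical sketch of the handle_error helper used above. */
static void handle_error(int errcode, const char *str)
{
    char msg[MPI_MAX_ERROR_STRING];
    int resultlen;

    /* convert the MPI error code into a human-readable string */
    MPI_Error_string(errcode, msg, &resultlen);
    fprintf(stderr, "%s: %s\n", str, msg);
    MPI_Abort(MPI_COMM_WORLD, errcode);
}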
Example #4
FORTRAN_API void FORT_CALL mpi_file_read_at_all_(MPI_Fint *fh,MPI_Offset *offset,void *buf,
                         MPI_Fint *count,MPI_Fint *datatype,
                         MPI_Status *status, MPI_Fint *ierr )
{
    MPI_File fh_c;
    
    fh_c = MPI_File_f2c(*fh);
    *ierr = MPI_File_read_at_all(fh_c,*offset,buf,*count,(MPI_Datatype)*datatype,status);
}
Example #5
static int verify_type(char *filename, MPI_Datatype type,
	int64_t expected_extent, int do_coll)
{
    int rank, canary;
    MPI_Count tsize;
    int compare=-1;
    int errs=0, toterrs=0;
    MPI_Status status;
    MPI_File fh;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    CHECK( MPI_File_open(MPI_COMM_WORLD, filename,
		MPI_MODE_CREATE|MPI_MODE_RDWR, MPI_INFO_NULL, &fh));
    CHECK( MPI_File_set_view(fh, rank*sizeof(int),
	    MPI_BYTE, type, "native", MPI_INFO_NULL));

    MPI_Type_size_x(type, &tsize);

    canary=rank+1000000;

    /* skip over first instance of type */
    if (do_coll) {
	CHECK( MPI_File_write_at_all(fh, tsize, &canary, 1, MPI_INT, &status));
    } else {
	CHECK( MPI_File_write_at(fh, tsize, &canary, 1, MPI_INT, &status));
    }

    CHECK( MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native",
		MPI_INFO_NULL));

    if (do_coll) {
	CHECK( MPI_File_read_at_all(fh, expected_extent/sizeof(int)+rank,
		&compare, 1, MPI_INT, &status));
    } else {
	CHECK( MPI_File_read_at(fh, expected_extent/sizeof(int)+rank,
		&compare, 1, MPI_INT, &status));
    }

    if (compare != canary)
	errs=1;
    MPI_Allreduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

    MPI_File_close(&fh);

    if (toterrs) {
	printf("%d: got %d expected %d\n", rank, compare, canary);
	/* keep file if there's an error */
    } else {
	if (rank == 0) MPI_File_delete(filename, MPI_INFO_NULL);
    }

    return (toterrs);

}
Example #6
void mpi_file_read_at_all_(MPI_Fint *fh,MPI_Offset *offset,void *buf,
                         MPI_Fint *count,MPI_Fint *datatype,
                         MPI_Status *status, MPI_Fint *ierr )
{
    MPI_File fh_c;
    MPI_Datatype datatype_c;

    fh_c = MPI_File_f2c(*fh);
    datatype_c = MPI_Type_f2c(*datatype);

    *ierr = MPI_File_read_at_all(fh_c,*offset,buf,*count,datatype_c,status);
}
Example #7
/*
 * mpi_io_shared
 *
 * creates a single-shared-file
 * writes with independent-io
 * reads with independent-io
 * writes with collective-io
 * reads with collective-io
 */
int mpi_io_shared (char *path, int size, int rank)
{
    MPI_File fh;
    char filepath[512];
    MPI_Offset offset;
    MPI_Status status;
    void *buf;
    int bufcount = BYTES_PER_RANK;
    int rc;

    buf = malloc(bufcount);
    if (!buf) { return 0; }

    memset(buf, 0xa, bufcount);

    sprintf(filepath, "%s/%s", path, "cp-bench-mpio-shared");
    rc = MPI_File_open(MPI_COMM_WORLD,
                       filepath,
                       (MPI_MODE_CREATE|MPI_MODE_RDWR|MPI_MODE_DELETE_ON_CLOSE),
                       MPI_INFO_NULL,
                       &fh);
    MPI_CHECK(rc,"MPI_File_open");

    /* Indep Write */
    offset = rank * bufcount;
    rc = MPI_File_write_at(fh,offset,buf,bufcount,MPI_BYTE,&status);
    MPI_CHECK(rc,"MPI_File_write_at");

    MPI_Barrier(MPI_COMM_WORLD);

    /* Indep Read */
    offset = ((rank+1)%size) * bufcount;
    rc = MPI_File_read_at(fh,offset,buf,bufcount,MPI_BYTE,&status);
    MPI_CHECK(rc,"MPI_File_read_at");

    /* Collective Write */
    offset = rank * bufcount;
    rc = MPI_File_write_at_all(fh, offset, buf, bufcount, MPI_BYTE, &status);
    MPI_CHECK(rc,"MPI_File_write_at_all");

    /* Collective Read */
    offset = ((rank+1)%size) * bufcount;
    rc = MPI_File_read_at_all(fh, offset, buf, bufcount, MPI_BYTE, &status);
    MPI_CHECK(rc,"MPI_File_read_at_all");

    rc = MPI_File_close(&fh);
    MPI_CHECK(rc,"MPI_File_close");

    free(buf);

    return 1;
}
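MPI_CHECK is likewise not shown in this excerpt; one plausible definition, sketched here as an assumption, reports the failing call and aborts:

/* Hypothetical sketch of the MPI_CHECK macro used above. */
#define MPI_CHECK(rc, msg)                                        \
    do {                                                          \
        if ((rc) != MPI_SUCCESS) {                                \
            char err[MPI_MAX_ERROR_STRING];                       \
            int len;                                              \
            MPI_Error_string((rc), err, &len);                    \
            fprintf(stderr, "%s failed: %s\n", (msg), err);       \
            MPI_Abort(MPI_COMM_WORLD, (rc));                      \
        }                                                         \
    } while (0)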
Example #8
FORT_DLL_SPEC void FORT_CALL mpi_file_read_at_all_ ( MPI_Fint *v1, MPI_Offset *v2, void*v3, MPI_Fint *v4, MPI_Fint *v5, MPI_Fint *v6, MPI_Fint *ierr ){
#ifdef MPI_MODE_RDONLY

#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif

    if (v6 == MPI_F_STATUS_IGNORE) { v6 = (MPI_Fint*)MPI_STATUS_IGNORE; }
    *ierr = MPI_File_read_at_all( MPI_File_f2c(*v1), (MPI_Offset)*v2, v3, (int)*v4, (MPI_Datatype)(*v5), (MPI_Status *)v6 );
#else
*ierr = MPI_ERR_INTERN;
#endif
}
Example #9
JNIEXPORT void JNICALL Java_mpi_File_readAtAll(
        JNIEnv *env, jobject jthis, jlong fh, jlong fileOffset,
        jobject buf, jboolean db, jint off, jint count,
        jlong jType, jint bType, jlongArray stat)
{
    MPI_Datatype type = (MPI_Datatype)jType;
    void *ptr;
    ompi_java_buffer_t *item;
    ompi_java_getWritePtr(&ptr, &item, env, buf, db, count, type);
    MPI_Status status;

    int rc = MPI_File_read_at_all((MPI_File)fh, (MPI_Offset)fileOffset,
                                  ptr, count, type, &status);

    ompi_java_exceptionCheck(env, rc);
    ompi_java_releaseWritePtr(ptr, item, env, buf, db, off, count, type, bType);
    ompi_java_status_set(env, stat, &status);
}
Example #10
File: ex2_heat.c  Project: natj/csc-ss14
void read_restart(field *temperature, parallel_data *parallel, int *iter)
{
	MPI_File fp;
	int full_nx, full_ny;
	int disp, size;

	// initialise MPI metadata with bogus dimensions
	parallel_initialize(parallel, 0, 0);
	// open file for reading
	MPI_File_open(parallel->comm, CHECKPOINT, MPI_MODE_RDONLY,
			MPI_INFO_NULL, &fp);

	// read grid size and current iteration
	MPI_File_read_all(fp, &full_nx, 1, MPI_INT, MPI_STATUS_IGNORE);
	MPI_File_read_all(fp, &full_ny, 1, MPI_INT, MPI_STATUS_IGNORE);
	MPI_File_read_all(fp, iter, 1, MPI_INT, MPI_STATUS_IGNORE);
	// set correct dimensions to MPI metadata
	parallel_set_dimensions(parallel, full_nx, full_ny);
	// set local dimensions and allocate memory for the data
	initialize_field_metadata(temperature, full_nx, full_ny, parallel);
	allocate_field(temperature);

	// size of the local data including the outermost ghost row if at the
	// top or the bottom of the full grid
	if ((parallel->rank == 0) || (parallel->rank == parallel->size - 1)) {
		size = (temperature->nx + 2) * (temperature->ny + 1);
	} else {
		size = (temperature->nx + 2) * temperature->ny;
	}

	// point each MPI task to the correct part of the file
	disp = 3 * sizeof(int);
	if (parallel->rank > 0) {
		disp += (1 + parallel->rank * temperature->ny) * 
			(temperature->nx + 2) * sizeof(double);
	}

	// read data simultaneously to all processes
	MPI_File_read_at_all(fp, disp, &temperature->data[0][0], 
			size, MPI_DOUBLE, MPI_STATUS_IGNORE);

	// close up shop
	MPI_File_close(&fp);
}
Example #11
static void
read_file( char *target, int rank, MPI_Info *info, int *corrupt_blocks ) {
    MPI_File rfh;
    MPI_Status mpi_stat;
    int mpi_ret;
    int i;
    char buffer[OBJ_SIZE];
    char *verify_buf = NULL;
    verify_buf = (char *)malloc(OBJ_SIZE);

    if ( debug ) printf( "%d reading file %s\n", rank, target );
    
    if( (mpi_ret = MPI_File_open(MPI_COMM_WORLD, target, 
                    MPI_MODE_RDONLY, *info, &rfh ) ) != MPI_SUCCESS ) 
    {
        fatal_error( mpi_ret, NULL, "open for read" );
    }

    for( i = 0; i < NUM_OBJS; i++ ) {
        MPI_Offset offset = get_offset( rank, NUM_OBJS, OBJ_SIZE, i );
        fill_buffer( verify_buf, OBJ_SIZE, rank, offset );
        if ( debug ) printf( "Expecting %s", buffer );
        if ( (mpi_ret = MPI_File_read_at_all( rfh, offset, buffer, OBJ_SIZE,
                        MPI_CHAR, &mpi_stat ) ) != MPI_SUCCESS ) 
        {
            fatal_error( mpi_ret, &mpi_stat, "read" );
        }
        if ( memcmp( verify_buf, buffer, OBJ_SIZE ) != 0 ) {
            (*corrupt_blocks)++;
            printf( "Corruption at %lld\n", offset );
            if ( debug ) {
                printf( "\tExpecting %s\n"
                         "\tRecieved  %s\n",
                         verify_buf, buffer );
            }
        }
    }

    if( (mpi_ret = MPI_File_close( &rfh ) ) != MPI_SUCCESS ) {
        fatal_error( mpi_ret, NULL, "close for read" );
    }
    free(verify_buf);

}
Example #12
int lemonReadLatticeParallelMapped(LemonReader *reader, void *data, MPI_Offset siteSize, int const *latticeDims, int const *mapping)
{
  int        read;
  int        error;
  MPI_Status status;
  LemonSetup setup;

  error = lemonClearReaderState(reader);
  if (error != LEMON_SUCCESS)
    return error;

  lemonSetupIOTypes(&setup, reader->cartesian, siteSize, latticeDims, mapping);

  /* Install the data organization we worked out above on the file as a view.
     We keep the individual file pointers synchronized explicitly, so assume they are here. */
  MPI_File_set_view(*reader->fp, reader->off + reader->pos, setup.etype, setup.ftype, "native", MPI_INFO_NULL);

  /* Blast away! */
  MPI_File_read_at_all(*reader->fp, reader->pos, data, setup.localVol, setup.etype, &status);
  MPI_Barrier(reader->cartesian);

  /* Synchronize the file pointer */
  MPI_Get_count(&status, MPI_BYTE, &read);
  reader->pos += setup.totalVol * siteSize;

  /* We want to leave the file in a well-defined state, so we reset the view to a default. */
  /* We don't want to reread any data, so we maximize the file pointer globally. */
  MPI_Barrier(reader->cartesian);
  MPI_File_set_view(*reader->fp, 0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL);

  lemonFreeIOTypes(&setup);

  /* Doing a data read should never get us to EOF, only header scanning -- any shortfall is an error */
  if (read != siteSize * setup.localVol)
  {
    fprintf(stderr, "[LEMON] Node %d reports in lemonReadLatticeParallel:\n"
                    "        Could not read the required amount of data.\n", reader->my_rank);
    return LEMON_ERR_READ;
  }

  return LEMON_SUCCESS;
}
Example #13
int test_file(char *filename, int mynod, int nprocs, char * cb_hosts, const char *msg, int verbose)
{
    MPI_Datatype typevec, newtype, t[3];
    int *buf, i, b[3], errcode, errors=0;
    MPI_File fh;
    MPI_Aint d[3];
    MPI_Status status;
    int SIZE = (STARTING_SIZE/nprocs)*nprocs;
    MPI_Info info;

    if (mynod==0 && verbose) fprintf(stderr, "%s\n", msg);

    buf = (int *) malloc(SIZE*sizeof(int));
    if (buf == NULL) {
	    perror("test_file");
	    MPI_Abort(MPI_COMM_WORLD, -1);
    }


    if (cb_hosts != NULL ) {
	    MPI_Info_create(&info);
	    MPI_Info_set(info, "cb_config_list", cb_hosts);
    } else {
	    info = MPI_INFO_NULL;
    }

    MPI_Type_vector(SIZE/nprocs, 1, nprocs, MPI_INT, &typevec);

    b[0] = b[1] = b[2] = 1;
    d[0] = 0;
    d[1] = mynod*sizeof(int);
    d[2] = SIZE*sizeof(int);
    t[0] = MPI_LB;
    t[1] = typevec;
    t[2] = MPI_UB;

    MPI_Type_struct(3, b, d, t, &newtype);
    MPI_Type_commit(&newtype);
    MPI_Type_free(&typevec);

    if (!mynod) {
	if(verbose) fprintf(stderr, "\ntesting noncontiguous in memory, noncontiguous in file using collective I/O\n");
	MPI_File_delete(filename, info);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    errcode = MPI_File_open(MPI_COMM_WORLD, filename,
		    MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "MPI_File_open");
    }

    MPI_File_set_view(fh, 0, MPI_INT, newtype, "native", info);

    for (i=0; i<SIZE; i++) buf[i] = SEEDER(mynod,i,SIZE);
    errcode = MPI_File_write_all(fh, buf, 1, newtype, &status);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "nc mem - nc file: MPI_File_write_all");
    }

    MPI_Barrier(MPI_COMM_WORLD);

    for (i=0; i<SIZE; i++) buf[i] = -1;

    errcode = MPI_File_read_at_all(fh, 0, buf, 1, newtype, &status);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "nc mem - nc file: MPI_File_read_at_all");
    }

    /* the verification for N compute nodes is tricky. Say we have 3
     * processors.
     * process 0 sees: 0 -1 -1 3 -1 -1 ...
     * process 1 sees: -1 34 -1 -1 37 -1 ...
     * process 2 sees: -1 -1 68 -1 -1 71 ... */

    /* verify those leading -1s exist if they should */
    for (i=0; i<mynod; i++ ) {
	    if ( buf[i] != -1 ) {
		    if(verbose) fprintf(stderr, "Process %d: buf is %d, should be -1\n", mynod, buf[i]);
		    errors++;
	    }
    }
    /* now the modulo games are hairy.  processor 0 sees real data in the 0th,
     * 3rd, 6th... elements of the buffer (assuming nprocs==3 ).  proc 1 sees
     * the data in 1st, 4th, 7th..., and proc 2 sees it in 2nd, 5th, 8th */

    for(/* 'i' set in above loop */; i<SIZE; i++) {
	    if ( ((i-mynod)%nprocs) && buf[i] != -1)  {
		    if(verbose) fprintf(stderr, "Process %d: buf %d is %d, should be -1\n",
				    mynod, i, buf[i]);
		    errors++;
	    }
	    if ( !((i-mynod)%nprocs) && buf[i] != SEEDER(mynod,i,SIZE) ) {
		    if(verbose) fprintf(stderr, "Process %d: buf %d is %d, should be %d\n",
				    mynod, i, buf[i], SEEDER(mynod,i,SIZE));
		    errors++;
	    }
    }
    MPI_File_close(&fh);

    MPI_Barrier(MPI_COMM_WORLD);

    if (!mynod) {
	if(verbose) fprintf(stderr, "\ntesting noncontiguous in memory, contiguous in file using collective I/O\n");
	MPI_File_delete(filename, info);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  info, &fh);

    for (i=0; i<SIZE; i++) buf[i] = SEEDER(mynod,i,SIZE);
    errcode = MPI_File_write_at_all(fh, mynod*(SIZE/nprocs)*sizeof(int),
		    buf, 1, newtype, &status);
    if (errcode != MPI_SUCCESS)
	    handle_error(errcode, "nc mem - c file: MPI_File_write_at_all");

    MPI_Barrier(MPI_COMM_WORLD);

    for (i=0; i<SIZE; i++) buf[i] = -1;

    errcode = MPI_File_read_at_all(fh, mynod*(SIZE/nprocs)*sizeof(int),
		    buf, 1, newtype, &status);
    if (errcode != MPI_SUCCESS)
	    handle_error(errcode, "nc mem - c file: MPI_File_read_at_all");

    /* just like as above */
    for (i=0; i<mynod; i++ ) {
	    if ( buf[i] != -1 ) {
		    if(verbose) fprintf(stderr, "Process %d: buf is %d, should be -1\n", mynod, buf[i]);
		    errors++;
	    }
    }
    for(/* i set in above loop */; i<SIZE; i++) {
	    if ( ((i-mynod)%nprocs) && buf[i] != -1)  {
		    if(verbose) fprintf(stderr, "Process %d: buf %d is %d, should be -1\n",
				    mynod, i, buf[i]);
		    errors++;
	    }
	    if ( !((i-mynod)%nprocs) && buf[i] != SEEDER(mynod,i,SIZE)) {
		    if(verbose) fprintf(stderr, "Process %d: buf %d is %d, should be %d\n",
				    mynod, i, buf[i], SEEDER(mynod,i,SIZE) );
		    errors++;
	    }
    }

    MPI_File_close(&fh);

    MPI_Barrier(MPI_COMM_WORLD);

    if (!mynod) {
	if(verbose) fprintf(stderr, "\ntesting contiguous in memory, noncontiguous in file using collective I/O\n");
	MPI_File_delete(filename, info);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  info, &fh);

    MPI_File_set_view(fh, 0, MPI_INT, newtype, "native", info);

    for (i=0; i<SIZE; i++) buf[i] = SEEDER(mynod, i, SIZE);
    errcode = MPI_File_write_all(fh, buf, SIZE, MPI_INT, &status);
    if (errcode != MPI_SUCCESS)
	    handle_error(errcode, "c mem - nc file: MPI_File_write_all");

    MPI_Barrier(MPI_COMM_WORLD);

    for (i=0; i<SIZE; i++) buf[i] = -1;

    errcode = MPI_File_read_at_all(fh, 0, buf, SIZE, MPI_INT, &status);
    if (errcode != MPI_SUCCESS)
	    handle_error(errcode, "c mem - nc file: MPI_File_read_at_all");

    /* same crazy checking */
    for (i=0; i<SIZE; i++) {
	    if (buf[i] != SEEDER(mynod, i, SIZE)) {
		if(verbose) fprintf(stderr, "Process %d: buf %d is %d, should be %d\n", mynod, i, buf[i], SEEDER(mynod, i, SIZE));
		errors++;
	    }
    }

    MPI_File_close(&fh);

    MPI_Type_free(&newtype);
    free(buf);
    if (info != MPI_INFO_NULL) MPI_Info_free(&info);
    return errors;
}
Example #14
void readlines(MPI_File *in, const int rank, const int size, const int overlap, char ***lines, int *nlines) {
	/*@see: http://stackoverflow.com/a/13328819/2521647 */
	MPI_Offset filesize;
	MPI_Offset localsize;
	MPI_Offset start;
	MPI_Offset end;
	char *chunk;

	/* figure out who reads what */

	MPI_File_get_size(*in, &filesize);
	localsize = filesize / size;
	start = rank * localsize;
	end = start + localsize - 1;

	/* add overlap to the end of everyone's chunk... */
	end += overlap;

	/* except the last processor, of course */
	if (rank == size - 1)
		end = filesize - 1;	/* end is inclusive */

	localsize = end - start + 1;

	/* allocate memory */
	chunk = malloc((localsize + 1) * sizeof(char));

	/* everyone reads in their part */
	MPI_File_read_at_all(*in, start, chunk, localsize, MPI_CHAR,
			MPI_STATUS_IGNORE);
	chunk[localsize] = '\0';

	/*
	 * everyone calculate what their start and end *really* are by going
	 * from the first newline after start to the first newline after the
	 * overlap region starts (eg, after end - overlap + 1)
	 */

	int locstart = 0, locend = localsize;
	if (rank != 0) {
		while (chunk[locstart] != '\n')
			locstart++;
		locstart++;
	}
	if (rank != size - 1) {
		locend -= overlap;
		while (chunk[locend] != '\n')
			locend++;
	}
	localsize = locend - locstart + 1;

	/* Now let's copy our actual data over into a new array, with no overlaps */
	char *data = (char *) malloc((localsize + 1) * sizeof(char));
	memcpy(data, &(chunk[locstart]), localsize);
	data[localsize] = '\0';
	free(chunk);

	/* Now we'll count the number of lines */
	*nlines = 0;
	for (int i = 0; i < localsize; i++)
		if (data[i] == '\n')
			(*nlines)++;

	/* Now the array lines will point into the data array at the start of each line */
	/* assuming nlines > 1 */
	*lines = (char **) malloc((*nlines) * sizeof(char *));
	(*lines)[0] = strtok(data, "\n");
	for (int i = 1; i < (*nlines); i++)
		(*lines)[i] = strtok(NULL, "\n");

	return;
}
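readlines is a helper rather than a complete program. A minimal hypothetical driver (the file name, the 100-byte overlap, and the output are illustrative assumptions) could look like:

/* Hypothetical driver for readlines(); assumes readlines() is declared
 * above and that argv[1] names an existing text file. */
int main(int argc, char **argv)
{
    MPI_File in;
    char **lines;
    int nlines, rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_File_open(MPI_COMM_WORLD, argv[1], MPI_MODE_RDONLY,
                  MPI_INFO_NULL, &in);
    /* the overlap must be at least as long as the longest line */
    readlines(&in, rank, size, 100, &lines, &nlines);
    printf("rank %d read %d lines\n", rank, nlines);

    MPI_File_close(&in);
    MPI_Finalize();
    return 0;
}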
Example #15
int main(int argc, char** argv)
{

    MPI_File fh;
    MPI_Datatype file_type, mem_type;
    int *data = NULL;
    int *verify = NULL;
    int data_size = DATA_SIZE;
    int i, j,k, nr_errors=0;
    MPI_Aint disp[BLK_COUNT];
    int block_lens[BLK_COUNT];
    char* filename = "unnamed.dat";

    MPI_Init (&argc, &argv);
    disp[0] = (MPI_Aint)(PAD);
    disp[1] = (MPI_Aint)(data_size*1 + PAD);
    disp[2] = (MPI_Aint)(data_size*2 + PAD);

    block_lens[0] = data_size;
    block_lens[1] = data_size;
    block_lens[2] = data_size;

    data = malloc(data_size);
    verify = malloc(data_size*BLK_COUNT + HEADER + PAD);
    for(i=0 ; i<data_size/sizeof(int) ; i++)
        data[i] = i;

    MPI_Type_create_hindexed_block(BLK_COUNT, data_size, disp, MPI_BYTE, &file_type);
    MPI_Type_commit(&file_type);

    MPI_Type_create_hvector(BLK_COUNT, data_size, 0, MPI_BYTE, &mem_type);
    MPI_Type_commit(&mem_type);

    if( 1 < argc ) filename = argv[1];

    /* pass the raw error code to CHECK, consistent with the calls below */
    CHECK(MPI_File_open (MPI_COMM_WORLD, filename,
		MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE,
                      MPI_INFO_NULL, &fh));

    CHECK(MPI_File_set_view(fh, HEADER, MPI_BYTE,
                                         file_type, "native", MPI_INFO_NULL));

    /* write everything */
    CHECK(MPI_File_write_at_all (fh, 0,
                                 data, 1, mem_type,
                                 MPI_STATUS_IGNORE));
    /* verify */
    CHECK(MPI_File_set_view(fh, 0, MPI_BYTE, MPI_BYTE,
		"native", MPI_INFO_NULL));
    CHECK(MPI_File_read_at_all(fh, 0,
		verify, (HEADER+PAD+BLK_COUNT*DATA_SIZE)/sizeof(int), MPI_INT,
		MPI_STATUS_IGNORE));

    /* header and block padding should have no data */
    for (i=0; i<(HEADER+PAD)/sizeof(int); i++) {
	if (verify[i] != 0) {
	    nr_errors++;
	    fprintf(stderr, "expected 0, read %d\n", verify[i]);
	}
    }
    /* blocks are replicated */
    for (j=0; j<BLK_COUNT; j++ ) {
	for (k=0; k<(DATA_SIZE/sizeof(int)); k++) {
	    if (verify[(HEADER+PAD)/sizeof(int) + k + j*(DATA_SIZE/sizeof(int))] !=
		    data[k]) {
		nr_errors++;
		fprintf(stderr, "expcted %d, read %d\n", data[k],
			verify[(HEADER+PAD)/sizeof(int) + k + j*(DATA_SIZE/sizeof(int))]);
	    }
	}
    }

    MPI_File_close(&fh);

    MPI_Type_free (&mem_type);
    MPI_Type_free(&file_type);

    if (nr_errors == 0) printf(" No Errors\n");

    MPI_Finalize ();

    free(data);
    return 0;
}
Example #16
int main(int argc, char **argv)
{
    int *buf, i, mynod, nprocs, len, b[3];
    int errs = 0, toterrs;
    MPI_Aint d[3];
    MPI_File fh;
    MPI_Status status;
    char *filename;
    MPI_Datatype typevec, newtype, t[3];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &mynod);

    if (nprocs != 2) {
        fprintf(stderr, "Run this program on two processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

/* process 0 takes the file name as a command-line argument and
   broadcasts it to other processes */
    if (!mynod) {
        i = 1;
        while ((i < argc) && strcmp("-fname", *argv)) {
            i++;
            argv++;
        }
        if (i >= argc) {
            fprintf(stderr, "\n*#  Usage: noncontig_coll -fname filename\n\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        argv++;
        len = strlen(*argv);
        filename = (char *) malloc(len + 1);
        strcpy(filename, *argv);
        MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(filename, len + 1, MPI_CHAR, 0, MPI_COMM_WORLD);
    } else {
        MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
        filename = (char *) malloc(len + 1);
        MPI_Bcast(filename, len + 1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }

    buf = (int *) malloc(SIZE * sizeof(int));

    MPI_Type_vector(SIZE / 2, 1, 2, MPI_INT, &typevec);

    b[0] = b[1] = b[2] = 1;
    d[0] = 0;
    d[1] = mynod * sizeof(int);
    d[2] = SIZE * sizeof(int);
    t[0] = MPI_LB;
    t[1] = typevec;
    t[2] = MPI_UB;

    MPI_Type_struct(3, b, d, t, &newtype);
    MPI_Type_commit(&newtype);
    MPI_Type_free(&typevec);

    if (!mynod) {
#if VERBOSE
        fprintf(stderr,
                "\ntesting noncontiguous in memory, noncontiguous in file using collective I/O\n");
#endif
        MPI_File_delete(filename, MPI_INFO_NULL);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    MPI_CHECK(MPI_File_open(MPI_COMM_WORLD, filename,
                            MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh));

    MPI_CHECK(MPI_File_set_view(fh, 0, MPI_INT, newtype, "native", MPI_INFO_NULL));

    for (i = 0; i < SIZE; i++)
        buf[i] = i + mynod * SIZE;
    MPI_CHECK(MPI_File_write_all(fh, buf, 1, newtype, &status));

    MPI_Barrier(MPI_COMM_WORLD);

    for (i = 0; i < SIZE; i++)
        buf[i] = -1;

    MPI_CHECK(MPI_File_read_at_all(fh, 0, buf, 1, newtype, &status));

    for (i = 0; i < SIZE; i++) {
        if (!mynod) {
            if ((i % 2) && (buf[i] != -1)) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be -1\n", mynod, i, buf[i]);
            }
            if (!(i % 2) && (buf[i] != i)) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be %d\n", mynod, i, buf[i], i);
            }
        } else {
            if ((i % 2) && (buf[i] != i + mynod * SIZE)) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be %d\n",
                        mynod, i, buf[i], i + mynod * SIZE);
            }
            if (!(i % 2) && (buf[i] != -1)) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be -1\n", mynod, i, buf[i]);
            }
        }
    }

    MPI_CHECK(MPI_File_close(&fh));

    MPI_Barrier(MPI_COMM_WORLD);

    if (!mynod) {
#if VERBOSE
        fprintf(stderr,
                "\ntesting noncontiguous in memory, contiguous in file using collective I/O\n");
#endif
        MPI_File_delete(filename, MPI_INFO_NULL);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    MPI_CHECK(MPI_File_open(MPI_COMM_WORLD, filename,
                            MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh));

    for (i = 0; i < SIZE; i++)
        buf[i] = i + mynod * SIZE;
    MPI_CHECK(MPI_File_write_at_all(fh, mynod * (SIZE / 2) * sizeof(int),
                                    buf, 1, newtype, &status));

    MPI_Barrier(MPI_COMM_WORLD);

    for (i = 0; i < SIZE; i++)
        buf[i] = -1;

    MPI_CHECK(MPI_File_read_at_all(fh, mynod * (SIZE / 2) * sizeof(int), buf, 1, newtype, &status));

    for (i = 0; i < SIZE; i++) {
        if (!mynod) {
            if ((i % 2) && (buf[i] != -1)) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be -1\n", mynod, i, buf[i]);
            }
            if (!(i % 2) && (buf[i] != i)) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be %d\n", mynod, i, buf[i], i);
            }
        } else {
            if ((i % 2) && (buf[i] != i + mynod * SIZE)) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be %d\n",
                        mynod, i, buf[i], i + mynod * SIZE);
            }
            if (!(i % 2) && (buf[i] != -1)) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be -1\n", mynod, i, buf[i]);
            }
        }
    }

    MPI_CHECK(MPI_File_close(&fh));

    MPI_Barrier(MPI_COMM_WORLD);

    if (!mynod) {
#if VERBOSE
        fprintf(stderr,
                "\ntesting contiguous in memory, noncontiguous in file using collective I/O\n");
#endif
        MPI_File_delete(filename, MPI_INFO_NULL);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    MPI_CHECK(MPI_File_open(MPI_COMM_WORLD, filename,
                            MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh));

    MPI_CHECK(MPI_File_set_view(fh, 0, MPI_INT, newtype, "native", MPI_INFO_NULL));

    for (i = 0; i < SIZE; i++)
        buf[i] = i + mynod * SIZE;
    MPI_CHECK(MPI_File_write_all(fh, buf, SIZE, MPI_INT, &status));

    MPI_Barrier(MPI_COMM_WORLD);

    for (i = 0; i < SIZE; i++)
        buf[i] = -1;

    MPI_CHECK(MPI_File_read_at_all(fh, 0, buf, SIZE, MPI_INT, &status));

    for (i = 0; i < SIZE; i++) {
        if (!mynod) {
            if (buf[i] != i) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be %d\n", mynod, i, buf[i], i);
            }
        } else {
            if (buf[i] != i + mynod * SIZE) {
                errs++;
                fprintf(stderr, "Process %d: buf %d is %d, should be %d\n",
                        mynod, i, buf[i], i + mynod * SIZE);
            }
        }
    }

    MPI_CHECK(MPI_File_close(&fh));

    MPI_Allreduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (mynod == 0) {
        if (toterrs > 0) {
            fprintf(stderr, "Found %d errors\n", toterrs);
        } else {
            fprintf(stdout, " No Errors\n");
        }
    }

    MPI_Type_free(&newtype);
    free(buf);
    free(filename);
    MPI_Finalize();
    return 0;
}