Example No. 1
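A routine from a Portuguese-language sorting program (the globals tupla, ret, arquivofinal, meu_ranque and buf_leitura, and the constants TAMTUPLA and TAMBUF, are defined elsewhere). It commits a contiguous tuple datatype, collectively opens arquivofinal.dat for writing, sets a flat MPI_INT view, and writes one tuple per call with MPI_File_write_ordered so the tuples land in the file in rank order.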
void escrita()
{
	int i;

	/* TAMTUPLA, TAMBUF and the globals tupla, ret, arquivofinal,
	   meu_ranque and buf_leitura are assumed to be defined elsewhere. */
	MPI_Type_contiguous(TAMTUPLA, MPI_INT, &tupla);
	MPI_Type_commit(&tupla);

	ret = MPI_File_open(MPI_COMM_WORLD, "arquivofinal.dat",
						MPI_MODE_WRONLY | MPI_MODE_CREATE,
						MPI_INFO_NULL, &arquivofinal);
	if (ret == MPI_SUCCESS)
		printf("Output file opened successfully on process %d\n", meu_ranque);
	else
	{
		printf("Error opening output file on process %d\n", meu_ranque);
		MPI_Abort(MPI_COMM_WORLD, 1);
	}

	MPI_File_set_view(arquivofinal, 0,
					  MPI_INT, MPI_INT,
					  "native", MPI_INFO_NULL);

	/* Write one tuple per call; the shared file pointer serializes
	   the writes in rank order. */
	for (i = 0; i < TAMBUF; i += TAMTUPLA)
		MPI_File_write_ordered(arquivofinal, buf_leitura + i, 1, tupla, MPI_STATUS_IGNORE);

	MPI_File_close(&arquivofinal);
}
Example No. 2
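An MPICH-style Fortran-to-C wrapper. The MPI_MODE_RDONLY guard acts as a compile-time check that the MPI-IO chapter is available; when it is not, the wrapper returns MPI_ERR_INTERN.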
FORT_DLL_SPEC void FORT_CALL mpi_file_write_ordered_ ( MPI_Fint *v1, void*v2, MPI_Fint *v3, MPI_Fint *v4, MPI_Fint *v5, MPI_Fint *ierr ) {
#ifdef MPI_MODE_RDONLY
    *ierr = MPI_File_write_ordered( MPI_File_f2c(*v1), v2, *v3, (MPI_Datatype)(*v4), (MPI_Status *)(v5) );
#else
    *ierr = MPI_ERR_INTERN;
#endif
}
Example No. 3
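A minimal Fortran wrapper that converts the file handle with MPI_File_f2c but passes the Fortran datatype handle straight through, which works only where the Fortran integer handle is directly usable as an MPI_Datatype.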
FORTRAN_API void FORT_CALL mpi_file_write_ordered_(MPI_Fint *fh,void *buf,MPI_Fint *count,
        MPI_Fint *datatype,MPI_Status *status, MPI_Fint *ierr ) {
    MPI_File fh_c;

    fh_c = MPI_File_f2c(*fh);
    *ierr = MPI_File_write_ordered(fh_c,buf,*count,*datatype,status);
}
Example No. 4
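The same wrapper as Example No. 3, but with the datatype converted through MPI_Type_f2c, which is the portable way to translate a Fortran datatype handle.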
void mpi_file_write_ordered_(MPI_Fint *fh,void *buf,MPI_Fint *count,
                             MPI_Fint *datatype,MPI_Status *status, MPI_Fint *ierr ) {
    MPI_File fh_c;
    MPI_Datatype datatype_c;

    fh_c = MPI_File_f2c(*fh);
    datatype_c = MPI_Type_f2c(*datatype);

    *ierr = MPI_File_write_ordered(fh_c,buf,*count,datatype_c,status);
}
Example No. 5
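An MPICH test in which every rank writes its rank number with MPI_File_write_ordered and then checks the rank-ordered layout twice: with MPI_File_read_all through the individual file pointer and with MPI_File_read_ordered through the shared one.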
int main( int argc, char *argv[] )
{
    int errs = 0;
    int size, rank, i, *buf, rc;
    MPI_File fh;
    MPI_Comm comm;
    MPI_Status status;

    MTest_Init( &argc, &argv );

    comm = MPI_COMM_WORLD;
    MPI_File_open( comm, (char*)"test.ord", 
		   MPI_MODE_RDWR | MPI_MODE_CREATE |
		   MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );

    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );
    buf = (int *)malloc( size * sizeof(int) );
    buf[0] = rank;
    rc = MPI_File_write_ordered( fh, buf, 1, MPI_INT, &status );
    if (rc) {
	MTestPrintErrorMsg( "File_write_ordered", rc );
	errs++;
    }
    /* make sure all writes finish before we seek/read */
    MPI_Barrier(comm);
    
    /* Set the individual pointer to 0, since we want to use a read_all */
    MPI_File_seek( fh, 0, MPI_SEEK_SET ); 
    MPI_File_read_all( fh, buf, size, MPI_INT, &status );

    for (i=0; i<size; i++) {
	if (buf[i] != i) {
	    errs++;
	    fprintf( stderr, "%d: buf[%d] = %d\n", rank, i, buf[i] );
	}
    }

    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
    for (i=0; i<size; i++) buf[i] = -1;
    MPI_File_read_ordered( fh, buf, 1, MPI_INT, &status );
    if (buf[0] != rank) {
	errs++;
	fprintf( stderr, "%d: buf[0] = %d\n", rank, buf[0] );
    }

    free( buf );
    MPI_File_close( &fh );

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example No. 6
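A fuller variant of the binding in Example No. 2: it triggers lazy Fortran initialization where needed, maps MPI_F_STATUS_IGNORE to MPI_STATUS_IGNORE, and casts the count to int explicitly.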
FORT_DLL_SPEC void FORT_CALL mpi_file_write_ordered_ ( MPI_Fint *v1, void*v2, MPI_Fint *v3, MPI_Fint *v4, MPI_Fint *v5, MPI_Fint *ierr ){
#ifdef MPI_MODE_RDONLY

#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif

    if (v5 == MPI_F_STATUS_IGNORE) { v5 = (MPI_Fint*)MPI_STATUS_IGNORE; }
    *ierr = MPI_File_write_ordered( MPI_File_f2c(*v1), v2, (int)*v3, (MPI_Datatype)(*v4), (MPI_Status *)v5 );
#else
    *ierr = MPI_ERR_INTERN;
#endif
}
Example No. 7
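The Open MPI Java (JNI) binding for File.writeOrdered: it obtains a native read pointer for the buffer (handling direct and non-direct buffers), performs the ordered write, converts an error code into a Java exception, and copies the status back to the Java side.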
JNIEXPORT void JNICALL Java_mpi_File_writeOrdered(
        JNIEnv *env, jobject jthis, jlong fh, jobject buf, jboolean db,
        jint off, jint count, jlong jType, jint bType, jlongArray stat)
{
    MPI_Datatype type = (MPI_Datatype)jType;
    void *ptr;
    ompi_java_buffer_t *item;
    ompi_java_getReadPtr(&ptr, &item, env, buf, db, off, count, type, bType);
    MPI_Status status;
    int rc = MPI_File_write_ordered((MPI_File)fh, ptr, count, type, &status);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releaseReadPtr(ptr, item, buf, db);
    ompi_java_status_set(env, stat, &status);
}
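Example No. 8
The Open MPI Fortran binding: it recognizes MPI_STATUS_IGNORE, and when Fortran INTEGER and C int differ in size it lets the C call fill a temporary status that is converted back with MPI_Status_c2f on success.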
void mpi_file_write_ordered_f(MPI_Fint *fh, char *buf, MPI_Fint *count,
                              MPI_Fint *datatype, MPI_Fint *status,
                              MPI_Fint *ierr)
{
    MPI_File c_fh = MPI_File_f2c(*fh);
    MPI_Datatype c_type = MPI_Type_f2c(*datatype);
    MPI_Status *c_status;
#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
    MPI_Status c_status2;
#endif

    /* See if we got MPI_STATUS_IGNORE */
    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        c_status = MPI_STATUS_IGNORE;
    } else {
        /* If sizeof(int) == sizeof(INTEGER), then there's no
           translation necessary -- let the underlying functions write
           directly into the Fortran status */

#if OMPI_SIZEOF_FORTRAN_INTEGER == SIZEOF_INT
        c_status = (MPI_Status *) status;
#else
        c_status = &c_status2;
#endif
    }

    *ierr = OMPI_FINT_2_INT(MPI_File_write_ordered(c_fh,
                            OMPI_F2C_BOTTOM(buf),
                            OMPI_FINT_2_INT(*count),
                            c_type,
                            c_status));

#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr) &&
            MPI_STATUS_IGNORE != c_status) {
        MPI_Status_c2f(c_status, status);
    }
#endif
}
Example No. 9
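A node-data export method from a Feel++-style VTK exporter. The disabled #if 0 block keeps an older EnsightGold code path in which MPI_File_write_ordered writes the part and coordinates records (with a nonzero count only on the master rank) followed by the per-component face fields; the active code instead fills a vtkFloatArray with per-point field values.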
void
ExporterVTK<MeshType,N>::saveNodeData( typename timeset_type::step_ptrtype step, Iterator __var, Iterator en, vtkSmartPointer<vtkout_type> out ) const
{
    while ( __var != en )
    {
        if ( !__var->second.worldComm().isActive() ) return;

        /* handle faces data */
#if 0
        if ( boption( _name="exporter.ensightgold.save-face" ) )
        {
            BOOST_FOREACH( auto m, __mesh->markerNames() )
            {
                if ( m.second[1] != __mesh->nDim-1 )
                    continue;
                VLOG(1) << "writing face with marker " << m.first << " with id " << m.second[0];
                auto pairit = __mesh->facesWithMarker( m.second[0], this->worldComm().localRank() );
                auto fit = pairit.first;
                auto fen = pairit.second;

                Feel::detail::MeshPoints<float> mp( __mesh.get(), this->worldComm(), fit, fen, true, true, true );
                int __ne = std::distance( fit, fen );

                int nverts = fit->numLocalVertices;
                DVLOG(2) << "Faces : " << __ne << "\n";

                if( this->worldComm().isMasterRank() )
                {
                    size = sizeof(buffer);
                }
                else
                {
                    size = 0;
                }
                memset(buffer, '\0', sizeof(buffer));
                strcpy( buffer, "part" );
                MPI_File_write_ordered(fh, buffer, size, MPI_CHAR, &status);

                int32_t partid = m.second[0];
                if( this->worldComm().isMasterRank() )
                {
                    size = 1;
                }
                else
                {
                    size = 0;
                }
                MPI_File_write_ordered(fh, &partid, size, MPI_INT32_T, &status);

                if( this->worldComm().isMasterRank() )
                {
                    size = sizeof(buffer);
                }
                else
                {
                    size = 0;
                }
                memset(buffer, '\0', sizeof(buffer));
                strcpy( buffer, "coordinates" );
                MPI_File_write_ordered(fh, buffer, size, MPI_CHAR, &status);

                // write values
                fit = pairit.first;
                fen = pairit.second;

                uint16_type nComponents = __var->second.nComponents;
                if ( __var->second.is_vectorial )
                    nComponents = 3;

                int nfaces = mp.ids.size();
                std::vector<float> field( nComponents*nfaces, 0.0 );
                for( ; fit != fen; ++fit )
                {
                    for ( uint16_type c = 0; c < nComponents; ++c )
                    {
                        for ( size_type j = 0; j < nverts; j++ )
                        {
                            size_type pid = mp.old2new[fit->point( j ).id()]-1;
                            size_type global_node_id = nfaces*c + pid ;
                            if ( c < __var->second.nComponents )
                            {
                                size_type thedof =  __var->second.start() +
                                                    boost::get<0>(__var->second.functionSpace()->dof()->faceLocalToGlobal( fit->id(), j, c ));

                                field[global_node_id] = __var->second.globalValue( thedof );
                            }
                            else
                            {
                                field[global_node_id] = 0;
                            }
                        }
                    }
                }
                /* Write each component separately */
                for ( uint16_type c = 0; c < __var->second.nComponents; ++c )
                {
                    MPI_File_write_ordered(fh, field.data() + nfaces * c, nfaces, MPI_FLOAT, &status);
                }
            } // boundaries loop
        }
#endif
        /* handle elements */
        uint16_type nComponents = __var->second.nComponents;

        VLOG(1) << "nComponents field: " << nComponents;
        if ( __var->second.is_vectorial )
        {
            nComponents = 3;
            VLOG(1) << "nComponents field(is_vectorial): " << nComponents;
        }

        /* we get that from the local processor */
        /* We do not need the renumbered global index */
        //auto r = markedelements(__mesh, M_markersToWrite[i], EntityProcessType::ALL);
        auto r = elements( step->mesh() );
        auto elt_it = r.template get<1>();
        auto elt_en = r.template get<2>();

        Feel::detail::MeshPoints<float> mp( step->mesh().get(), this->worldComm(), elt_it, elt_en, false, true, true, 0 );

        // previous implementation
        //size_type __field_size = mp.ids.size();
        //int nelts = std::distance(elt_it, elt_en);
        int npts = mp.ids.size();
        size_type __field_size = npts;
        if ( __var->second.is_vectorial )
            __field_size *= 3;
        std::vector<float> __field( __field_size, 0.0 );
        size_type e = 0;
        VLOG(1) << "field size=" << __field_size;
        if ( !__var->second.areGlobalValuesUpdated() )
            __var->second.updateGlobalValues();

        vtkSmartPointer<vtkFloatArray> da = vtkSmartPointer<vtkFloatArray>::New();
        da->SetName(__var->first.c_str());

        /* set array parameters */
        /* no need for preallocation if we are using Insert* methods */
        da->SetNumberOfComponents(nComponents);
        da->SetNumberOfTuples(npts);

        /*
           std::cout << this->worldComm().rank() << " nbPts:" << npts << " nComp:" << nComponents
           << " __var->second.nComponents:" << __var->second.nComponents << std::endl;
        */

        /*
           std::cout << this->worldComm().rank() << " marker=" << *mit << " nbPts:" << npts << " nComp:" << nComponents
           << " __evar->second.nComponents:" << __var->second.nComponents << std::endl;
           */

        /* loop on the elements */
        int index = 0;
        for ( ; elt_it != elt_en; ++elt_it )
        {
            VLOG(3) << "is ghost cell " << elt_it->isGhostCell();
            /* the loop over components sits outside the loop over vertices */
            for ( uint16_type c = 0; c < nComponents; ++c )
            {
                for ( uint16_type p = 0; p < step->mesh()->numLocalVertices(); ++p, ++e )
                {
                    size_type ptid = mp.old2new[elt_it->point( p ).id()];
                    size_type global_node_id = ptid * nComponents + c;
                    //size_type global_node_id = mp.ids.size()*c + ptid ;
                    //LOG(INFO) << elt_it->get().point( p ).id() << " " << ptid << " " << global_node_id << std::endl;
                    DCHECK( ptid < step->mesh()->numPoints() ) << "Invalid point id " << ptid << " element: " << elt_it->id()
                            << " local pt:" << p
                            << " mesh numPoints: " << step->mesh()->numPoints();
                    DCHECK( global_node_id < __field_size ) << "Invalid dof id : " << global_node_id << " max size : " << __field_size;

                    if ( c < __var->second.nComponents )
                    {
                        size_type dof_id = boost::get<0>( __var->second.functionSpace()->dof()->localToGlobal( elt_it->id(), p, c ) );

                        __field[global_node_id] = __var->second.globalValue( dof_id );
                        //__field[npts*c + index] = __var->second.globalValue( dof_id );
                        //DVLOG(3) << "v[" << global_node_id << "]=" << __var->second.globalValue( dof_id ) << "  dof_id:" << dof_id;
                        DVLOG(3) << "v[" << (npts*c + index) << "]=" << __var->second.globalValue( dof_id ) << "  dof_id:" << dof_id;
                    }
                    else
                    {
                        __field[global_node_id] = 0.0;
                        //__field[npts*c + index] = 0.0;
                    }
                }
            }

            /* increment index of vertex */
            index++;
        }

        /* insert data into array */
        for(int i = 0; i < npts; i++)
        {
            da->SetTuple(mp.ids[i], __field.data() + i * nComponents);
        }

        /* add data array into the vtk object */
        out->GetPointData()->AddArray(da);

        /* Set the first scalar/vector/tensor data, we process as active */
        if( __var->second.is_scalar && !(out->GetPointData()->GetScalars()))
        {
            out->GetPointData()->SetActiveScalars(da->GetName());
        }
        if( __var->second.is_vectorial && !(out->GetPointData()->GetVectors()))
        {
            out->GetPointData()->SetActiveVectors(da->GetName());
        }
        if( __var->second.is_tensor2 && !(out->GetPointData()->GetTensors()))
        {
            out->GetPointData()->SetActiveTensors(da->GetName());
        }

        DVLOG(2) << "[ExporterVTK::saveNodeData] saving " << __var->first << " done\n";

        ++__var;
    }
}
Example No. 10
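A Julia-set renderer: rank 0 saves the image, after which every rank appends one line of timing statistics to a shared stats file with MPI_File_write_ordered, so the lines appear in rank order.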
int main(int argc, char *argv[])
{
  int width, height, maxiter, flag;
  double x[2], y[2], c[2];
  char *image, *stats;

  int comm_sz, my_rank;

  double t1, t2, delta;
  
  // Get and parse the program parameters
  getParams(argv, &flag, c, x, y, &width, &height, &maxiter, &image, &stats);

  // Allocate space for the image
  int *iterations = (int*)malloc( sizeof(int) * width * height );
  assert(iterations != NULL);
  
  // Start MPI
  MPI_Init(NULL, NULL);
  MPI_Comm_size(MPI_COMM_WORLD, &comm_sz);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  
  // Begin process timer
  t1 = MPI_Wtime();
  
  /* compute set */
  int maxCount = parallelJulia(x, width, y, height, c, flag, maxiter, iterations, my_rank, comm_sz, MPI_COMM_WORLD);

  // Stop timer and compute the elapsed time
  t2 = MPI_Wtime();
  delta = t2 - t1;

  if (my_rank == 0)
  {
    /* save our picture for the viewer */
    printf("\nMaster process %d creating image...\n", my_rank);
    saveBMP(image, iterations, width, height);
    printf("\nFinished image creation\n");
  }

  // Wait for all processes to finish Julia computations
  MPI_Barrier(MPI_COMM_WORLD);

  // Open stats file
  MPI_File statsFile;

  if (MPI_File_open(MPI_COMM_WORLD, stats, MPI_MODE_CREATE|MPI_MODE_WRONLY, MPI_INFO_NULL, &statsFile) == MPI_SUCCESS) 
  {
    // Generate the statistics string (bounded with snprintf so it cannot overflow message)
    char message[100];
    snprintf(message, sizeof(message), "process %d: max iterations reached = %d, time elapsed = %lf\n", my_rank, maxCount, delta);
  
    MPI_File_write_ordered(statsFile, message, strlen(message), MPI_CHAR, MPI_STATUS_IGNORE);

    MPI_File_close(&statsFile);
  }
  else printf("Problem opening file on process %d\n", my_rank);

  // Close MPI environment
  MPI_Finalize();
  
  // Free reserved memory
  free(iterations);

  return 0;
}
Example No. 11
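A ROMIO shared-file-pointer test: rank 0 reads the file name from the -fname argument and broadcasts it, every rank writes COUNT consecutive integers with MPI_File_write_ordered (COUNT is defined elsewhere in the test), and a global checksum over the rank-ordered read-back validates the data.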
int main(int argc, char **argv)
{
    int *buf, i, rank, nprocs, len, sum;
    int global_sum;
    int errs=0, toterrs, errcode;
    char *filename;
    MPI_File fh;
    MPI_Status status;

    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    double wr_stime, wr_etime, wr_time, wr_sumtime;
    double rd_stime, rd_etime, rd_time, rd_sumtime;

/* process 0 takes the file name as a command-line argument and
   broadcasts it to other processes */
    if (!rank) {
	i = 1;
	while ((i < argc) && strcmp("-fname", *argv)) {
	    i++;
	    argv++;
	}
	if (i >= argc) {
	    fprintf(stderr, "\n*#  Usage: shared_fp -fname filename\n\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	argv++;
	len = strlen(*argv);
	filename = (char *) malloc(len+10);
	strcpy(filename, *argv);
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Bcast(filename, len+10, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	filename = (char *) malloc(len+10);
	MPI_Bcast(filename, len+10, MPI_CHAR, 0, MPI_COMM_WORLD);
    }

    buf = (int *) malloc(COUNT * sizeof(int));

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    for (i=0; i<COUNT; i++) buf[i] = COUNT*rank + i;

    errcode = MPI_File_open(MPI_COMM_WORLD, filename,
		    MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "MPI_File_open");
    }

    wr_stime = MPI_Wtime();

    errcode = MPI_File_write_ordered(fh, buf, COUNT, MPI_INT, &status);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "MPI_File_write_shared");
    }
    wr_etime = MPI_Wtime();

    for (i=0; i<COUNT; i++) buf[i] = 0;

    MPI_Barrier(MPI_COMM_WORLD);

    rd_stime = MPI_Wtime();
    errcode = MPI_File_seek_shared(fh, 0, MPI_SEEK_SET);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "MPI_File_seek_shared");
    }

    errcode = MPI_File_read_ordered(fh, buf, COUNT, MPI_INT, &status);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "MPI_File_read_shared");
    }

    rd_etime = MPI_Wtime();
    MPI_File_close(&fh);

    sum = 0;
    for (i=0; i<COUNT; i++) sum += buf[i];

    MPI_Allreduce(&sum, &global_sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

    wr_time = wr_etime - wr_stime;
    rd_time = rd_etime - rd_stime;

    MPI_Allreduce(&wr_time, &wr_sumtime, 1,
        MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    MPI_Allreduce(&rd_time, &rd_sumtime, 1,
        MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    if (global_sum != (((COUNT*nprocs - 1)*(COUNT*nprocs))/2)) {
	errs++;
	fprintf(stderr, "Error: sum %d, global_sum %d, %d\n",
		sum, global_sum,(((COUNT*nprocs - 1)*(COUNT*nprocs))/2));
    }

    free(buf);
    free(filename);

    MPI_Allreduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    if (rank == 0) {
	if( toterrs > 0) {
	    fprintf( stderr, "Found %d errors\n", toterrs );
	}
	else {
	    fprintf( stdout, " No Errors\n" );
#ifdef TIMING
            fprintf( stderr, "nprocs: %d bytes: %d write: %f read %f\n",
                 nprocs, COUNT*sizeof(int), wr_sumtime, rd_sumtime);
#endif
	}
    }

    MPI_Finalize();
    return 0;
}
Example No. 12
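An MPICH test of the status bookkeeping around the shared file pointer: it checks the count reported by MPI_File_write_ordered, verifies that zero-count reads and writes report a count of zero, and re-reads the data with MPI_File_read_ordered.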
int main( int argc, char *argv[] )
{
    int errs = 0;
    int size, rank, i, *buf, count, rc;
    MPI_File fh;
    MPI_Comm comm;
    MPI_Status status;

    MTest_Init( &argc, &argv );

    comm = MPI_COMM_WORLD;
    rc = MPI_File_open( comm, (char*)"test.ord", 
			MPI_MODE_RDWR | MPI_MODE_CREATE |
			MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
    if (rc) {
	MTestPrintErrorMsg( "File_open", rc );
	errs++;
	/* If the open fails, there isn't anything else that we can do */
	goto fn_fail;
    }


    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );
    buf = (int *)malloc( size * sizeof(int) );
    buf[0] = rank;
    /* Write to file */
    rc = MPI_File_write_ordered( fh, buf, 1, MPI_INT, &status );
    if (rc) {
	MTestPrintErrorMsg( "File_write_ordered", rc );
	errs++;
    }
    else {
	MPI_Get_count( &status, MPI_INT, &count );
	if (count != 1) {
	    errs++;
	    fprintf( stderr, "Wrong count (%d) on write-ordered\n", count );
	}
    }

    /* Set the individual pointer to 0, since we want to use a read_all */
    MPI_File_seek( fh, 0, MPI_SEEK_SET ); 

    /* Read nothing (check status) */
    memset( &status, 0xff, sizeof(MPI_Status) );
    MPI_File_read( fh, buf, 0, MPI_INT, &status );
    MPI_Get_count( &status, MPI_INT, &count );
    if (count != 0) {
	errs++;
	fprintf( stderr, "Count not zero (%d) on read\n", count );
    }

    /* Write nothing (check status) */
    memset( &status, 0xff, sizeof(MPI_Status) );
    MPI_File_write( fh, buf, 0, MPI_INT, &status );
    MPI_Get_count( &status, MPI_INT, &count );
    if (count != 0) {
	errs++;
	fprintf( stderr, "Count not zero (%d) on write\n", count );
    }

    /* Read shared nothing (check status) */
    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
    /* Read nothing (check status) */
    memset( &status, 0xff, sizeof(MPI_Status) );
    MPI_File_read_shared( fh, buf, 0, MPI_INT, &status );
    MPI_Get_count( &status, MPI_INT, &count );
    if (count != 0) {
	errs++;
	fprintf( stderr, "Count not zero (%d) on read shared\n", count );
    }
    
    /* Write nothing (check status) */
    memset( &status, 0xff, sizeof(MPI_Status) );
    MPI_File_write_shared( fh, buf, 0, MPI_INT, &status );
    MPI_Get_count( &status, MPI_INT, &count );
    if (count != 0) {
	errs++;
	fprintf( stderr, "Count not zero (%d) on write shared\n", count );
    }

    MPI_Barrier( comm );

    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
    for (i=0; i<size; i++) buf[i] = -1;
    MPI_File_read_ordered( fh, buf, 1, MPI_INT, &status );
    if (buf[0] != rank) {
	errs++;
	fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );
    }

    free( buf );

    MPI_File_close( &fh );

 fn_fail:
    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example No. 13
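An MPICH info-hints test: the file is opened with an access_style hint, each rank writes and re-reads one integer through the shared pointer, the hint is changed with MPI_File_set_info, and either the original or the updated hint is accepted on read-back because implementations may ignore set_info.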
/*
 * access style is explicitly described as modifiable.  values include
 * read_once, read_mostly, write_once, write_mostly, random
 */
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int buf[10];
    int rank;
    MPI_Comm comm;
    MPI_Status status;
    MPI_File fh;
    MPI_Info infoin, infoout;
    char value[1024];
    int flag, count;

    MTest_Init(&argc, &argv);
    comm = MPI_COMM_WORLD;

    MPI_Comm_rank(comm, &rank);
    MPI_Info_create(&infoin);
    MPI_Info_set(infoin, (char *) "access_style", (char *) "write_once,random");
    MPI_File_open(comm, (char *) "testfile", MPI_MODE_RDWR | MPI_MODE_CREATE, infoin, &fh);
    buf[0] = rank;
    err = MPI_File_write_ordered(fh, buf, 1, MPI_INT, &status);
    if (err) {
        errs++;
        MTestPrintError(err);
    }

    MPI_Info_set(infoin, (char *) "access_style", (char *) "read_once");
    err = MPI_File_seek_shared(fh, 0, MPI_SEEK_SET);
    if (err) {
        errs++;
        MTestPrintError(err);
    }

    err = MPI_File_set_info(fh, infoin);
    if (err) {
        errs++;
        MTestPrintError(err);
    }
    MPI_Info_free(&infoin);
    buf[0] = -1;

    err = MPI_File_read_ordered(fh, buf, 1, MPI_INT, &status);
    if (err) {
        errs++;
        MTestPrintError(err);
    }
    MPI_Get_count(&status, MPI_INT, &count);
    if (count != 1) {
        errs++;
        printf("Expected to read one int, read %d\n", count);
    }
    if (buf[0] != rank) {
        errs++;
        printf("Did not read expected value (%d)\n", buf[0]);
    }

    err = MPI_File_get_info(fh, &infoout);
    if (err) {
        errs++;
        MTestPrintError(err);
    }
    MPI_Info_get(infoout, (char *) "access_style", 1024, value, &flag);
    /* Note that an implementation is allowed to ignore the set_info,
     * so we'll accept either the original or the updated version */
    if (!flag) {
        ;
        /*
         * errs++;
         * printf("Access style hint not saved\n");
         */
    } else {
        if (strcmp(value, "read_once") != 0 && strcmp(value, "write_once,random") != 0) {
            errs++;
            printf("value for access_style unexpected; is %s\n", value);
        }
    }
    MPI_Info_free(&infoout);
    err = MPI_File_close(&fh);
    if (err) {
        errs++;
        MTestPrintError(err);
    }
    MPI_Barrier(comm);
    MPI_Comm_rank(comm, &rank);
    if (rank == 0) {
        err = MPI_File_delete((char *) "testfile", MPI_INFO_NULL);
        if (err) {
            errs++;
            MTestPrintError(err);
        }
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}