Example #1
size_t SID_fread_ordered(void *buffer, size_t size_per_item, size_t n_items, SID_fp *fp){
  size_t r_val;
#if USE_MPI
#if USE_MPI_IO
  int    r_val_i;
  MPI_Status status;
  MPI_File_read_ordered(fp->fp,
                        buffer,
                        size_per_item*n_items,
                        MPI_BYTE,
                        &status);
  MPI_Get_count(&status,MPI_BYTE,&r_val_i);
  r_val=(size_t)r_val_i/size_per_item;
#else
  r_val=fread(buffer,
              size_per_item,
              n_items,
              fp->fp);
#endif
#else
  r_val=fread(buffer,
              size_per_item,
              n_items,
              fp->fp);
#endif
  return(r_val);
}
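The wrapper above recovers the number of items actually read by asking the status for its byte count and dividing by the item size. As a minimal standalone sketch (the file name "data.bin" and the buffer size are illustrative and not taken from the SID code), the same pattern can be written directly against the MPI API:

/* Minimal sketch, assuming an existing file "data.bin": every rank performs a
 * collective ordered read and queries the status for the element count. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_File   fh;
    MPI_Status status;
    double     buf[100];
    int        nread = 0;

    MPI_Init(&argc, &argv);
    MPI_File_open(MPI_COMM_WORLD, "data.bin", MPI_MODE_RDONLY,
                  MPI_INFO_NULL, &fh);

    /* Ranks read 100 doubles each, in rank order, at the shared file pointer. */
    MPI_File_read_ordered(fh, buf, 100, MPI_DOUBLE, &status);
    MPI_Get_count(&status, MPI_DOUBLE, &nread);   /* elements actually read */
    printf("read %d doubles\n", nread);

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}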
Example #2
FORTRAN_API void FORT_CALL mpi_file_read_ordered_(MPI_Fint *fh,void *buf,MPI_Fint *count,
                      MPI_Fint *datatype,MPI_Status *status, MPI_Fint *ierr ){
    MPI_File fh_c;
    
    fh_c = MPI_File_f2c(*fh);
    *ierr = MPI_File_read_ordered(fh_c,buf,*count,(MPI_Datatype)*datatype,status);
}
Example #3
void mpi_file_read_ordered_(MPI_Fint *fh,void *buf,MPI_Fint *count,
                      MPI_Fint *datatype,MPI_Status *status, MPI_Fint *ierr )
{
    MPI_File fh_c;
    MPI_Datatype datatype_c;
    
    fh_c = MPI_File_f2c(*fh);
    datatype_c = MPI_Type_f2c(*datatype);

    *ierr = MPI_File_read_ordered(fh_c,buf,*count,datatype_c,status);
}
Example #4
int main( int argc, char *argv[] )
{
    int errs = 0;
    int size, rank, i, *buf, rc;
    MPI_File fh;
    MPI_Comm comm;
    MPI_Status status;

    MTest_Init( &argc, &argv );

    comm = MPI_COMM_WORLD;
    MPI_File_open( comm, (char*)"test.ord", 
		   MPI_MODE_RDWR | MPI_MODE_CREATE |
		   MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );

    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );
    buf = (int *)malloc( size * sizeof(int) );
    buf[0] = rank;
    rc = MPI_File_write_ordered( fh, buf, 1, MPI_INT, &status );
    if (rc) {
	MTestPrintErrorMsg( "File_write_ordered", rc );
	errs++;
    }
    /* make sure all writes finish before we seek/read */
    MPI_Barrier(comm);
    
    /* Set the individual pointer to 0, since we want to use a read_all */
    MPI_File_seek( fh, 0, MPI_SEEK_SET ); 
    MPI_File_read_all( fh, buf, size, MPI_INT, &status );

    for (i=0; i<size; i++) {
	if (buf[i] != i) {
	    errs++;
	    fprintf( stderr, "%d: buf[%d] = %d\n", rank, i, buf[i] );
	}
    }

    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
    for (i=0; i<size; i++) buf[i] = -1;
    MPI_File_read_ordered( fh, buf, 1, MPI_INT, &status );
    if (buf[0] != rank) {
	errs++;
	fprintf( stderr, "%d: buf[0] = %d\n", rank, buf[0] );
    }

    free( buf );
    MPI_File_close( &fh );

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #5
FORT_DLL_SPEC void FORT_CALL mpi_file_read_ordered_ ( MPI_Fint *v1, void*v2, MPI_Fint *v3, MPI_Fint *v4, MPI_Fint *v5, MPI_Fint *ierr ){
#ifdef MPI_MODE_RDONLY

#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif

    if (v5 == MPI_F_STATUS_IGNORE) { v5 = (MPI_Fint*)MPI_STATUS_IGNORE; }
    *ierr = MPI_File_read_ordered( MPI_File_f2c(*v1), v2, (int)*v3, (MPI_Datatype)(*v4), (MPI_Status *)v5 );
#else
*ierr = MPI_ERR_INTERN;
#endif
}
Example #6
JNIEXPORT void JNICALL Java_mpi_File_readOrdered(
        JNIEnv *env, jobject jthis, jlong fh, jobject buf, jboolean db,
        jint off, jint count, jlong jType, jint bType, jlongArray stat)
{
    MPI_Datatype type = (MPI_Datatype)jType;
    void *ptr;
    ompi_java_buffer_t *item;
    ompi_java_getWritePtr(&ptr, &item, env, buf, db, count, type);
    MPI_Status status;
    int rc = MPI_File_read_ordered((MPI_File)fh, ptr, count, type, &status);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releaseWritePtr(ptr, item, env, buf, db, off, count, type, bType);
    ompi_java_status_set(env, stat, &status);
}
Example #7
void ompi_file_read_ordered_f(MPI_Fint *fh, char *buf, MPI_Fint *count,
			     MPI_Fint *datatype, MPI_Fint *status,
			     MPI_Fint *ierr)
{
   int c_ierr;
   MPI_File c_fh = MPI_File_f2c(*fh);
   MPI_Datatype c_type = MPI_Type_f2c(*datatype);
    OMPI_FORTRAN_STATUS_DECLARATION(c_status,c_status2)

    OMPI_FORTRAN_STATUS_SET_POINTER(c_status,c_status2,status)

   c_ierr = MPI_File_read_ordered(c_fh,
                                  OMPI_F2C_BOTTOM(buf),
                                  OMPI_FINT_2_INT(*count),
                                  c_type,
                                  c_status);
   if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

    OMPI_FORTRAN_STATUS_RETURN(c_status,c_status2,status,c_ierr)
}
Example #8
void ompi_file_read_ordered_f(MPI_Fint *fh, char *buf, MPI_Fint *count,
			     MPI_Fint *datatype, MPI_Fint *status, 
			     MPI_Fint *ierr)
{
   int c_ierr;
   MPI_File c_fh = MPI_File_f2c(*fh);
   MPI_Datatype c_type = MPI_Type_f2c(*datatype);
   MPI_Status *c_status;
#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
   MPI_Status c_status2;
#endif

   /* See if we got MPI_STATUS_IGNORE */
   if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
      c_status = MPI_STATUS_IGNORE;
   } else {
      /* If sizeof(int) == sizeof(INTEGER), then there's no
         translation necessary -- let the underlying functions write
         directly into the Fortran status */

#if OMPI_SIZEOF_FORTRAN_INTEGER == SIZEOF_INT
      c_status = (MPI_Status *) status;
#else
      c_status = &c_status2;
#endif
   }

   c_ierr = MPI_File_read_ordered(c_fh, 
                                  OMPI_F2C_BOTTOM(buf),
                                  OMPI_FINT_2_INT(*count),
                                  c_type,
                                  c_status);
   if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
   if (MPI_SUCCESS == c_ierr &&
       MPI_STATUS_IGNORE != c_status) {
      MPI_Status_c2f(c_status, status);
   }
#endif
}
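Both variants above exist mainly to translate the Fortran status argument into a C MPI_Status and back. A minimal sketch of that conversion in isolation, using the standard MPI_Status_c2f/MPI_Status_f2c helpers (the function names here are illustrative):

/* Minimal sketch: converting a status between its C and Fortran
 * representations, which is what the wrappers above arrange for.
 * f_status must point to a Fortran status array (MPI_STATUS_SIZE integers). */
#include <mpi.h>

void status_c_to_f(MPI_Status *c_status, MPI_Fint *f_status)
{
    MPI_Status_c2f(c_status, f_status);   /* C -> Fortran */
}

void status_f_to_c(MPI_Fint *f_status, MPI_Status *c_status)
{
    MPI_Status_f2c(f_status, c_status);   /* Fortran -> C */
}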
Example #9
int main(int argc, char **argv)
{
    int *buf, i, rank, nprocs, len, sum;
    int global_sum;
    int errs=0, toterrs, errcode;
    char *filename;
    MPI_File fh;
    MPI_Status status;

    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    double wr_stime, wr_etime, wr_time, wr_sumtime;
    double rd_stime, rd_etime, rd_time, rd_sumtime;

/* process 0 takes the file name as a command-line argument and
   broadcasts it to other processes */
    if (!rank) {
	i = 1;
	while ((i < argc) && strcmp("-fname", *argv)) {
	    i++;
	    argv++;
	}
	if (i >= argc) {
	    fprintf(stderr, "\n*#  Usage: shared_fp -fname filename\n\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	argv++;
	len = strlen(*argv);
	filename = (char *) malloc(len+10);
	strcpy(filename, *argv);
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Bcast(filename, len+10, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	filename = (char *) malloc(len+10);
	MPI_Bcast(filename, len+10, MPI_CHAR, 0, MPI_COMM_WORLD);
    }

    buf = (int *) malloc(COUNT * sizeof(int));

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    for (i=0; i<COUNT; i++) buf[i] = COUNT*rank + i;

    errcode = MPI_File_open(MPI_COMM_WORLD, filename,
		    MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "MPI_File_open");
    }

    wr_stime = MPI_Wtime();

    errcode = MPI_File_write_ordered(fh, buf, COUNT, MPI_INT, &status);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "MPI_File_write_ordered");
    }
    wr_etime = MPI_Wtime();

    for (i=0; i<COUNT; i++) buf[i] = 0;

    MPI_Barrier(MPI_COMM_WORLD);

    rd_stime = MPI_Wtime();
    errcode = MPI_File_seek_shared(fh, 0, MPI_SEEK_SET);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "MPI_File_seek_shared");
    }

    errcode = MPI_File_read_ordered(fh, buf, COUNT, MPI_INT, &status);
    if (errcode != MPI_SUCCESS) {
	    handle_error(errcode, "MPI_File_read_ordered");
    }

    rd_etime = MPI_Wtime();
    MPI_File_close(&fh);

    sum = 0;
    for (i=0; i<COUNT; i++) sum += buf[i];

    MPI_Allreduce(&sum, &global_sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

    wr_time = wr_etime - wr_stime;
    rd_time = rd_etime - rd_stime;

    MPI_Allreduce(&wr_time, &wr_sumtime, 1,
        MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    MPI_Allreduce(&rd_time, &rd_sumtime, 1,
        MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    if (global_sum != (((COUNT*nprocs - 1)*(COUNT*nprocs))/2)) {
	errs++;
	fprintf(stderr, "Error: sum %d, global_sum %d, %d\n",
		sum, global_sum,(((COUNT*nprocs - 1)*(COUNT*nprocs))/2));
    }

    free(buf);
    free(filename);

    MPI_Allreduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    if (rank == 0) {
	if( toterrs > 0) {
	    fprintf( stderr, "Found %d errors\n", toterrs );
	}
	else {
	    fprintf( stdout, " No Errors\n" );
#ifdef TIMING
            fprintf( stderr, "nprocs: %d bytes: %zu write: %f read %f\n",
                 nprocs, COUNT*sizeof(int), wr_sumtime, rd_sumtime);
#endif
	}
    }

    MPI_Finalize();
    return 0;
}
Example #10
File: tris.c  Project: acekat/PPAR
int main(int argc, char* argv[])
{
	if (argc != 3) {
		fprintf(stderr, "%s\n", usage);
		return 0;
	}

	/* Initialization */
	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
	MPI_Comm_size(MPI_COMM_WORLD, &nb_proc);
	
	MPI_Status  status;

	int nb_elem = atoi(argv[1]);	// total number of elements
	int sort_type = atoi(argv[2]);	// sorting algorithm to use

	k = nb_elem / nb_proc;

	int *tab_sort = (int *)malloc(k*sizeof(int));
	int *tab_tmp = (int *)malloc(k*sizeof(int));
	
	if ((tab_tmp == NULL) || (tab_sort == NULL)) {
		fprintf(stderr, "Error allocating memory for the array\n");
		MPI_Finalize();
		exit(1);
	}

	int left = my_rank-1;
	int right = my_rank+1;

	// initialize the local array
	P0 printf("Initializing the array...");
	switch (sort_type) {
		case 1:	
		case 2: init_rand(tab_tmp, k); break;
		case 3:
		case 4:
		case 5: init_rand(tab_sort, k); break;
	}
	P0 printf(" OK!\n");

	P0 printf("Computing...\n");

	// start timing
	double start, end;
	start = MPI_Wtime();
	
	// choose the sorting algorithm
	switch (sort_type) {
		case 1: PRAM(tab_tmp, tab_sort); break;
		case 2: PRAM_omp(tab_tmp, tab_sort); break;
		case 3: quick_sort(tab_sort, k); break;
		case 4: quick_sort_omp(tab_sort, k); break;
		case 5: qsort(tab_sort, k, sizeof(int), compare); break;
	}

	// odd-even transposition sort
	int step;
	for (step = 0; step < nb_proc; step++) {
		if ((my_rank%2) - (step%2) == 0) {
			if (my_rank != nb_proc-1) {
				MPI_Recv(tab_tmp, k, MPI_INT, right, TAG_TAB, MPI_COMM_WORLD, &status);
				merge_sort(tab_sort, tab_tmp);
				MPI_Send(tab_tmp, k, MPI_INT, right, TAG_TAB, MPI_COMM_WORLD);
			}
		}
		else {
			if (my_rank != 0) {
				MPI_Send(tab_sort, k, MPI_INT, left, TAG_TAB, MPI_COMM_WORLD);
				MPI_Recv(tab_sort, k, MPI_INT, left, TAG_TAB, MPI_COMM_WORLD, &status);
			}
		}
	}
	
	// stop timing
	end = MPI_Wtime();

	print_results(end - start);

	// write the results to the file
	if (nb_elem <= NMAX) {
		P0 printf("Writing the file...");

		MPI_File file; 
		MPI_Offset my_offset;
		char filename[strlen("/Vrac/ppar_cassat_ducamain_sort")+1];
		strcpy(filename, "/Vrac/ppar_cassat_ducamain_sort");	

		my_offset = my_rank * sizeof(int) * k;
		MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &file);
		MPI_File_write_at(file, my_offset, tab_sort, k, MPI_INT, &status);
		// printf("(%d) wrote: ", my_rank);
		// print_tab(tab_sort, k);
		
		// wait until all ranks have written
		MPI_Barrier(MPI_COMM_WORLD);
		
		MPI_File_read_ordered(file, tab_sort, k, MPI_INT, &status);
		MPI_File_close(&file);
		// printf("(%d) read: ", my_rank);
		// print_tab(tab_sort, k);

		P0 printf(" OK!\n");
	}

	// Verify the sort
	P0 printf("Verifying the sort...\n");

	if (!check_sort(tab_sort, k) && (my_rank == 0))
		printf("\tSort is correct!\n");

	#ifdef BENCH
	P0 fprintf(stderr, "\t%d\t%d\t%d", sort_type, nb_elem, nb_proc);

	#ifdef _OPENMP
	#pragma omp parallel
	{
		if ((my_rank == 0) && (omp_get_thread_num() == 0))
			fprintf(stderr, "\t%d", omp_get_num_threads());
	}
	#endif
	
	P0 fprintf(stderr, "\n");
	#endif 

	free(tab_sort);
	free(tab_tmp);

	/* Finalization */
	MPI_Finalize();
	return 0;
}
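Since the example writes each block with an explicit offset, the read-back could equally use MPI_File_read_at instead of the shared file pointer. A minimal sketch of that alternative (the function name is illustrative, not part of the original project):

/* Minimal sketch: explicit-offset read of one rank's block, equivalent to the
 * ordered read above when the shared pointer starts at the beginning of the file. */
#include <mpi.h>

static void read_my_block(MPI_File fh, int my_rank, int k, int *dst)
{
    MPI_Status status;
    MPI_Offset offset = (MPI_Offset)my_rank * k * sizeof(int);
    MPI_File_read_at(fh, offset, dst, k, MPI_INT, &status);
}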
Example #11
int main( int argc, char *argv[] )
{
    int errs = 0;
    int size, rank, i, *buf, count, rc;
    MPI_File fh;
    MPI_Comm comm;
    MPI_Status status;

    MTest_Init( &argc, &argv );

    comm = MPI_COMM_WORLD;
    rc = MPI_File_open( comm, (char*)"test.ord", 
			MPI_MODE_RDWR | MPI_MODE_CREATE |
			MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
    if (rc) {
	MTestPrintErrorMsg( "File_open", rc );
	errs++;
	/* If the open fails, there isn't anything else that we can do */
	goto fn_fail;
    }


    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );
    buf = (int *)malloc( size * sizeof(int) );
    buf[0] = rank;
    /* Write to file */
    rc = MPI_File_write_ordered( fh, buf, 1, MPI_INT, &status );
    if (rc) {
	MTestPrintErrorMsg( "File_write_ordered", rc );
	errs++;
    }
    else {
	MPI_Get_count( &status, MPI_INT, &count );
	if (count != 1) {
	    errs++;
	    fprintf( stderr, "Wrong count (%d) on write-ordered\n", count );
	}
    }

    /* Set the individual pointer to 0, since we want to use a read_all */
    MPI_File_seek( fh, 0, MPI_SEEK_SET ); 

    /* Read nothing (check status) */
    memset( &status, 0xff, sizeof(MPI_Status) );
    MPI_File_read( fh, buf, 0, MPI_INT, &status );
    MPI_Get_count( &status, MPI_INT, &count );
    if (count != 0) {
	errs++;
	fprintf( stderr, "Count not zero (%d) on read\n", count );
    }

    /* Write nothing (check status) */
    memset( &status, 0xff, sizeof(MPI_Status) );
    MPI_File_write( fh, buf, 0, MPI_INT, &status );
    MPI_Get_count( &status, MPI_INT, &count );
    if (count != 0) {
	errs++;
	fprintf( stderr, "Count not zero (%d) on write\n", count );
    }

    /* Read shared nothing (check status) */
    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
    /* Read nothing (check status) */
    memset( &status, 0xff, sizeof(MPI_Status) );
    MPI_File_read_shared( fh, buf, 0, MPI_INT, &status );
    MPI_Get_count( &status, MPI_INT, &count );
    if (count != 0) {
	errs++;
	fprintf( stderr, "Count not zero (%d) on read shared\n", count );
    }
    
    /* Write nothing (check status) */
    memset( &status, 0xff, sizeof(MPI_Status) );
    MPI_File_write_shared( fh, buf, 0, MPI_INT, &status );
    MPI_Get_count( &status, MPI_INT, &count );
    if (count != 0) {
	errs++;
	fprintf( stderr, "Count not zero (%d) on write\n", count );
    }

    MPI_Barrier( comm );

    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
    for (i=0; i<size; i++) buf[i] = -1;
    MPI_File_read_ordered( fh, buf, 1, MPI_INT, &status );
    if (buf[0] != rank) {
	errs++;
	fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );
    }

    free( buf );

    MPI_File_close( &fh );

 fn_fail:
    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #12
/*
 * The access_style hint is explicitly described as modifiable.  Values include
 * read_once, read_mostly, write_once, write_mostly, random.
 */
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int buf[10];
    int rank;
    MPI_Comm comm;
    MPI_Status status;
    MPI_File fh;
    MPI_Info infoin, infoout;
    char value[1024];
    int flag, count;

    MTest_Init(&argc, &argv);
    comm = MPI_COMM_WORLD;

    MPI_Comm_rank(comm, &rank);
    MPI_Info_create(&infoin);
    MPI_Info_set(infoin, (char *) "access_style", (char *) "write_once,random");
    MPI_File_open(comm, (char *) "testfile", MPI_MODE_RDWR | MPI_MODE_CREATE, infoin, &fh);
    buf[0] = rank;
    err = MPI_File_write_ordered(fh, buf, 1, MPI_INT, &status);
    if (err) {
        errs++;
        MTestPrintError(err);
    }

    MPI_Info_set(infoin, (char *) "access_style", (char *) "read_once");
    err = MPI_File_seek_shared(fh, 0, MPI_SEEK_SET);
    if (err) {
        errs++;
        MTestPrintError(err);
    }

    err = MPI_File_set_info(fh, infoin);
    if (err) {
        errs++;
        MTestPrintError(err);
    }
    MPI_Info_free(&infoin);
    buf[0] = -1;

    err = MPI_File_read_ordered(fh, buf, 1, MPI_INT, &status);
    if (err) {
        errs++;
        MTestPrintError(err);
    }
    MPI_Get_count(&status, MPI_INT, &count);
    if (count != 1) {
        errs++;
        printf("Expected to read one int, read %d\n", count);
    }
    if (buf[0] != rank) {
        errs++;
        printf("Did not read expected value (%d)\n", buf[0]);
    }

    err = MPI_File_get_info(fh, &infoout);
    if (err) {
        errs++;
        MTestPrintError(err);
    }
    MPI_Info_get(infoout, (char *) "access_style", 1024, value, &flag);
    /* Note that an implementation is allowed to ignore the set_info,
     * so we'll accept either the original or the updated version */
    if (!flag) {
        ;
        /*
         * errs++;
         * printf("Access style hint not saved\n");
         */
    } else {
        if (strcmp(value, "read_once") != 0 && strcmp(value, "write_once,random") != 0) {
            errs++;
            printf("value for access_style unexpected; is %s\n", value);
        }
    }
    MPI_Info_free(&infoout);
    err = MPI_File_close(&fh);
    if (err) {
        errs++;
        MTestPrintError(err);
    }
    MPI_Barrier(comm);
    MPI_Comm_rank(comm, &rank);
    if (rank == 0) {
        err = MPI_File_delete((char *) "testfile", MPI_INFO_NULL);
        if (err) {
            errs++;
            MTestPrintError(err);
        }
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}