Example no. 1
File: MPI-api.c Project: 8l/rose
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag;
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
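The listing above comes from the rose project's MPI-api.c, which only declares the point-to-point, collective, group/communicator, and environment bindings; nothing is initialized and it is not meant to execute. For orientation only, a minimal runnable sketch using two of the calls it enumerates (MPI_Send/MPI_Recv) is shown below; the two-process layout and the payload value are illustrative assumptions, not part of the original example. Build with an MPI compiler wrapper (e.g. mpicc) and run with at least two processes.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, value = 0;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        value = 42;                                                 /* illustrative payload */
        MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);        /* blocking standard send */
    } else if (rank == 1) {
        MPI_Recv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        printf("rank 1 received %d\n", value);                     /* expect 42 */
    }

    MPI_Finalize();
    return 0;
}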
Example no. 2
unsigned long CSysSolve::FGMRES_LinSolver(const CSysVector & b, CSysVector & x, CMatrixVectorProduct & mat_vec,
                               CPreconditioner & precond, su2double tol, unsigned long m, su2double *residual, bool monitoring) {

  int rank = 0;

#ifdef HAVE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
  
  /*---  Check the subspace size ---*/
  
  if (m < 1) {
    if (rank == MASTER_NODE) cerr << "CSysSolve::FGMRES: illegal value for subspace size, m = " << m << endl;
#ifndef HAVE_MPI
    exit(EXIT_FAILURE);
#else
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Abort(MPI_COMM_WORLD,1);
    MPI_Finalize();
#endif
  }

  /*---  Check the subspace size ---*/
  
  if (m > 1000) {
    if (rank == MASTER_NODE) cerr << "CSysSolve::FGMRES: illegal value for subspace size (too high), m = " << m << endl;
#ifndef HAVE_MPI
    exit(EXIT_FAILURE);
#else
	MPI_Abort(MPI_COMM_WORLD,1);
    MPI_Finalize();
#endif
  }
  
  /*---  Define various arrays
	 Note: elements in w and z are initialized to x to avoid creating
	 a temporary CSysVector object for the copy constructor ---*/
  
  vector<CSysVector> w(m+1, x);
  vector<CSysVector> z(m+1, x);
  vector<su2double> g(m+1, 0.0);
  vector<su2double> sn(m+1, 0.0);
  vector<su2double> cs(m+1, 0.0);
  vector<su2double> y(m, 0.0);
  vector<vector<su2double> > H(m+1, vector<su2double>(m, 0.0));
  
  /*---  Calculate the norm of the rhs vector ---*/
  
  su2double norm0 = b.norm();
  
  /*---  Calculate the initial residual (actually the negative residual)
	 and compute its norm ---*/
  
  mat_vec(x, w[0]);
  w[0] -= b;
  
  su2double beta = w[0].norm();
  
  if ( (beta < tol*norm0) || (beta < eps) ) {
    
    /*---  System is already solved ---*/
    
    if (rank == MASTER_NODE) cout << "CSysSolve::FGMRES(): system solved by initial guess." << endl;
    return 0;
  }
  
  /*---  Normalize residual to get w_{0} (the negative sign is because w[0]
	 holds the negative residual, as mentioned above) ---*/
  
  w[0] /= -beta;
  
  /*---  Initialize the RHS of the reduced system ---*/
  
  g[0] = beta;
  
  /*--- Set the norm to the initial residual value ---*/
  
  norm0 = beta;

  /*---  Output header information including initial residual ---*/
  
  int i = 0;
  if ((monitoring) && (rank == MASTER_NODE)) {
    WriteHeader("FGMRES", tol, beta);
    WriteHistory(i, beta, norm0);
  }
  
  /*---  Loop over all search directions ---*/
  
  for (i = 0; i < (int)m; i++) {
    
    /*---  Check if solution has converged ---*/
    
    if (beta < tol*norm0) break;
    
    /*---  Precondition the CSysVector w[i] and store result in z[i] ---*/
    
    precond(w[i], z[i]);
    
    /*---  Add to Krylov subspace ---*/
    
    mat_vec(z[i], w[i+1]);
    
    /*---  Modified Gram-Schmidt orthogonalization ---*/
    
    ModGramSchmidt(i, H, w);
    
    /*---  Apply old Givens rotations to new column of the Hessenberg matrix
		 then generate the new Givens rotation matrix and apply it to
		 the last two elements of H[:][i] and g ---*/
    
    for (int k = 0; k < i; k++)
      ApplyGivens(sn[k], cs[k], H[k][i], H[k+1][i]);
    GenerateGivens(H[i][i], H[i+1][i], sn[i], cs[i]);
    ApplyGivens(sn[i], cs[i], g[i], g[i+1]);
    
    /*---  Set L2 norm of residual and check if solution has converged ---*/
    
    beta = fabs(g[i+1]);
    
    /*---  Output the relative residual if necessary ---*/
    
    if ((monitoring) && (rank == MASTER_NODE) && ((i+1) % 50 == 0)) WriteHistory(i+1, beta, norm0);
    
  }

  /*---  Solve the least-squares system and update solution ---*/
  
  SolveReduced(i, H, g, y);
  for (int k = 0; k < i; k++) {
    x.Plus_AX(y[k], z[k]);
  }
  
  if ((monitoring) && (rank == MASTER_NODE)) {
    cout << "# FGMRES final (true) residual:" << endl;
    cout << "# Iteration = " << i << ": |res|/|res0| = " << beta/norm0 << ".\n" << endl;
  }
  
//  /*---  Recalculate final (neg.) residual (this should be optional) ---*/
//  mat_vec(x, w[0]);
//  w[0] -= b;
//  su2double res = w[0].norm();
//
//  if (fabs(res - beta) > tol*10) {
//    if (rank == MASTER_NODE) {
//      cout << "# WARNING in CSysSolve::FGMRES(): " << endl;
//      cout << "# true residual norm and calculated residual norm do not agree." << endl;
//      cout << "# res - beta = " << res - beta << endl;
//    }
//  }
	
  (*residual) = beta;
	return (unsigned long) i;
  
}
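GenerateGivens and ApplyGivens are SU2 member functions that are not included in this excerpt. For orientation only, here is a plain-C sketch of the textbook Givens-rotation formulas they correspond to, with the argument order chosen to mirror the calls above (SU2's actual implementation works on su2double and may differ in detail).

#include <math.h>

/* Sketch only (not SU2's code): generate sine/cosine so that applying the
   rotation [ c  s ; -s  c ] to (dx, dy) yields (r, 0); *dx is overwritten by r. */
void GenerateGivensSketch(double *dx, double *dy, double *s, double *c)
{
    if (*dy == 0.0) {                        /* nothing to eliminate */
        *c = 1.0; *s = 0.0;
    } else if (fabs(*dy) > fabs(*dx)) {
        double t = *dx / *dy;
        *s = 1.0 / sqrt(1.0 + t*t);
        *c = t * (*s);
    } else {
        double t = *dy / *dx;
        *c = 1.0 / sqrt(1.0 + t*t);
        *s = t * (*c);
    }
    *dx = (*c) * (*dx) + (*s) * (*dy);       /* the surviving entry r */
    *dy = 0.0;                               /* eliminated subdiagonal entry */
}

/* Sketch only: apply a previously generated rotation (s, c) to (h1, h2) in place. */
void ApplyGivensSketch(double s, double c, double *h1, double *h2)
{
    double t = c * (*h1) + s * (*h2);
    *h2 = c * (*h2) - s * (*h1);
    *h1 = t;
}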
Example no. 3
void CSysSolve::ModGramSchmidt(int i, vector<vector<su2double> > & Hsbg, vector<CSysVector> & w) {
  
  bool Convergence = true;
  int rank = MASTER_NODE;

#ifdef HAVE_MPI
  int size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
  
  /*--- Parameter for reorthonormalization ---*/
  
  static const su2double reorth = 0.98;
  
  /*--- Get the norm of the vector being orthogonalized, and find the
  threshold for re-orthogonalization ---*/
  
  su2double nrm = dotProd(w[i+1], w[i+1]);
  su2double thr = nrm*reorth;
  
  /*--- The norm of w[i+1] < 0.0 or w[i+1] = NaN ---*/

  if ((nrm <= 0.0) || (nrm != nrm)) Convergence = false;
  
  /*--- Synchronization point to check the convergence of the solver ---*/

#ifdef HAVE_MPI
  
  unsigned short *sbuf_conv = NULL, *rbuf_conv = NULL;
  sbuf_conv = new unsigned short[1]; sbuf_conv[0] = 0;
  rbuf_conv = new unsigned short[1]; rbuf_conv[0] = 0;
  
  /*--- Convergence criteria ---*/
  
  sbuf_conv[0] = Convergence;
  SU2_MPI::Reduce(sbuf_conv, rbuf_conv, 1, MPI_UNSIGNED_SHORT, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD);
  
  /*-- Compute global convergence criteria in the master node --*/
  
  sbuf_conv[0] = 0;
  if (rank == MASTER_NODE) {
    if (rbuf_conv[0] == size) sbuf_conv[0] = 1;
    else sbuf_conv[0] = 0;
  }
  
  SU2_MPI::Bcast(sbuf_conv, 1, MPI_UNSIGNED_SHORT, MASTER_NODE, MPI_COMM_WORLD);
  
  if (sbuf_conv[0] == 1) Convergence = true;
  else Convergence = false;
  
  delete [] sbuf_conv;
  delete [] rbuf_conv;
  
#endif
  
  if (!Convergence) {
    if (rank == MASTER_NODE)
      cout << "\n !!! Error: SU2 has diverged. Now exiting... !!! \n" << endl;
#ifndef HAVE_MPI
		exit(EXIT_DIVERGENCE);
#else
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Abort(MPI_COMM_WORLD,1);
    MPI_Finalize();
#endif
  }
  
  /*--- Begin main Gram-Schmidt loop ---*/
  
  for (int k = 0; k < i+1; k++) {
    su2double prod = dotProd(w[i+1], w[k]);
    Hsbg[k][i] = prod;
    w[i+1].Plus_AX(-prod, w[k]);
    
    /*--- Check if reorthogonalization is necessary ---*/
    
    if (prod*prod > thr) {
      prod = dotProd(w[i+1], w[k]);
      Hsbg[k][i] += prod;
      w[i+1].Plus_AX(-prod, w[k]);
    }
    
    /*--- Update the norm and check its size ---*/
    
    nrm -= Hsbg[k][i]*Hsbg[k][i];
    if (nrm < 0.0) nrm = 0.0;
    thr = nrm*reorth;
  }
  
  /*--- Test the resulting vector ---*/
  
  nrm = w[i+1].norm();
  Hsbg[i+1][i] = nrm;

  /*--- Scale the resulting vector ---*/
  
  w[i+1] /= nrm;

}
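The constant reorth = 0.98 above is a standard selective-reorthogonalization heuristic: after subtracting the projection prod = dotProd(w[i+1], w[k]), the projection is applied a second time whenever prod*prod exceeds 98% of the running squared norm nrm, because in that case almost all of w[i+1] pointed along w[k] and the subtraction loses accuracy to cancellation. As a worked example, if nrm = 1.0 and prod = 0.995, then prod*prod = 0.990 > thr = 0.98, so the loop body projects against w[k] a second time; the running norm is then downdated to roughly 1.0 - 0.995^2 = 0.01 and the threshold to about 0.0098 before the next k.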
Example no. 4
/* STUB */
int PMPI_Probe( int source, int tag, MPI_Comm comm, MPI_Status *status )
{
  fprintf(stderr,"%s:%d: NOT IMPLEMENTED\n",__FILE__,__LINE__);
  return MPI_Abort((MPI_Comm)NULL, MPI_UNDEFINED); 
}
Example no. 5
void ADIOI_UFS_Fcntl(ADIO_File fd, int flag, ADIO_Fcntl_t *fcntl_struct, int *error_code)
{
    int i, ntimes;
    ADIO_Offset curr_fsize, alloc_size, size, len, done;
    ADIO_Status status;
    char *buf;
#if defined(MPICH2) || !defined(PRINT_ERR_MSG)
    static char myname[] = "ADIOI_UFS_FCNTL";
#endif

    switch(flag) {
    case ADIO_FCNTL_GET_FSIZE:
	fcntl_struct->fsize = lseek(fd->fd_sys, 0, SEEK_END);
	if (fd->fp_sys_posn != -1) 
	     lseek(fd->fd_sys, fd->fp_sys_posn, SEEK_SET);
	if (fcntl_struct->fsize == -1) {
#ifdef MPICH2
	    *error_code = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, myname, __LINE__, MPI_ERR_IO, "**io",
		"**io %s", strerror(errno));
#elif defined(PRINT_ERR_MSG)
			*error_code = MPI_ERR_UNKNOWN;
#else /* MPICH-1 */
	    *error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	    ADIOI_Error(fd, *error_code, myname);	    
#endif
	}
	else *error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_DISKSPACE:
	/* will be called by one process only */
	/* On file systems with no preallocation function, I have to 
           explicitly write 
           to allocate space. Since there could be holes in the file, 
           I need to read up to the current file size, write it back, 
           and then write beyond that depending on how much 
           preallocation is needed.
           read/write in sizes of no more than ADIOI_PREALLOC_BUFSZ */

	curr_fsize = lseek(fd->fd_sys, 0, SEEK_END);
	alloc_size = fcntl_struct->diskspace;

	size = ADIOI_MIN(curr_fsize, alloc_size);
	
	ntimes = (size + ADIOI_PREALLOC_BUFSZ - 1)/ADIOI_PREALLOC_BUFSZ;
	buf = (char *) ADIOI_Malloc(ADIOI_PREALLOC_BUFSZ);
	done = 0;

	for (i=0; i<ntimes; i++) {
	    len = ADIOI_MIN(size-done, ADIOI_PREALLOC_BUFSZ);
	    ADIO_ReadContig(fd, buf, len, MPI_BYTE, ADIO_EXPLICIT_OFFSET, done,
			    &status, error_code);
	    if (*error_code != MPI_SUCCESS) {
#ifdef MPICH2
		*error_code = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, myname, __LINE__, MPI_ERR_IO, "**io",
		    "**io %s", strerror(errno));
#elif defined(PRINT_ERR_MSG)
		FPRINTF(stderr, "ADIOI_UFS_Fcntl: To preallocate disk space, ROMIO needs to read the file and write it back, but is unable to read the file. Please give the file read permission and open it with MPI_MODE_RDWR.\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
#else /* MPICH-1 */
		*error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_PREALLOC_PERM,
			      myname, (char *) 0, (char *) 0);
		ADIOI_Error(fd, *error_code, myname);
#endif
                return;  
	    }
	    ADIO_WriteContig(fd, buf, len, MPI_BYTE, ADIO_EXPLICIT_OFFSET, 
                             done, &status, error_code);
	    if (*error_code != MPI_SUCCESS) return;
	    done += len;
	}

	if (alloc_size > curr_fsize) {
	    memset(buf, 0, ADIOI_PREALLOC_BUFSZ); 
	    size = alloc_size - curr_fsize;
	    ntimes = (size + ADIOI_PREALLOC_BUFSZ - 1)/ADIOI_PREALLOC_BUFSZ;
	    for (i=0; i<ntimes; i++) {
		len = ADIOI_MIN(alloc_size-done, ADIOI_PREALLOC_BUFSZ);
		ADIO_WriteContig(fd, buf, len, MPI_BYTE, ADIO_EXPLICIT_OFFSET, 
				 done, &status, error_code);
		if (*error_code != MPI_SUCCESS) return;
		done += len;  
	    }
	}
	ADIOI_Free(buf);
	if (fd->fp_sys_posn != -1) 
	    lseek(fd->fd_sys, fd->fp_sys_posn, SEEK_SET);
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_IOMODE:
        /* for implementing PFS I/O modes. will not occur in MPI-IO
           implementation.*/
	if (fd->iomode != fcntl_struct->iomode) {
	    fd->iomode = fcntl_struct->iomode;
	    MPI_Barrier(MPI_COMM_WORLD);
	}
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_ATOMICITY:
	fd->atomicity = (fcntl_struct->atomicity == 0) ? 0 : 1;
	*error_code = MPI_SUCCESS;
	break;

    default:
	FPRINTF(stderr, "Unknown flag passed to ADIOI_UFS_Fcntl\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
}
Example no. 6
/*
 * This test makes sure that after a failure, the correct group of failed
 * processes is returned from MPIX_Comm_failure_ack/get_acked.
 */
int main(int argc, char **argv)
{
    int rank, size, err, result, i;
    char buf[10] = " No errors";
    char error[MPI_MAX_ERROR_STRING];
    MPI_Group failed_grp, one_grp, world_grp;
    int one[] = { 1 };
    int world_ranks[] = { 0, 1, 2 };
    int failed_ranks[3];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 3) {
        fprintf(stderr, "Must run with at least 3 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    if (rank == 0) {
        err = MPI_Recv(buf, 10, MPI_CHAR, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (MPI_SUCCESS == err) {
            fprintf(stderr, "Expected a failure for receive from rank 1\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        err = MPIX_Comm_failure_ack(MPI_COMM_WORLD);
        if (MPI_SUCCESS != err) {
            int ec;
            MPI_Error_class(err, &ec);
            MPI_Error_string(err, error, &size);
            fprintf(stderr, "MPIX_Comm_failure_ack returned an error: %d\n%s", ec, error);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        err = MPIX_Comm_failure_get_acked(MPI_COMM_WORLD, &failed_grp);
        if (MPI_SUCCESS != err) {
            int ec;
            MPI_Error_class(err, &ec);
            MPI_Error_string(err, error, &size);
            fprintf(stderr, "MPIX_Comm_failure_get_acked returned an error: %d\n%s", ec, error);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
        MPI_Group_incl(world_grp, 1, one, &one_grp);
        MPI_Group_compare(one_grp, failed_grp, &result);
        if (MPI_IDENT != result) {
            fprintf(stderr, "First failed group contains incorrect processes\n");
            MPI_Group_size(failed_grp, &size);
            MPI_Group_translate_ranks(failed_grp, size, world_ranks, world_grp, failed_ranks);
            for (i = 0; i < size; i++)
                fprintf(stderr, "DEAD: %d\n", failed_ranks[i]);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        MPI_Group_free(&failed_grp);

        err = MPI_Recv(buf, 10, MPI_CHAR, 2, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (MPI_SUCCESS != err) {
            fprintf(stderr, "First receive failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        err = MPI_Recv(buf, 10, MPI_CHAR, 2, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (MPI_SUCCESS == err) {
            fprintf(stderr, "Expected a failure for receive from rank 2\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        err = MPIX_Comm_failure_get_acked(MPI_COMM_WORLD, &failed_grp);
        if (MPI_SUCCESS != err) {
            int ec;
            MPI_Error_class(err, &ec);
            MPI_Error_string(err, error, &size);
            fprintf(stderr, "MPIX_Comm_failure_get_acked returned an error: %d\n%s", ec, error);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Group_compare(one_grp, failed_grp, &result);
        if (MPI_IDENT != result) {
            fprintf(stderr, "Second failed group contains incorrect processes\n");
            MPI_Group_size(failed_grp, &size);
            MPI_Group_translate_ranks(failed_grp, size, world_ranks, world_grp, failed_ranks);
            for (i = 0; i < size; i++)
                fprintf(stderr, "DEAD: %d\n", failed_ranks[i]);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        fprintf(stdout, " No errors\n");
    }
    else if (rank == 2) {
        MPI_Ssend(buf, 10, MPI_CHAR, 0, 0, MPI_COMM_WORLD);

        exit(EXIT_FAILURE);
    }

    MPI_Group_free(&failed_grp);
    MPI_Group_free(&one_grp);
    MPI_Group_free(&world_grp);
    MPI_Finalize();
}
Example no. 7
int main(int argc, char** argv) {

  int rank, size;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  
  int mpi_lattice_size = lround(sqrt(size));
  
  if(size < 4 || ! (mpi_lattice_size*mpi_lattice_size==size) ) {
    printf("You have to use a square number of MPI processes.");
    MPI_Abort(MPI_COMM_WORLD,1);
  }
  
  if( find_option( argc, argv, "-h" ) >= 0 )
  {
      printf( "Options:\n" );
      printf( "-h to see this help\n" );
      printf( "-n <int> to set the grid size\n" );
      printf( "-o <filename> to specify the output file name\n" );
      MPI_Abort(MPI_COMM_WORLD,1);
  }
  
  int GRIDSIZE = read_int( argc, argv, "-n", DEFAULT_GRIDSIZE );
  // Check gridsize for some basic assumptions
  if(GRIDSIZE%2 || GRIDSIZE%(mpi_lattice_size)) {
    printf("Only even Gridsize allowed and\nGridsize has to be a multiple of the number of MPI procs!\n");
    MPI_Abort(MPI_COMM_WORLD,1);
  }

  FILE *f;

  if(rank==0) {
    char *savename = read_string( argc, argv, "-o", "sample_conduct.txt" );
    f = savename ? fopen( savename, "w" ) : NULL;
    if( f == NULL )
    {
        printf( "failed to open %s\n", savename );
        MPI_Abort(MPI_COMM_WORLD,1);
    }
  }
  

  int my_mpi_i,my_mpi_j;
  int my_gridsize= GRIDSIZE/(mpi_lattice_size);
  int t,i,j;
  double *T, *Tn,*Tf;
  
  my_mpi_j=rank%mpi_lattice_size;
  my_mpi_i=rank/mpi_lattice_size;
  
  // Allocate Grid with a border on every side
  int padded_grid_size = my_gridsize+2;
  if(rank == 0) {
    Tf=(double *) malloc(GRIDSIZE*GRIDSIZE*sizeof(double));
  }
  T=(double *) malloc((padded_grid_size)*(padded_grid_size)*sizeof(double));
  Tn=(double *) malloc((padded_grid_size)*(padded_grid_size)*sizeof(double));
  

  
  // remember -- our grid has a border around it!
  if(rank==0)
    init_cells(Tf,GRIDSIZE);
  else {
    int i,j;
    for (i=0;i<padded_grid_size;i++)
      for(j=0;j<padded_grid_size;j++)
        T[i*padded_grid_size+j]=0;
  }
  
  
  if(rank==0) {
    for(i=0;i<mpi_lattice_size;i++) {
      for(j=0;j<mpi_lattice_size;j++) {
        if(i==0 && j==0) {
          int k,l,m=1,n=1;
          for(k=0;k<padded_grid_size;k++)
            for(l=0;l<padded_grid_size;l++)
              T[k*padded_grid_size+l]=0;
          
          for(k=0;k<my_gridsize;k++) {
          
            for(l=0;l<my_gridsize;l++) {
            
              T[m*padded_grid_size+n]=Tf[k*(GRIDSIZE) + l];
              n++;
            }
            m++;
            n=1;
          }
          continue;
        }
        
        //re-use Tn for temp arrays
        int k,l,m=1,n=1;
        for(k=0;k<padded_grid_size;k++)
          for(l=0;l<padded_grid_size;l++)
            Tn[k*padded_grid_size+l]=0;
        
        for(k=0;k<my_gridsize;k++) {
          for(l=0;l<my_gridsize;l++) {
            Tn[m*padded_grid_size+n]=Tf[(k + my_gridsize*i)*(GRIDSIZE) + l+my_gridsize*j];
            n++;
          }
          m++;
          n=1;
        }
        int dest;
        dest=i*mpi_lattice_size+j;
        // printf("\nDest: %d from %d where %d\n  ",dest,size,mpi_lattice_size);
        MPI_Send(Tn,padded_grid_size*padded_grid_size,MPI_DOUBLE,dest,42,MPI_COMM_WORLD);
      }
    }
  }
  else {
    // printf("\nRank: %d %d %d\n  ",rank,my_mpi_i,my_mpi_j);
    MPI_Recv(T,padded_grid_size*padded_grid_size,MPI_DOUBLE,0,42,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
  }
  
  // printf("\nRank: %d %d %d\n  ",rank,my_mpi_i,my_mpi_j);
  // print(T,padded_grid_size,rank);
  // MPI_Barrier(MPI_COMM_WORLD);
  // exit(1);
  
  

  
  for(t=1;t<=TIMESTEPS;t++) { // Loop for the time steps
  
    // get the neighbors:
    put_neighbor(T,0,1,my_mpi_i,my_mpi_j,mpi_lattice_size,my_gridsize);
    get_neighbor(T,0,-1,my_mpi_i,my_mpi_j,mpi_lattice_size,my_gridsize);
    
    put_neighbor(T,1,0,my_mpi_i,my_mpi_j,mpi_lattice_size,my_gridsize);
    get_neighbor(T,-1,0,my_mpi_i,my_mpi_j,mpi_lattice_size,my_gridsize);
    
    put_neighbor(T,0,-1,my_mpi_i,my_mpi_j,mpi_lattice_size,my_gridsize);
    get_neighbor(T,0,1,my_mpi_i,my_mpi_j,mpi_lattice_size,my_gridsize);
    
    put_neighbor(T,-1,0,my_mpi_i,my_mpi_j,mpi_lattice_size,my_gridsize);
    get_neighbor(T,1,0,my_mpi_i,my_mpi_j,mpi_lattice_size,my_gridsize);

    
    MPI_Barrier(MPI_COMM_WORLD);    
    /*
    printf("\nI am: %d\n",rank);
    print(T,padded_grid_size,t);
    printf("\n");
    */



    for(i=1;i<my_gridsize+1;i++) {
      for(j=1;j<my_gridsize+1;j++) {
        
        Tn[i*padded_grid_size + j] = T[(i-1)*padded_grid_size + (j)] + T[(i+1)*padded_grid_size + (j)]
                                   + T[(i)*padded_grid_size + (j-1)] + T[(i)*padded_grid_size + (j+1)];
        Tn[i*padded_grid_size + j] /= 4;
      }
    }

    for(i=1;i<my_gridsize+1;i++) {
      for(j=1;j<my_gridsize+1;j++) {
        T[i*padded_grid_size + j] = Tn[i*padded_grid_size + j];
      }
    }
    
    /*
    MPI_Barrier(MPI_COMM_WORLD);    
    printf("\nI am: %d\n",rank);
    print(T,padded_grid_size,t);
    printf("\n");
    */
    
    if(!(t % PRINTSTEP)) {
    
      if(rank==0) {
        for(i=0;i<mpi_lattice_size;i++) {
          for(j=0;j<mpi_lattice_size;j++) {
            if(i==0 && j==0) {
              int k,l,m=1,n=1;
              for(k=0;k<my_gridsize;k++) {
                for(l=0;l<my_gridsize;l++) {
                  Tf[k*(GRIDSIZE) + l]=T[m*padded_grid_size+n];
                  n++;
                }
                m++;
                n=1;
              }
            }
            else {
              int source=i*mpi_lattice_size+j;
              MPI_Recv(Tn,padded_grid_size*padded_grid_size,MPI_DOUBLE,source,42,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
              //re-use Tn for temp arrays
              int k,l,m=1,n=1;
              for(k=0;k<my_gridsize;k++) {
                for(l=0;l<my_gridsize;l++) {
                  Tf[(k + my_gridsize*i)*(GRIDSIZE) + l+my_gridsize*j]=Tn[m*padded_grid_size+n];
                  n++;
                }
                m++;
                n=1;
              }
            }
          }
        }
      }
      else {
        MPI_Send(T,padded_grid_size*padded_grid_size,MPI_DOUBLE,0,42,MPI_COMM_WORLD);
      }

      if(rank==0) {
         // print(Tf,GRIDSIZE,t);
        save(f,Tf,GRIDSIZE,t);
        printf("Time: %d\n",t);
      }
    }
    
  }
  
  if(rank==0)
    fclose(f);
  
  
    
  MPI_Finalize();
  
  return 0;
}
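The per-step halo exchange above relies on put_neighbor/get_neighbor helpers that are not included in this excerpt, and their call ordering matters because plain blocking sends and receives can deadlock. A common alternative is MPI_Sendrecv; the sketch below covers only the two row-wise (contiguous) boundaries, with the neighbor ranks `up`/`down` and the helper name being assumptions for illustration. Column boundaries would additionally need a strided datatype such as MPI_Type_vector.

#include <mpi.h>

/* Sketch only: swap the top/bottom halo rows of the padded grid T
   (pgs = padded_grid_size; neighbors may be MPI_PROC_NULL at the domain edge). */
void exchange_rows_sketch(double *T, int pgs, int up, int down, MPI_Comm comm)
{
    int n = pgs - 2;   /* interior row length, i.e. my_gridsize */

    /* send my first interior row to `up`, receive my bottom halo row from `down` */
    MPI_Sendrecv(&T[1*pgs + 1],         n, MPI_DOUBLE, up,   0,
                 &T[(pgs-1)*pgs + 1],   n, MPI_DOUBLE, down, 0,
                 comm, MPI_STATUS_IGNORE);

    /* send my last interior row to `down`, receive my top halo row from `up` */
    MPI_Sendrecv(&T[(pgs-2)*pgs + 1],   n, MPI_DOUBLE, down, 1,
                 &T[0*pgs + 1],         n, MPI_DOUBLE, up,   1,
                 comm, MPI_STATUS_IGNORE);
}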
Example no. 8
int main(int argc, char **argv)
{
    int buf[1024], amode, flag, mynod, len, i;
    MPI_File fh;
    MPI_Status status;
    MPI_Datatype newtype;
    MPI_Offset disp, offset;
    MPI_Group group;
    MPI_Datatype etype, filetype;
    char datarep[25], *filename;

    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mynod);

/* process 0 takes the file name as a command-line argument and 
   broadcasts it to other processes */
    if (!mynod) {
	i = 1;
	while ((i < argc) && strcmp("-fname", *argv)) {
	    i++;
	    argv++;
	}
	if (i >= argc) {
	    printf("\n*#  Usage: misc  <mpiparameter> -- -fname filename\n\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	argv++;
	len = strlen(*argv);
	filename = (char *) malloc(len+1);
	strcpy(filename, *argv);
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	filename = (char *) malloc(len+1);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }


    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);

    MPI_File_write(fh, buf, 1024, MPI_INT, &status);

    MPI_File_sync(fh);

    MPI_File_get_amode(fh, &amode);
    if (!mynod) printf("testing MPI_File_get_amode\n");
    if (amode != (MPI_MODE_CREATE | MPI_MODE_RDWR))
	printf("amode is %d, should be %d\n\n", amode, MPI_MODE_CREATE |
                      MPI_MODE_RDWR);

    MPI_File_get_atomicity(fh, &flag);
    if (flag) printf("atomicity is %d, should be 0\n", flag);
    if (!mynod) printf("setting atomic mode\n");
    MPI_File_set_atomicity(fh, 1);
    MPI_File_get_atomicity(fh, &flag);
    if (!flag) printf("atomicity is %d, should be 1\n", flag);
    MPI_File_set_atomicity(fh, 0);
    if (!mynod) printf("reverting back to nonatomic mode\n");

    MPI_Type_vector(10, 10, 20, MPI_INT, &newtype);
    MPI_Type_commit(&newtype);

    MPI_File_set_view(fh, 1000, MPI_INT, newtype, "native", MPI_INFO_NULL);
    if (!mynod) printf("testing MPI_File_get_view\n");
    MPI_File_get_view(fh, &disp, &etype, &filetype, datarep);
    if ((disp != 1000) || strcmp(datarep, "native"))
	printf("disp = %I64, datarep = %s, should be 1000, native\n\n", disp, datarep);

    if (!mynod) printf("testing MPI_File_get_byte_offset\n");
    MPI_File_get_byte_offset(fh, 10, &disp);
    if (disp != (1000+20*sizeof(int))) printf("byte offset = %lld, should be %d\n\n", (long long) disp, (int) (1000+20*sizeof(int)));

    MPI_File_get_group(fh, &group);

    if (!mynod) printf("testing MPI_File_set_size\n");
    MPI_File_set_size(fh, 1000+15*sizeof(int));
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_File_sync(fh);
    MPI_File_get_size(fh, &disp);
    if (disp != 1000+15*sizeof(int)) printf("file size = %lld, should be %d\n\n", (long long) disp, (int) (1000+15*sizeof(int)));
 
    if (!mynod) printf("seeking to eof and testing MPI_File_get_position\n");
    MPI_File_seek(fh, 0, MPI_SEEK_END);
    MPI_File_get_position(fh, &disp);
    if (disp != 10) printf("file pointer posn = %lld, should be 10\n\n", (long long) disp);

    if (!mynod) printf("testing MPI_File_get_byte_offset\n");
    MPI_File_get_byte_offset(fh, disp, &offset);
    if (offset != (1000+20*sizeof(int))) printf("byte offset = %lld, should be %d\n\n", (long long) offset, (int) (1000+20*sizeof(int)));
    MPI_Barrier(MPI_COMM_WORLD);

    if (!mynod) printf("testing MPI_File_seek with MPI_SEEK_CUR\n");
    MPI_File_seek(fh, -10, MPI_SEEK_CUR);
    MPI_File_get_position(fh, &disp);
    MPI_File_get_byte_offset(fh, disp, &offset);
    if (offset != 1000)
	printf("file pointer posn in bytes = %I64, should be 1000\n\n", offset);

    if (!mynod) printf("preallocating disk space up to 8192 bytes\n");
    MPI_File_preallocate(fh, 8192);

    if (!mynod) printf("closing the file and deleting it\n");
    MPI_File_close(&fh);
    
    MPI_Barrier(MPI_COMM_WORLD);
    if (!mynod) MPI_File_delete(filename, MPI_INFO_NULL);

    MPI_Type_free(&newtype);
    MPI_Type_free(&filetype);
    MPI_Group_free(&group);
    free(filename);
    MPI_Finalize(); 
    return 0;
}
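The expected values checked in the listing above follow from the view geometry. The view starts at displacement 1000 and the filetype is MPI_Type_vector(10, 10, 20, MPI_INT), i.e. blocks of 10 visible ints separated by gaps of 10 hidden ints. MPI_File_set_size(fh, 1000+15*sizeof(int)) therefore leaves 15 ints past the displacement, of which only the first 10 lie inside the first block of the view, so seeking to the end gives a file-pointer position of 10 (in etype units). Etype offset 10 is the first element of the second block, which begins 20 ints past the displacement, hence the expected byte offset 1000 + 20*sizeof(int); seeking back by 10 etypes with MPI_SEEK_CUR returns to the start of the view at byte 1000.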
Example no. 9
void data_server(int agents_total, int world_width, int world_height)
{
	int np; 
	MPI_Comm_size(MPI_COMM_WORLD, &np);

	/* create a type for struct agent */
	const int nitems=5;
	int blocklengths[5] = {1,1,1,1,1};
	MPI_Datatype types[5] = {MPI_INT, MPI_INT, MPI_INT, MPI_FLOAT, MPI_FLOAT};
	MPI_Datatype mpi_agent_type;
	MPI_Aint offsets[5];

	offsets[0] = offsetof(agent, id);
	offsets[1] = offsetof(agent, x);
	offsets[2] = offsetof(agent, y);
	offsets[3] = offsetof(agent, z);
	offsets[4] = offsetof(agent, w);

	MPI_Type_create_struct(nitems, blocklengths, offsets, types, &mpi_agent_type);
	MPI_Type_commit(&mpi_agent_type);


	int num_comp_nodes = np -1;
	unsigned int num_bytes = agents_total * sizeof(agent);
	agent *h_agents_in, *h_agents_out;

	/* allocate input data */
	h_agents_in = (agent *)malloc(num_bytes);
	h_agents_out = (agent *)malloc(num_bytes);
	if(h_agents_in == NULL || h_agents_out == NULL)
	{
		printf("server couldn't allocate memory\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
	}

	/* initialize input data */
	init_data(h_agents_in, agents_total);

#ifdef DEBUG 
	printf("Init data\n");
	display_data(h_agents_in, agents_total);
#endif

	int world_height_node = world_height / num_comp_nodes;
//	printf("world_height: %d\n", world_height_node);
	agent h_agents_node_in[num_comp_nodes][agents_total], h_agents_node_out[num_comp_nodes][agents_total];
	for(int process = 0; process < num_comp_nodes; process++)
	{	
		for(int i = 0; i < agents_total; i++)
		{
			if(  ( h_agents_in[i].y >= (process * world_height_node) ) and ( h_agents_in[i].y < ( (process + 1) * world_height_node ) )  )
				h_agents_node_in[process][i] = h_agents_in[i];
		}
	}

/***	
	printf("copy data 0\n");
	display_data(h_agents_node_in[0], agents_total);
	printf("copy data 1\n");
	display_data(h_agents_node_in[1], agents_total);
	printf("copy data 2\n");
	display_data(h_agents_node_in[2], agents_total);
***/

	/* send data to compute nodes */
	for(int process = 0; process < num_comp_nodes; process++)
		MPI_Send(h_agents_node_in[process], agents_total, mpi_agent_type, process, 0, MPI_COMM_WORLD);

	/* Wait for nodes to compute */
	MPI_Barrier(MPI_COMM_WORLD);
	
	/* Collect output data */
	MPI_Status status;

	for(int process = 0; process < num_comp_nodes; process++)
		MPI_Recv(h_agents_node_out[process], agents_total, mpi_agent_type, process, DATA_COLLECT, MPI_COMM_WORLD, &status); 

#ifdef DEBUG
        printf("Final Data\n");	
	/* display output data */
//	display_data(h_agents_out, agents_total);
#endif
	
	/* release resources */
	free(h_agents_in);
	free(h_agents_out); 
//	free(h_agents_node_in); 
//	free(h_agents_node_out); 
}
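The agent struct itself is not part of this excerpt. For the five offsetof()/MPI datatype pairs registered above to be consistent, it presumably has three int members followed by two float members, roughly as sketched below; the exact definition in the original project may differ, and offsetof() requires <stddef.h>.

#include <stddef.h>   /* offsetof */

/* Sketch only (assumption): a layout consistent with the offsets/types used above. */
typedef struct {
	int   id;
	int   x;
	int   y;
	float z;
	float w;
} agent;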
Example no. 10
int main(int argc, char* argv[])
{
	double init_time, brick_time, lod_time;
	double start_time = 0;
	MPI_Init(&argc,&argv);
	int size,rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	if(rank==0) {
		start_time = MPI_Wtime();
	}	
	if(argc!=7 || strcmp(argv[1],"-h") == 0 || strcmp(argv[1],"--help") == 0) {
		if(rank==0)
			PrintHelp();
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);	
	}
	char *endptr = NULL;
	/* Size of the volume {x,y,z} */
	size_t *VOLUME = malloc(sizeof(size_t)*3);
	VOLUME[0]=(size_t)strtol(argv[1], &endptr, 10);
	VOLUME[1]=(size_t)strtol(argv[2], &endptr, 10);
	VOLUME[2]=(size_t)strtol(argv[3], &endptr, 10);
	/* Dimension of the brick excl. Ghostcells */
	const size_t BRICKSIZE = (size_t)strtol(argv[4], &endptr, 10);
	if(BRICKSIZE<1) {
		printf("BRICKSIZE is smaller than 1!\n");
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}
	/* Size of ghost cells for later subtraction */
	const size_t GHOSTCELLDIM = (size_t)strtol(argv[5], &endptr, 10);

	/* # bricks per dimension */
	size_t bricks_per_dimension[3];
	/* number of bricks? */
	size_t numberofbricks = NBricks(VOLUME, BRICKSIZE, bricks_per_dimension);
	if(numberofbricks == 0) {
		printf("ERROR determining number of bricks!\n");
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}

	/* INITIALIZATION */
	size_t *myoffsets = malloc(sizeof(size_t)*3);
	size_t mybricks;
	size_t bricks[size];
	size_t starting_brick[size];
	size_t mystart;
	size_t GBSIZE=BRICKSIZE+2*GHOSTCELLDIM;

	if(Init_MPI(rank, size, &mybricks, &mystart, myoffsets, bricks, starting_brick, numberofbricks, VOLUME, BRICKSIZE) != 0) {
		printf("ERROR @ Init_MPI\n");
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}
	if(rank==0) {
		init_time = MPI_Wtime();
		init_time -= start_time;
		start_time = MPI_Wtime();
	}
	/* BRICKING START */
	/* input file stream */
	MPI_File fpi;
	int err;
	err = MPI_File_open(MPI_COMM_WORLD, argv[6], MPI_MODE_RDONLY, MPI_INFO_NULL, &fpi);
	if(err) {
		printf("ERROR opening file %s\n", argv[6]);
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}

	/* output file stream */
	MPI_File fpo;
	char fn[256];
	sprintf(fn, "b_%zu_%zu_%zu_%zu^3.raw", VOLUME[0], VOLUME[1], VOLUME[2], GBSIZE);
	err = MPI_File_open(MPI_COMM_WORLD, fn, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fpo);
	if(err) {
		printf("ERROR opening file %s\n", fn);
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}

	/* read from input file, reorganize data and write into output file */
	if(brick(fpi, fpo, mystart, mybricks, GBSIZE, GHOSTCELLDIM, BRICKSIZE, VOLUME, bricks_per_dimension) != 0) {
		printf("ERROR @ Rank %d: brick() failed \n", rank);
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}
	MPI_File_close(&fpi);
	MPI_File_close(&fpo);
	if(rank==0) {
		brick_time = MPI_Wtime();
		brick_time -= start_time;
		start_time = MPI_Wtime();
	}
	/* END OF BRICKING */

	size_t lod = 1;
	/* TODO set finished correct */
	bool finished = false;
	while(!finished) {
		/* read from */
		MPI_File fin;
		MPI_File fout;
		if(lod==1) {
			err = MPI_File_open(MPI_COMM_WORLD, fn, MPI_MODE_RDONLY, MPI_INFO_NULL, &fin);
			if(err) {
				printf("Rank %d: ERROR opening file %s\n", rank, fn);
				MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
			}
		} else {
			char filename_read[256];
			sprintf(filename_read, "xmulti_%zu.raw", lod-1);
			err = MPI_File_open(MPI_COMM_WORLD, filename_read, MPI_MODE_RDONLY, MPI_INFO_NULL, &fin);
			if(err) {
				printf("Rank %d: ERROR opening file %s\n", rank, fn);
				MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
			}
		}
		/* write to */
		char filename_write[256];
		sprintf(filename_write, "xmulti_%zu.raw", lod);
		err = MPI_File_open(MPI_COMM_WORLD, filename_write, MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &fout);
		if(err) {
			printf("Rank %d: ERROR opening file %s\n", rank, filename_write);
			MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
		}
		size_t new_no_b=0;
		size_t *new_bpd = malloc(sizeof(size_t)*3);
		new_bpd[0] = 0;
		new_bpd[1] = 0;
		new_bpd[2] = 0;
		size_t *old_bpd = malloc(sizeof(size_t)*3);
		old_bpd[0] = bricks_per_dimension[0];
		old_bpd[1] = bricks_per_dimension[1];
		old_bpd[2] = bricks_per_dimension[2];
		for(size_t i=0; i<lod; i++) {
			new_no_b = NBricks(old_bpd, 2, new_bpd);
			old_bpd[0] = new_bpd[0];
			old_bpd[1] = new_bpd[1];
			old_bpd[2] = new_bpd[2];
		}
		/* Calculate next LOD */
		if(GetNewLOD(fin, fout, bricks_per_dimension, new_bpd, new_no_b,BRICKSIZE,GHOSTCELLDIM,lod,rank,size) != 0) {
			printf("ERROR: Rank %d @ GetNewLOD()\n",rank);
			MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
		}
		lod++;
		free(new_bpd);
		free(old_bpd);
		MPI_File_close(&fin);
		MPI_File_close(&fout);
		if(new_no_b<2)
			finished=true;
	}
	if(rank==0) {
		lod_time = MPI_Wtime();
		lod_time -= start_time;
		printf("Initialization: %1.3f || Bricking: %1.3f || LOD: %1.3f\n",init_time, brick_time, lod_time);
	}
	free(myoffsets);
	free(VOLUME);
	MPI_Finalize();
	return EXIT_SUCCESS;
}
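NBricks() is not included in this excerpt. From its uses above (it fills bricks_per_dimension, returns the total brick count, and is called again with a brick size of 2 to coarsen the brick lattice for each level of detail), it presumably amounts to a per-dimension ceiling division, along the lines of the sketch below; the real implementation may differ.

#include <stddef.h>

/* Sketch only (assumption): bricks per dimension as ceiling divisions, and their product. */
static size_t NBricks_sketch(const size_t volume[3], size_t bricksize,
                             size_t bricks_per_dimension[3])
{
	if (bricksize == 0)
		return 0;
	for (int d = 0; d < 3; d++)
		bricks_per_dimension[d] = (volume[d] + bricksize - 1) / bricksize;
	return bricks_per_dimension[0] * bricks_per_dimension[1] * bricks_per_dimension[2];
}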
Example no. 11
int main( int argc, char **argv )
{
    int      err = 0;
    int      *sendbuf, *recvbuf, *recvcounts;
    int      size, rank, i, j, idx, mycount, sumval;
    MPI_Comm comm;


    MTest_Init( &argc, &argv );
    comm = MPI_COMM_WORLD;

    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );
    recvcounts = (int *)malloc( size * sizeof(int) );
    if (!recvcounts) {
	fprintf( stderr, "Could not allocate %d ints for recvcounts\n", 
		 size );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    mycount = (1024 * 1024) / size;
    for (i=0; i<size; i++) 
	recvcounts[i] = mycount;
    sendbuf = (int *) malloc( mycount * size * sizeof(int) );
    if (!sendbuf) {
	fprintf( stderr, "Could not allocate %d ints for sendbuf\n", 
		 mycount * size );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    idx = 0;
    for (i=0; i<size; i++) {
	for (j=0; j<mycount; j++) {
	    sendbuf[idx++] = rank + i;
	}
    }
    recvbuf = (int *)malloc( mycount * sizeof(int) );
    if (!recvbuf) {
	fprintf( stderr, "Could not allocate %d ints for recvbuf\n", 
		 mycount );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    for (i=0; i<mycount; i++) {
	recvbuf[i] = -1;
    }

    MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );

    sumval = size * rank + ((size - 1) * size)/2;
    /* each element of recvbuf should be sum over senders p of (p + rank),
       i.e. size*rank + size*(size-1)/2 */
    for (i=0; i<mycount; i++) {
	if (recvbuf[i] != sumval) {
	    err++;
	    if (err < MAX_ERRORS) {
		fprintf( stdout, "Did not get expected value for reduce scatter\n" );
		fprintf( stdout, "[%d] Got recvbuf[%d] = %d expected %d\n",
			 rank, i, recvbuf[i], sumval );
	    }
	}
    }

    MPI_Reduce_scatter( MPI_IN_PLACE, sendbuf, recvcounts, MPI_INT, MPI_SUM, 
			comm );

    sumval = size * rank + ((size - 1) * size)/2;
    /* the in-place result for my block should again be size*rank + size*(size-1)/2 per element */
    for (i=0; i<mycount; i++) {
	if (sendbuf[rank*mycount+i] != sumval) {
	    err++;
	    if (err < MAX_ERRORS) {
		fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
		fprintf( stdout, "[%d] Got buf[%d] = %d expected %d\n", 
			 rank, i, sendbuf[rank*mycount+i], sumval );
	    }
	}
    }

    free(sendbuf);
    free(recvbuf);
    free(recvcounts);
       
    MTest_Finalize( err );

    MPI_Finalize( );

    return 0;
}
Example no. 12
RSL_LITE_PACK ( int * Fcomm0, char * buf , int * shw0 , 
           int * sendbegm0 , int * sendwm0 , int * sendbegp0 , int * sendwp0 ,
           int * recvbegm0 , int * recvwm0 , int * recvbegp0 , int * recvwp0 ,
           int * typesize0 , int * xy0 , int * pu0 , int * imemord , int * xstag0, /* not used */
           int *me0, int * np0 , int * np_x0 , int * np_y0 , 
           int * ids0 , int * ide0 , int * jds0 , int * jde0 , int * kds0 , int * kde0 ,
           int * ims0 , int * ime0 , int * jms0 , int * jme0 , int * kms0 , int * kme0 ,
           int * ips0 , int * ipe0 , int * jps0 , int * jpe0 , int * kps0 , int * kpe0 )
{
  int me, np, np_x, np_y ;
  int sendbegm , sendwm, sendbegp , sendwp ;
  int recvbegm , recvwm, recvbegp , recvwp ;
  int shw , typesize ;
  int ids , ide , jds , jde , kds , kde ;
  int ims , ime , jms , jme , kms , kme ;
  int ips , ipe , jps , jpe , kps , kpe ;
  int xy ;   /* y = 0 , x = 1 */
  int pu ;   /* pack = 0 , unpack = 1 */
  register int i, j, k, t ;
#ifdef crayx1
  register int i2,i3,i4,i_offset;
#endif
  char *p ;
  int da_buf ;
  int yp, ym, xp, xm ;
  int nbytes, ierr ;
  register int *pi, *qi ;

#ifndef STUBMPI
  MPI_Comm comm, *comm0, dummy_comm ;
  int js, je, ks, ke, is, ie, wcount ;

  comm0 = &dummy_comm ;
  *comm0 = MPI_Comm_f2c( *Fcomm0 ) ;

  shw = *shw0 ;          /* logical half-width of stencil */
  sendbegm = *sendbegm0 ;  /* send index of sten copy (edge = 1), lower/left */
  sendwm   = *sendwm0   ;  /* send width of sten copy counting towards edge, lower/left */
  sendbegp = *sendbegp0 ;  /* send index of sten copy (edge = 1), upper/right */
  sendwp   = *sendwp0   ;  /* send width of sten copy counting towards edge, upper/right */
  recvbegm = *recvbegm0 ;  /* recv index of sten copy (edge = 1), lower/left */
  recvwm   = *recvwm0   ;  /* recv width of sten copy counting towards edge, lower/left */
  recvbegp = *recvbegp0 ;  /* recv index of sten copy (edge = 1), upper/right */
  recvwp   = *recvwp0   ;  /* recv width of sten copy counting towards edge, upper/right */
  me = *me0 ; np = *np0 ; np_x = *np_x0 ; np_y = *np_y0 ;
  typesize = *typesize0 ;
  ids = *ids0-1 ; ide = *ide0-1 ; jds = *jds0-1 ; jde = *jde0-1 ; kds = *kds0-1 ; kde = *kde0-1 ;
  ims = *ims0-1 ; ime = *ime0-1 ; jms = *jms0-1 ; jme = *jme0-1 ; kms = *kms0-1 ; kme = *kme0-1 ;
  ips = *ips0-1 ; ipe = *ipe0-1 ; jps = *jps0-1 ; jpe = *jpe0-1 ; kps = *kps0-1 ; kpe = *kpe0-1 ;
  xy = *xy0 ;
  pu = *pu0 ;

/* need to adapt for other memory orders */

#define RANGE(S1,E1,S2,E2,S3,E3,S4,E4) (((E1)-(S1)+1)*((E2)-(S2)+1)*((E3)-(S3)+1)*((E4)-(S4)+1))
#define IMAX(A) (((A)>ids)?(A):ids)
#define IMIN(A) (((A)<ide)?(A):ide)
#define JMAX(A) (((A)>jds)?(A):jds)
#define JMIN(A) (((A)<jde)?(A):jde)

  da_buf = ( pu == 0 ) ? RSL_SENDBUF : RSL_RECVBUF ;

  if ( ips <= ipe && jps <= jpe ) {

  if ( np_y > 1 && xy == 0 ) {
    MPI_Cart_shift( *comm0 , 0, 1, &ym, &yp ) ;
    if ( yp != MPI_PROC_NULL && jpe <= jde  && jde != jpe ) {
      p = buffer_for_proc( yp , 0 , da_buf ) ;
      if ( pu == 0 ) {
        if ( sendwp > 0 ) {
          je = jpe - sendbegp + 1 ; js = je - sendwp + 1 ;
          ks = kps           ; ke = kpe ;
          is = IMAX(ips-shw) ; ie = IMIN(ipe+shw) ;
          nbytes = buffer_size_for_proc( yp, da_buf ) ;
	  if ( yp_curs + RANGE( js, je, kps, kpe, ips-shw, ipe+shw, 1, typesize ) > nbytes ) {
#ifndef MS_SUA
	    fprintf(stderr,"memory overwrite in rsl_lite_pack, Y pack up, %d > %d\n",
	        yp_curs + RANGE( js, je, kps, kpe, ips-shw, ipe+shw, 1, typesize ), nbytes ) ;
#endif
	    MPI_Abort(MPI_COMM_WORLD, 99) ;
          }
          if ( typesize == 8 ) {
            F_PACK_LINT ( buf, p+yp_curs, imemord, &js, &je, &ks, &ke, &is, &ie, 
                                                &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            yp_curs += wcount*typesize ;
          }
	  else if ( typesize == 4 ) {
            F_PACK_INT ( buf, p+yp_curs, imemord, &js, &je, &ks, &ke, &is, &ie,
                                               &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            yp_curs += wcount*typesize ;
	  }
	  else {
#ifndef MS_SUA
            fprintf(stderr,"internal error: %s %d\n",__FILE__,__LINE__) ;
#endif
          }
        }
      } else {
        if ( recvwp > 0 ) {
          js = jpe+recvbegp         ; je = js + recvwp - 1 ;
          ks = kps           ; ke = kpe ;
          is = IMAX(ips-shw) ; ie = IMIN(ipe+shw) ;
          if ( typesize == 8 ) {
            F_UNPACK_LINT ( p+yp_curs, buf, imemord, &js, &je, &ks, &ke, &is, &ie,
                                               &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            yp_curs += wcount*typesize ;
          }
	  else if ( typesize == 4 ) {
            F_UNPACK_INT ( p+yp_curs, buf, imemord, &js, &je, &ks, &ke, &is, &ie,
                                               &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            yp_curs += wcount*typesize ;
	  }
	  else {
#ifndef MS_SUA
            fprintf(stderr,"internal error: %s %d\n",__FILE__,__LINE__) ;
#endif
	  }
	}
      }
    }
    if ( ym != MPI_PROC_NULL && jps >= jds  && jps != jds ) {
      p = buffer_for_proc( ym , 0 , da_buf ) ;
      if ( pu == 0 ) {
        if ( sendwm > 0 ) {
          js = jps+sendbegm-1 ; je = js + sendwm -1 ;
          ks = kps           ; ke = kpe ;
          is = IMAX(ips-shw) ; ie = IMIN(ipe+shw) ;
          nbytes = buffer_size_for_proc( ym, da_buf ) ;
	  if ( ym_curs + RANGE( js, je, kps, kpe, ips-shw, ipe+shw, 1, typesize ) > nbytes ) {
#ifndef  MS_SUA
	    fprintf(stderr,"memory overwrite in rsl_lite_pack, Y pack dn, %d > %d\n",
	        ym_curs + RANGE( js, je, kps, kpe, ips-shw, ipe+shw, 1, typesize ), nbytes ) ;
#endif
	    MPI_Abort(MPI_COMM_WORLD, 99) ;
          }
          if ( typesize == 8 ) {
            F_PACK_LINT ( buf, p+ym_curs, imemord, &js, &je, &ks, &ke, &is, &ie,
                                               &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            ym_curs += wcount*typesize ;
          }
	  else if ( typesize == 4 ) {
            F_PACK_INT ( buf, p+ym_curs, imemord, &js, &je, &ks, &ke, &is, &ie,
                                               &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            ym_curs += wcount*typesize ;
  	  }
	  else {
#ifndef MS_SUA
            fprintf(stderr,"internal error: %s %d\n",__FILE__,__LINE__) ;
#endif
	  }
	}
      } else {
        if ( recvwm > 0 ) {
          je = jps-recvbegm ; js = je - recvwm + 1 ;
          ks = kps           ; ke = kpe ;
          is = IMAX(ips-shw) ; ie = IMIN(ipe+shw) ;
          if ( typesize == 8 ) {
            F_UNPACK_LINT ( p+ym_curs, buf, imemord, &js, &je, &ks, &ke, &is, &ie,
                                                  &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            ym_curs += wcount*typesize ;
          }
	  else if ( typesize == 4 ) {
            F_UNPACK_INT ( p+ym_curs, buf, imemord, &js, &je, &ks, &ke, &is, &ie,
                                                 &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            ym_curs += wcount*typesize ;
	  }
	  else {
#ifndef MS_SUA
            fprintf(stderr,"internal error: %s %d\n",__FILE__,__LINE__) ;
#endif
          }
        }
      }
    }
  }

  if ( np_x > 1 && xy == 1 ) {
    MPI_Cart_shift( *comm0, 1, 1, &xm, &xp ) ;
    if ( xp != MPI_PROC_NULL  && ipe <= ide && ide != ipe ) {
      p = buffer_for_proc( xp , 0 , da_buf ) ;
      if ( pu == 0 ) {
        if ( sendwp > 0 ) {
          js = JMAX(jps-shw) ; je = JMIN(jpe+shw) ;
          ks = kps           ; ke = kpe ;
          ie = ipe - sendbegp + 1 ; is = ie - sendwp + 1 ;
          nbytes = buffer_size_for_proc( xp, da_buf ) ;
          if ( xp_curs + RANGE( js, je, kps, kpe, ipe-shw+1, ipe, 1, typesize ) > nbytes ) {
#ifndef MS_SUA
	    fprintf(stderr,"memory overwrite in rsl_lite_pack, X pack right, %d > %d\n",
	        xp_curs + RANGE( js, je, kps, kpe, ipe-shw+1, ipe, 1, typesize ), nbytes ) ;
#endif
	    MPI_Abort(MPI_COMM_WORLD, 99) ;
          }
          if ( typesize == 8 ) {
            F_PACK_LINT ( buf, p+xp_curs, imemord, &js, &je, &ks, &ke, &is, &ie,
                                                &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            xp_curs += wcount*typesize ;
          }
	  else if ( typesize == 4 ) {
            F_PACK_INT ( buf, p+xp_curs, imemord, &js, &je, &ks, &ke, &is, &ie,
                                               &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            xp_curs += wcount*typesize ;
	  }
	  else {
#ifndef MS_SUA
            fprintf(stderr,"A internal error: %s %d\n",__FILE__,__LINE__) ;
#endif
	  }
	}
      } else {
        if ( recvwp > 0 ) {
          js = JMAX(jps-shw) ; je = JMIN(jpe+shw) ;
          ks = kps           ; ke = kpe ;
          is = ipe+recvbegp  ; ie = is + recvwp - 1 ;
          if ( typesize == 8 ) {
            F_UNPACK_LINT ( p+xp_curs, buf, imemord, &js, &je, &ks, &ke, &is, &ie,
                                                  &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            xp_curs += wcount*typesize ;
          }
	  else if ( typesize == 4 ) {
            F_UNPACK_INT ( p+xp_curs, buf, imemord, &js, &je, &ks, &ke, &is, &ie,
                                                 &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            xp_curs += wcount*typesize ;
	  }
	  else {
#ifndef MS_SUA
            fprintf(stderr,"B internal error: %s %d\n",__FILE__,__LINE__) ;
            fprintf(stderr,"  stenbeg %d stenw  %d \n",is,ie) ;
            fprintf(stderr,"  is %d ie %d \n",is,ie) ;
#endif
          }
        }
      }
    }
    if ( xm != MPI_PROC_NULL  && ips >= ids && ids != ips ) {
      p = buffer_for_proc( xm , 0 , da_buf ) ;
      if ( pu == 0 ) {
        if ( sendwm > 0 ) {
          js = JMAX(jps-shw) ; je = JMIN(jpe+shw) ;
          ks = kps           ; ke = kpe ;
          is = ips+sendbegm-1 ; ie = is + sendwm-1 ;
          nbytes = buffer_size_for_proc( xm, da_buf ) ;
          if ( xm_curs + RANGE( js, je, kps, kpe, ips, ips+shw-1, 1, typesize ) > nbytes ) {
#ifndef MS_SUA
	    fprintf(stderr,"memory overwrite in rsl_lite_pack, X left , %d > %d\n",
	        xm_curs + RANGE( js, je, kps, kpe, ips, ips+shw-1, 1, typesize ), nbytes ) ;
#endif
	    MPI_Abort(MPI_COMM_WORLD, 99) ;
          }
          if ( typesize == 8 ) {
            F_PACK_LINT ( buf, p+xm_curs, imemord, &js, &je, &ks, &ke, &is, &ie,
                                                &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            xm_curs += wcount*typesize ;
          }
	  else if ( typesize == 4 ) {
            F_PACK_INT ( buf, p+xm_curs, imemord, &js, &je, &ks, &ke, &is, &ie,
                                               &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            xm_curs += wcount*typesize ;
	  }
	  else {
#ifndef MS_SUA
            fprintf(stderr,"internal error: %s %d\n",__FILE__,__LINE__) ;
#endif
          }
        }
      } else {
        if ( recvwm > 0 ) {
          js = JMAX(jps-shw) ; je = JMIN(jpe+shw) ;
          ks = kps           ; ke = kpe ;
          ie = ips-recvbegm ; is = ie - recvwm + 1 ;
          if ( typesize == 8 ) {
            F_UNPACK_LINT ( p+xm_curs, buf, imemord, &js, &je, &ks, &ke, &is, &ie,
                                                  &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            xm_curs += wcount*typesize ;
          } 
          else if ( typesize == 4 ) {
            F_UNPACK_INT ( p+xm_curs, buf, imemord, &js, &je, &ks, &ke, &is, &ie,
                                                 &jms,&jme,&kms,&kme,&ims,&ime, &wcount ) ;
            xm_curs += wcount*typesize ;
	  }
	  else {
#ifndef MS_SUA
            fprintf(stderr,"internal error: %s %d\n",__FILE__,__LINE__) ;
#endif
          }
        }
      }
    }
  }
  }
#endif

}
Example no. 13
void ADIOI_PFS_IwriteContig(ADIO_File fd, void *buf, int count, 
                MPI_Datatype datatype, int file_ptr_type,
                ADIO_Offset offset, ADIO_Request *request, int *error_code)  
{
    long *id_sys;
    ADIO_Offset off;
    int len, typesize, err;
#ifndef PRINT_ERR_MSG
    static char myname[] = "ADIOI_PFS_IWRITECONTIG";
#endif

    *request = ADIOI_Malloc_request();
    (*request)->optype = ADIOI_WRITE;
    (*request)->fd = fd;
    (*request)->datatype = datatype;

    MPI_Type_size(datatype, &typesize);
    len = count * typesize;

    id_sys = (long *) ADIOI_Malloc(sizeof(long));
    (*request)->handle = (void *) id_sys;

    off = (file_ptr_type == ADIO_INDIVIDUAL) ? fd->fp_ind : offset;

    lseek(fd->fd_sys, off, SEEK_SET);
    *id_sys = _iwrite(fd->fd_sys, buf, len);

    if ((*id_sys == -1) && (errno == EQNOMID)) {
     /* the man pages say EMREQUEST, but in reality errno is set to EQNOMID! */

        /* exceeded the max. no. of outstanding requests. */

        /* complete all previous async. requests */
        ADIOI_Complete_async(&err);

        /* try again */
	*id_sys = _iwrite(fd->fd_sys, buf, len);

        if ((*id_sys == -1) && (errno == EQNOMID)) {
#ifdef PRINT_ERR_MSG
            FPRINTF(stderr, "Error in asynchronous I/O\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
#else
	    *error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	    ADIOI_Error(fd, *error_code, myname);	    
	    return;
#endif
        }
    }
    else if (*id_sys == -1) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "Unknown errno %d in ADIOI_PFS_IwriteContig\n", errno);
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	*error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			         myname, "I/O Error", "%s", strerror(errno));
	ADIOI_Error(fd, *error_code, myname);	    
	return;
#endif
    }

    if (file_ptr_type == ADIO_INDIVIDUAL) fd->fp_ind += len; 

    (*request)->queued = 1;
    (*request)->nbytes = len;
    ADIOI_Add_req_to_list(request);
    fd->async_count++;

    fd->fp_sys_posn = -1;   /* set it to null. */

#ifdef PRINT_ERR_MSG
    *error_code = (*id_sys == -1) ? MPI_ERR_UNKNOWN : MPI_SUCCESS;
#else
    if (*id_sys == -1) {
	*error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	ADIOI_Error(fd, *error_code, myname);	    
    }
    else *error_code = MPI_SUCCESS;
#endif
}
Esempio n. 14
0
File: mpi.c Progetto: haoleer/FGOALS
FORT_NAME( mpi_abort , MPI_ABORT )(int *comm, int *errorcode, int *ierror)
{
  *ierror=MPI_Abort( *comm, *errorcode);
}
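This wrapper passes the Fortran integer handle straight to MPI_Abort, which is valid for serial stubs or implementations whose handles are plain ints. With a full MPI library, the portable form converts the handle first; a minimal sketch, assuming a standard <mpi.h>:

#include <mpi.h>

void mpi_abort_(MPI_Fint *comm, MPI_Fint *errorcode, MPI_Fint *ierror)
{
  /* Convert the Fortran handle to a C communicator before calling MPI_Abort. */
  *ierror = (MPI_Fint) MPI_Abort(MPI_Comm_f2c(*comm), (int) *errorcode);
}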
Esempio n. 15
0
/*
 * Write or read access to file using the POSIX interface.
 */
static IOR_offset_t POSIX_Xfer(int access, void *file, IOR_size_t * buffer,
                               IOR_offset_t length, IOR_param_t * param)
{
        int xferRetries = 0;
        long long remaining = (long long)length;
        char *ptr = (char *)buffer;
        long long rc;
        int fd;

        fd = *(int *)file;

        /* seek to offset */
        if (lseek64(fd, param->offset, SEEK_SET) == -1)
                ERR("lseek64() failed");

        while (remaining > 0) {
                /* write/read file */
                if (access == WRITE) {  /* WRITE */
                        if (verbose >= VERBOSE_4) {
                                fprintf(stdout,
                                        "task %d writing to offset %lld\n",
                                        rank,
                                        param->offset + length - remaining);
                        }
                        rc = write(fd, ptr, remaining);
                        if (rc == -1)
                                ERR("write() failed");
                        if (param->fsyncPerWrite == TRUE)
                                POSIX_Fsync(&fd, param);
                } else {        /* READ or CHECK */
                        if (verbose >= VERBOSE_4) {
                                fprintf(stdout,
                                        "task %d reading from offset %lld\n",
                                        rank,
                                        param->offset + length - remaining);
                        }
                        rc = read(fd, ptr, remaining);
                        if (rc == 0)
                                ERR("read() returned EOF prematurely");
                        if (rc == -1)
                                ERR("read() failed");
                }
                if (rc < remaining) {
                        fprintf(stdout,
                                "WARNING: Task %d, partial %s, %lld of %lld bytes at offset %lld\n",
                                rank,
                                access == WRITE ? "write()" : "read()",
                                rc, remaining,
                                param->offset + length - remaining);
                        if (param->singleXferAttempt == TRUE)
                                MPI_CHECK(MPI_Abort(MPI_COMM_WORLD, -1),
                                          "MPI_Abort() error");
                        if (xferRetries > MAX_RETRY)
                                ERR("too many retries -- aborting");
                }
                assert(rc >= 0);
                assert(rc <= remaining);
                remaining -= rc;
                ptr += rc;
                xferRetries++;
        }
        return (length);
}
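The loop above keeps retrying until the full length has been transferred, warning on short transfers and aborting after MAX_RETRY attempts. Stripped of the IOR bookkeeping, the same pattern can be written as a standalone helper; this is an illustrative sketch with made-up names, not IOR code:

#include <unistd.h>
#include <errno.h>

/* Write exactly `count` bytes from buf to fd, retrying short writes.
 * Returns count on success or -1 on a real error. */
static ssize_t write_fully(int fd, const char *buf, size_t count)
{
        size_t remaining = count;
        while (remaining > 0) {
                ssize_t rc = write(fd, buf, remaining);
                if (rc == -1) {
                        if (errno == EINTR)
                                continue;       /* interrupted, retry */
                        return -1;              /* real error */
                }
                remaining -= (size_t) rc;
                buf += rc;
        }
        return (ssize_t) count;
}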
Esempio n. 16
0
void
process_args(int argc, char **argv, hydroparam_t * H) {
  int n = 1;
  char donnees[512];
  char config[512];

#if FTI==0
  default_values(H);

#ifdef MPI
  MPI_Comm_size(MPI_COMM_WORLD, &H->nproc);
  MPI_Comm_rank(MPI_COMM_WORLD, &H->mype);
#else
  H->nproc = 1;
  H->mype = 0;
#endif
  while (n < argc) {
    if (strcmp(argv[n], "--help") == 0) {
      usage();
      n++;
      continue;
    }
    if (strcmp(argv[n], "-v") == 0) {
      n++;
      H->prt++;
      continue;
    }
    if (strcmp(argv[n], "-i") == 0) {
      n++;
      strncpy(donnees, argv[n], 512);
      donnees[511] = 0;         // security
      n++;
      continue;
    }
    if (strcmp(argv[n], "-c") == 0) {
      n++;
      fprintf(stderr, "FTI is not available\n");
      n++;
      continue;
    }
    fprintf(stderr, "Key %s is unkown\n", argv[n]);
    n++;
  }
  if (donnees != NULL) {
    process_input(donnees, H);
  } else {
    fprintf(stderr, "Option -i is missing\n");
    exit(1);
  }
#endif
#if FTI>0
  H->prt=0;
  default_values(H);

  while (n < argc) {
    if (strcmp(argv[n], "--help") == 0) {
      usage();
      n++;
      continue;
    }
    if (strcmp(argv[n], "-v") == 0) {
      n++;
      H->prt++;
      continue;
    }
    if (strcmp(argv[n], "-i") == 0) {
      n++;
      strncpy(donnees, argv[n], 512);
      donnees[511] = 0;         // security
      n++;
      continue;
    }
    if (strcmp(argv[n], "-c") == 0) {
      n++;
      strncpy(config, argv[n], 512);
      config[511] = 0;         // security
      n++;
      continue;
      }
    fprintf(stderr, "Key %s is unkown\n", argv[n]);
    n++;
  }
  if (config != NULL) {
#ifdef MPI
    //FTI initialization
    FTI_Init(config, MPI_COMM_WORLD);
#else
    fprintf(stderr, "FTI need MPI\n", argv[n]);
#endif
  } else {
    fprintf(stderr, "Option -c is missing\n");
    exit(1);
  }
  default_values(H);

#ifdef MPI
  MPI_Comm_size(FTI_COMM_WORLD, &H->nproc);
  MPI_Comm_rank(FTI_COMM_WORLD, &H->mype);
#else
  H->nproc = 1;
  H->mype = 0;
#endif
  if (donnees != NULL) {
    process_input(donnees, H);
  } else {
    fprintf(stderr, "Option -i is missing\n");
    exit(1);
  }
#endif

  H->globnx = H->nx;
  H->globny = H->ny;
  H->box[XMIN_BOX] = 0;
  H->box[XMAX_BOX] = H->nx;
  H->box[YMIN_BOX] = 0;
  H->box[YMAX_BOX] = H->ny;

#ifdef MPI
  if (H->nproc > 1) {
#if FTI==0
    MPI_Barrier(MPI_COMM_WORLD);
#endif
#if FTI>0
    MPI_Barrier(FTI_COMM_WORLD);
#endif
    // first pass : determine our actual sub problem size
    CalcSubSurface(0, H->globnx, 0, H->globny, 0, H->nproc - 1, 0, H->box, H->mype, 0);
    // second pass : determine our neighbours
    CalcSubSurface(0, H->globnx, 0, H->globny, 0, H->nproc - 1, 0, H->box, H->mype, 1);

    H->nx = H->box[XMAX_BOX] - H->box[XMIN_BOX];
    H->ny = H->box[YMAX_BOX] - H->box[YMIN_BOX];
    printf("[%4d/%4d] x=%4d X=%4d y=%4d Y=%4d / u=%4d d=%4d l=%4d r=%4d \n", H->mype, H->nproc, H->box[XMIN_BOX], H->box[XMAX_BOX], H->box[YMIN_BOX], H->box[YMAX_BOX], H->box[UP_BOX], H->box[DOWN_BOX], H->box[LEFT_BOX], H->box[RIGHT_BOX]);

    if (H->nx <= 0) {
      printf("Decomposition not suited for this geometry along X: increase nx or change number of procs\n");
    }

    if (H->ny <= 0) {
      printf("Decomposition not suited for this geometry along Y: increase ny or change number of procs\n");
    }

    if (H->nx == 0 || H->ny == 0) {
#if FTI==0
      MPI_Abort(MPI_COMM_WORLD, 123);
#endif
#if FTI>0
      MPI_Abort(FTI_COMM_WORLD, 123);
#endif
    }

    // adapt the boundary conditions 
    if (H->box[LEFT_BOX] != -1) {
      H->boundary_left = 0;
    }
    if (H->box[RIGHT_BOX] != -1) {
      H->boundary_right = 0;
    }
    if (H->box[DOWN_BOX] != -1) {
      H->boundary_down = 0;
    }
    if (H->box[UP_BOX] != -1) {
      H->boundary_up = 0;
    }
  }
  fflush(stdout);
#endif

  if (H->nxystep == -1) {
    // default = full slab
    H->nxystep = (H->nx < H->ny) ? H->nx: H->ny;
  } else {
    if (H->nxystep > H->nx) H->nxystep = H->nx;
    if (H->nxystep > H->ny) H->nxystep = H->ny;
  }

  // small summary of the run conditions
  if (H->mype == 0) {
    printf("+-------------------+\n");
    printf("|GlobNx=%-7d     |\n", H->globnx);
    printf("|GlobNy=%-7d     |\n", H->globny);
    printf("|nx=%-7d         |\n", H->nx);
    printf("|ny=%-7d         |\n", H->ny);
    printf("|nxystep=%-7d    |\n", H->nxystep);
    printf("|tend=%-10.3f    |\n", H->tend);
    printf("|nstepmax=%-7d   |\n", H->nstepmax);
    printf("|noutput=%-7d    |\n", H->noutput);
    printf("|dtoutput=%-10.3f|\n", H->dtoutput);
    printf("+-------------------+\n");
  }
}
Esempio n. 17
0
/*
 * Create and open a file through the POSIX interface.
 */
static void *POSIX_Create(char *testFileName, IOR_param_t * param)
{
        int fd_oflag = O_BINARY;
        int *fd;

        fd = (int *)malloc(sizeof(int));
        if (fd == NULL)
                ERR("Unable to malloc file descriptor");

        if (param->useO_DIRECT == TRUE)
                set_o_direct_flag(&fd_oflag);

#ifdef HAVE_LUSTRE_LUSTRE_USER_H
        if (param->lustre_set_striping) {
                /* In the single-shared-file case, task 0 has to create the
                   file with the Lustre striping options before any other processes
                   open the file */
                if (!param->filePerProc && rank != 0) {
                        MPI_CHECK(MPI_Barrier(testComm), "barrier error");
                        fd_oflag |= O_RDWR;
                        *fd = open64(testFileName, fd_oflag, 0664);
                        if (*fd < 0)
                                ERR("open64() failed");
                } else {
                        struct lov_user_md opts = { 0 };

                        /* Setup Lustre IOCTL striping pattern structure */
                        opts.lmm_magic = LOV_USER_MAGIC;
                        opts.lmm_stripe_size = param->lustre_stripe_size;
                        opts.lmm_stripe_offset = param->lustre_start_ost;
                        opts.lmm_stripe_count = param->lustre_stripe_count;

                        /* File needs to be opened O_EXCL because we cannot set
                           Lustre striping information on a pre-existing file. */
                        fd_oflag |=
                            O_CREAT | O_EXCL | O_RDWR | O_LOV_DELAY_CREATE;
                        *fd = open64(testFileName, fd_oflag, 0664);
                        if (*fd < 0) {
                                fprintf(stdout, "\nUnable to open '%s': %s\n",
                                        testFileName, strerror(errno));
                                MPI_CHECK(MPI_Abort(MPI_COMM_WORLD, -1),
                                          "MPI_Abort() error");
                        } else if (ioctl(*fd, LL_IOC_LOV_SETSTRIPE, &opts)) {
                                char *errmsg = "stripe already set";
                                if (errno != EEXIST && errno != EALREADY)
                                        errmsg = strerror(errno);
                                fprintf(stdout,
                                        "\nError on ioctl for '%s' (%d): %s\n",
                                        testFileName, *fd, errmsg);
                                MPI_CHECK(MPI_Abort(MPI_COMM_WORLD, -1),
                                          "MPI_Abort() error");
                        }
                        if (!param->filePerProc)
                                MPI_CHECK(MPI_Barrier(testComm),
                                          "barrier error");
                }
        } else {
#endif                          /* HAVE_LUSTRE_LUSTRE_USER_H */
                fd_oflag |= O_CREAT | O_RDWR;
                *fd = open64(testFileName, fd_oflag, 0664);
                if (*fd < 0)
                        ERR("open64() failed");
#ifdef HAVE_LUSTRE_LUSTRE_USER_H
        }

        if (param->lustre_ignore_locks) {
                int lustre_ioctl_flags = LL_FILE_IGNORE_LOCK;
                if (ioctl(*fd, LL_IOC_SETFLAGS, &lustre_ioctl_flags) == -1)
                        ERR("ioctl(LL_IOC_SETFLAGS) failed");
        }
#endif                          /* HAVE_LUSTRE_LUSTRE_USER_H */

        return ((void *)fd);
}
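set_o_direct_flag() is not shown in this excerpt. Under the assumption that it only ORs O_DIRECT into the open(2) flags where the platform defines it, a sketch could look like this (an assumption for illustration, not IOR's actual helper):

#include <stdio.h>
#include <fcntl.h>

static void set_o_direct_flag(int *fd_oflag)
{
#ifdef O_DIRECT
        *fd_oflag |= O_DIRECT;      /* request direct (unbuffered) I/O */
#else
        fprintf(stderr, "WARNING: O_DIRECT not supported on this platform\n");
#endif
}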
Esempio n. 18
0
int main(int argc, char **argv)
{
    int err = 0;
    int toterr, size, rank, i, sumval;
    int *sendbuf;
    int *recvbuf;
    MPI_Comm comm;

    MPI_Init(&argc, &argv);
    comm = MPI_COMM_WORLD;

    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
    /* MPI_Reduce_scatter block was added in MPI-2.2 */
    sendbuf = (int *) malloc(size * sizeof(int));
    recvbuf = (int *) malloc(size * sizeof(int));
    if (!sendbuf || !recvbuf) {
        err++;
        fprintf(stderr, "unable to allocate send/recv buffers, aborting");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    for (i=0; i<size; i++)
        sendbuf[i] = rank + i;

    MPI_Reduce_scatter_block(sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm);

    sumval = size * rank + ((size - 1) * size)/2;
    if (recvbuf[0] != sumval) {
        err++;
        fprintf(stdout, "Did not get expected value for reduce scatter block\n");
        fprintf(stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval);
    }

    free(sendbuf);

    /* let's try it again with MPI_IN_PLACE this time */
    for (i=0; i<size; i++)
        recvbuf[i] = rank + i;

    MPI_Reduce_scatter_block(MPI_IN_PLACE, recvbuf, 1, MPI_INT, MPI_SUM, comm);

    sumval = size * rank + ((size - 1) * size)/2;
    if (recvbuf[0] != sumval) {
        err++;
        fprintf(stdout, "Did not get expected value for reduce scatter block\n");
        fprintf(stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval);
    }

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    if (MPI_SUCCESS == MPI_Reduce_scatter_block(recvbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm))
        err++;

    free(recvbuf);
#endif

    MPI_Allreduce(&err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0 && toterr == 0) {
        printf(" No Errors\n");
    }
    MPI_Finalize();

    return toterr;
}
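As the comment in the test notes, MPI_Reduce_scatter_block first appeared in MPI-2.2. With a receive count of 1 per rank, as used here, it is semantically an element-wise reduction of the full vector followed by scattering one element to each rank. A minimal standalone sketch of that equivalence (the helper name is made up):

#include <stdlib.h>
#include <mpi.h>

/* Emulate MPI_Reduce_scatter_block(sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm),
 * where every rank supplies `size` ints. For illustration only. */
static void reduce_scatter_block_emulated(const int *sendbuf, int *recvbuf, MPI_Comm comm)
{
    int size, rank;
    int *tmp = NULL;

    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);
    if (rank == 0)
        tmp = (int *) malloc(size * sizeof(int));

    /* element-wise sum of all ranks' vectors, gathered on rank 0 */
    MPI_Reduce(sendbuf, tmp, size, MPI_INT, MPI_SUM, 0, comm);
    /* block i of the reduced vector goes to rank i */
    MPI_Scatter(tmp, 1, MPI_INT, recvbuf, 1, MPI_INT, 0, comm);

    if (rank == 0)
        free(tmp);
}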
Esempio n. 19
0
int main(int argc, char **argv)
{
    int i, len, nkeys, flag, mynod, default_striping_factor=0, nprocs, errs = 0;
    MPI_File fh;
    MPI_Info info, info_used;
    char *filename, key[MPI_MAX_INFO_KEY], value[MPI_MAX_INFO_VAL];

    MPI_Init(&argc,&argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &mynod);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

/* process 0 takes the file name as a command-line argument and 
   broadcasts it to other processes */
    if (!mynod) {
	i = 1;
	while ((i < argc) && strcmp("-fname", *argv)) {
	    if (!strcmp("-v", *argv)) verbose = 1;
	    i++;
	    argv++;
	}
	if (i >= argc) {
	    fprintf(stderr, "\n*#  Usage: file_info [-v] -fname filename\n\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	argv++;
	len = strlen(*argv);
	filename = (char *) malloc(len+1);
	strcpy(filename, *argv);
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
	MPI_Bcast(&verbose, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }
    else {
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	filename = (char *) malloc(len+1);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
	MPI_Bcast(&verbose, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }

/* open the file with MPI_INFO_NULL */
    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, 
                  MPI_INFO_NULL, &fh);

/* check the default values set by ROMIO */
    MPI_File_get_info(fh, &info_used);
    MPI_Info_get_nkeys(info_used, &nkeys);

    for (i=0; i<nkeys; i++) {
	MPI_Info_get_nthkey(info_used, i, key);
	MPI_Info_get(info_used, key, MPI_MAX_INFO_VAL-1, value, &flag);
#ifdef INFO_DEBUG
	if (!mynod) 
	    fprintf(stderr, "Process %d, Default:  key = %s, value = %s\n", mynod, 
                key, value);
#endif
	if (!strcmp("striping_factor", key)) {
	    default_striping_factor = atoi(value);
	    /* no check */
	}
	else if (!strcmp("cb_buffer_size", key)) {
	    if (atoi(value) != DFLT_CB_BUFFER_SIZE) {
		errs++;
		if (verbose) fprintf(stderr, "cb_buffer_size is %d; should be %d\n",
				     atoi(value), DFLT_CB_BUFFER_SIZE);
	    }
	}
	else if (!strcmp("romio_cb_read", key)) {
	    if (strcmp(DFLT_ROMIO_CB_READ, value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_cb_read is set to %s; should be %s\n",
				     value, DFLT_ROMIO_CB_READ);
	    }
	}
	else if (!strcmp("romio_cb_write", key)) {
	    if (strcmp(DFLT_ROMIO_CB_WRITE, value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_cb_write is set to %s; should be %s\n",
				     value, DFLT_ROMIO_CB_WRITE);
	    }
	}
	else if (!strcmp("cb_nodes", key)) {
	    /* unreliable test -- just ignore value */
	}
	else if (!strcmp("romio_no_indep_rw", key)) {
	    if (strcmp("false", value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_no_indep_rw is set to %s; should be %s\n",
				     value, "false");
	    }
	}
	else if (!strcmp("ind_rd_buffer_size", key)) {
	    if (atoi(value) != DFLT_IND_RD_BUFFER_SIZE) {
		errs++;
		if (verbose) fprintf(stderr, "ind_rd_buffer_size is %d; should be %d\n",
				     atoi(value), DFLT_IND_RD_BUFFER_SIZE);
	    }
	}
	else if (!strcmp("ind_wr_buffer_size", key)) {
	    if (atoi(value) != DFLT_IND_WR_BUFFER_SIZE) {
		errs++;
		if (verbose) fprintf(stderr, "ind_wr_buffer_size is %d; should be %d\n",
				     atoi(value), DFLT_IND_WR_BUFFER_SIZE);
	    }
	}
	else if (!strcmp("romio_ds_read", key)) {
	    if (strcmp("automatic", value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_ds_read is set to %s; should be %s\n",
				     value, "automatic");
	    }
	}
	else if (!strcmp("romio_ds_write", key)) {
	    /* Unreliable test -- value is file system dependent.  Ignore. */
	}
	else if (!strcmp("cb_config_list", key)) {
#ifndef SKIP_CB_CONFIG_LIST_TEST
	    if (strcmp("*:1", value)) {
		errs++;
		if (verbose) fprintf(stderr, "cb_config_list is set to %s; should be %s\n",
				     value, "*:1");
	    }
#endif
	}
	/* don't care about the defaults for these keys */
	else if (!strcmp("romio_cb_pfr", key)) {
	}
	else if (!strcmp("romio_cb_fr_types", key)) {
	}
	else if (!strcmp("romio_cb_fr_alignment", key)) {
	}
	else if (!strcmp("romio_cb_ds_threshold", key)) {
	}
	else if (!strcmp("romio_cb_alltoall", key)) {
	}
	else {
	    if (verbose) fprintf(stderr, "unexpected key %s (not counted as an error)\n", key);
	}
    }
    MPI_Info_free(&info_used);

    MPI_File_close(&fh);
    
/* delete the file */
    if (!mynod) MPI_File_delete(filename, MPI_INFO_NULL);
    MPI_Barrier(MPI_COMM_WORLD);

/* set new info values. */

    MPI_Info_create(&info);

/* The following four hints are accepted on all machines. They can
   be specified at file-open time or later (any number of times). */

    /* buffer size for collective I/O */
    MPI_Info_set(info, "cb_buffer_size", "8388608");

    /* number of processes that actually perform I/O in collective I/O */
    sprintf(value, "%d", nprocs/2);
    MPI_Info_set(info, "cb_nodes", value);

    /* buffer size for data sieving in independent reads */
    MPI_Info_set(info, "ind_rd_buffer_size", "2097152");

    /* buffer size for data sieving in independent writes */
    MPI_Info_set(info, "ind_wr_buffer_size", "1048576");


/* The following three hints related to file striping are accepted only 
   on Intel PFS and IBM PIOFS file systems and are ignored elsewhere. 
   They can be specified only at file-creation time; if specified later 
   they will be ignored. */

    /* number of I/O devices across which the file will be striped.
       accepted only if 0 < value < default_striping_factor; 
       ignored otherwise */
    if (default_striping_factor - 1 > 0) {
        sprintf(value, "%d", default_striping_factor-1);
        MPI_Info_set(info, "striping_factor", value);
    }
    else {
        sprintf(value, "%d", default_striping_factor);
        MPI_Info_set(info, "striping_factor", value);
    }

    /* the striping unit in bytes */
    MPI_Info_set(info, "striping_unit", "131072");

#ifndef SKIP_CB_CONFIG_LIST_TEST
    /* set the cb_config_list so we'll get deterministic cb_nodes output */
    MPI_Info_set(info, "cb_config_list", "*:*");
#endif

    /* the I/O device number from which to start striping the file.
       accepted only if 0 <= value < default_striping_factor; 
       ignored otherwise */
    sprintf(value, "%d", default_striping_factor-2);
    MPI_Info_set(info, "start_iodevice", value);


/* The following hint about PFS server buffering is accepted only on 
   Intel PFS. It can be specified anytime. */ 
    MPI_Info_set(info, "pfs_svr_buf", "true");

/* open the file and set new info */
    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, 
                  info, &fh);

/* check the values set */
    MPI_File_get_info(fh, &info_used);
    MPI_Info_get_nkeys(info_used, &nkeys);

    for (i=0; i<nkeys; i++) {
	MPI_Info_get_nthkey(info_used, i, key);
	MPI_Info_get(info_used, key, MPI_MAX_INFO_VAL-1, value, &flag);
#ifdef INFO_DEBUG	
	if (!mynod) fprintf(stderr, "Process %d, key = %s, value = %s\n", mynod, 
                key, value);
#endif
	if (!strcmp("striping_factor", key)) {
	    if ((default_striping_factor - 1 > 0) && (atoi(value) != default_striping_factor-1)) {
		errs++;
		if (verbose) fprintf(stderr, "striping_factor is %d; should be %d\n",
				     atoi(value), default_striping_factor-1);
	    }
	    else if (atoi(value) != default_striping_factor) {
		errs++;
		if (verbose) fprintf(stderr, "striping_factor is %d; should be %d\n",
				     atoi(value), default_striping_factor);
	    }
	}
	else if (!strcmp("cb_buffer_size", key)) {
	    if (atoi(value) != 8388608) {
		errs++;
		if (verbose) fprintf(stderr, "cb_buffer_size is %d; should be %d\n",
				     atoi(value), 8388608);
	    }
	}
	else if (!strcmp("romio_cb_read", key)) {
	    if (strcmp(DFLT_ROMIO_CB_READ, value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_cb_read is set to %s; should be %s\n",
				     value, DFLT_ROMIO_CB_READ);
	    }
	}
	else if (!strcmp("romio_cb_write", key)) {
	    if (strcmp(DFLT_ROMIO_CB_WRITE, value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_cb_write is set to %s; should be %s\n",
				     value, DFLT_ROMIO_CB_WRITE);
	    }
	}
	else if (!strcmp("cb_nodes", key)) {
	    if (atoi(value) != (nprocs/2)) {
		errs++;
		if (verbose) fprintf(stderr, "cb_nodes is %d; should be %d\n", atoi(value),
				     nprocs/2);
	    }
	}
	else if (!strcmp("romio_no_indep_rw", key)) {
	    if (strcmp("false", value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_no_indep_rw is set to %s; should be %s\n",
				     value, "false");
	    }
	}
	else if (!strcmp("ind_rd_buffer_size", key)) {
	    if (atoi(value) != 2097152) {
		errs++;
		if (verbose) fprintf(stderr, "ind_rd_buffer_size is %d; should be %d\n",
				     atoi(value), 2097152);
	    }
	}
	else if (!strcmp("ind_wr_buffer_size", key)) {
	    if (atoi(value) != 1048576) {
		errs++;
		if (verbose) fprintf(stderr, "ind_wr_buffer_size is %d; should be %d\n",
				     atoi(value), 1048576);
	    }
	}
	else if (!strcmp("romio_ds_read", key)) {
	    if (strcmp("automatic", value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_ds_read is set to %s; should be %s\n",
				     value, "automatic");
	    }
	}
	else if (!strcmp("romio_ds_write", key)) {
	    /* Unreliable test -- value is file system dependent.  Ignore. */
	}
	else if (!strcmp("cb_config_list", key)) {
#ifndef SKIP_CB_CONFIG_LIST_TEST
	    if (strcmp("*:*", value)) {
		errs++;
		if (verbose) fprintf(stderr, "cb_config_list is set to %s; should be %s\n",
				     value, "*:*");
	    }
#endif
	}
	else if (!strcmp("romio_cb_pfr", key)) {
   	    if(strcmp("disable", value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_cb_pfr is set to %s; should be %s\n",
				     value, "automatic");
	    }
	}
	else if (!strcmp("romio_cb_fr_types", key)) {
   	    if(strcmp("aar", value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_cb_fr_types is set to %s; should be %s\n",
				     value, "aar");
	    }
	}
	else if (!strcmp("romio_cb_fr_alignment", key)) {
   	    if(strcmp("1", value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_cb_fr_alignment is set to %s; should be %s\n",
				     value, "1");
	    }
	}
	else if (!strcmp("romio_cb_ds_threshold", key)) {
   	    if(strcmp("0", value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_cb_ds_threshold is set to %s; should be %s\n",
				     value, "0");
	    }
	}
	else if (!strcmp("romio_cb_alltoall", key)) {
   	    if(strcmp("automatic", value)) {
		errs++;
		if (verbose) fprintf(stderr, "romio_cb_alltoall is set to %s; should be %s\n",
				     value, "automatic");
	    }
	}

	else {
	    if (verbose) fprintf(stderr, "unexpected key %s (not counted as an error)\n", key);
	}
    }
	    
    /* Q: SHOULD WE BOTHER LOOKING AT THE OTHER PROCESSES? */
    if (!mynod) {
	if (errs) fprintf(stderr, "Found %d errors.\n", errs);
	else printf(" No Errors\n");
    }
    
    MPI_File_close(&fh);
    free(filename);
    MPI_Info_free(&info_used);
    MPI_Info_free(&info);
    MPI_Finalize();
    return 0;
}
Esempio n. 20
0
int main (int argc, char *argv[])
{
void inidat(), prtdat(), update();
float  u[2][NXPROB][NYPROB];        /* array for grid */
int	taskid,                     /* this task's unique id */
	numworkers,                 /* number of worker processes */
	numtasks,                   /* number of tasks */
	averow,rows,offset,extra,   /* for sending rows of data */
	dest, source,               /* to - from for message send-receive */
	left,right,        /* neighbor tasks */
	msgtype,                    /* for message types */
	rc,start,end,               /* misc */
	i,ix,iy,iz,it;              /* loop variables */
MPI_Status status;


/* First, find out my taskid and how many tasks are running */
   MPI_Init(&argc,&argv);
   MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
   MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
   numworkers = numtasks-1;

   if (taskid == MASTER) {
      /************************* master code *******************************/
      /* Check if numworkers is within range - quit if not */
      if ((numworkers > MAXWORKER) || (numworkers < MINWORKER)) {
         printf("ERROR: the number of tasks must be between %d and %d.\n",
                 MINWORKER+1,MAXWORKER+1);
         printf("Quitting...\n");
         MPI_Abort(MPI_COMM_WORLD, 1);
         exit(1);
         }
      printf ("Starting mpi_heat2D with %d worker tasks.\n", numworkers);

      /* Initialize grid */
      printf("Grid size: X= %d  Y= %d  Time steps= %d\n",NXPROB,NYPROB,STEPS);
      printf("Initializing grid and writing initial.dat file...\n");
      inidat(NXPROB, NYPROB, u);
      prtdat(NXPROB, NYPROB, u, "initial.dat");

      /* Distribute work to workers.  Must first figure out how many rows to */
      /* send and what to do with extra rows.  */
      averow = NXPROB/numworkers;
      extra = NXPROB%numworkers;
      offset = 0;
      for (i=1; i<=numworkers; i++)
      {
         rows = (i <= extra) ? averow+1 : averow; 
         /* Tell each worker who its neighbors are, since they must exchange */
         /* data with each other. */  
         if (i == 1) 
            left = NONE;
         else
            left = i - 1;
         if (i == numworkers)
            right = NONE;
         else
            right = i + 1;
         /*  Now send startup information to each worker  */
         dest = i;
         MPI_Send(&offset, 1, MPI_INT, dest, BEGIN, MPI_COMM_WORLD);
         MPI_Send(&rows, 1, MPI_INT, dest, BEGIN, MPI_COMM_WORLD);
         MPI_Send(&left, 1, MPI_INT, dest, BEGIN, MPI_COMM_WORLD);
         MPI_Send(&right, 1, MPI_INT, dest, BEGIN, MPI_COMM_WORLD);
         MPI_Send(&u[0][offset][0], rows*NYPROB, MPI_FLOAT, dest, BEGIN, 
                  MPI_COMM_WORLD);
         printf("Sent to task %d: rows= %d offset= %d ",dest,rows,offset);
         printf("left= %d right= %d\n",left,right);
         offset = offset + rows;
      }
      /* Now wait for results from all worker tasks */
      for (i=1; i<=numworkers; i++)
      {
         source = i;
         msgtype = DONE;
         MPI_Recv(&offset, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, 
                  &status);
         MPI_Recv(&rows, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, &status);
         MPI_Recv(&u[0][offset][0], rows*NYPROB, MPI_FLOAT, source,
                  msgtype, MPI_COMM_WORLD, &status);
      }

      /* Write final output, call X graph and finalize MPI */
      printf("Writing final.dat file and generating graph...\n");
      prtdat(NXPROB, NYPROB, &u[0][0][0], "final.dat");
      printf("Click on MORE button to view initial/final states.\n");
      printf("Click on EXIT button to quit program.\n");
      //draw_heat(NXPROB,NYPROB);
      MPI_Finalize();
   }   /* End of master code */



   /************************* workers code **********************************/
   if (taskid != MASTER) 
   {
      /* Initialize everything - including the borders - to zero */
      for (iz=0; iz<2; iz++)
         for (ix=0; ix<NXPROB; ix++) 
            for (iy=0; iy<NYPROB; iy++) 
               u[iz][ix][iy] = 0.0;

      /* Receive my offset, rows, neighbors and grid partition from master */
      source = MASTER;
      msgtype = BEGIN;
      MPI_Recv(&offset, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&left, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&right, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&u[0][offset][0], rows*NYPROB, MPI_FLOAT, source, msgtype, 
               MPI_COMM_WORLD, &status);

      /* Determine border elements.  Need to consider first and last columns. */
      /* Obviously, row 0 can't exchange with row 0-1.  Likewise, the last */
      /* row can't exchange with last+1.  */
      start=offset;
      end=offset+rows-1;
      if (offset==0) 
         start=1;
      if ((offset+rows)==NXPROB) 
         end--;
      printf("task=%d  start=%d  end=%d\n",taskid,start,end);

      /* Begin doing STEPS iterations.  Must communicate border rows with */
      /* neighbors.  If I have the first or last grid row, then I only need */
      /*  to  communicate with one neighbor  */
      printf("Task %d received work. Beginning time steps...\n",taskid);
      iz = 0;
      for (it = 1; it <= STEPS; it++)
      {
         if (left != NONE)
         {
            MPI_Send(&u[iz][offset][0], NYPROB, MPI_FLOAT, left,
                     RTAG, MPI_COMM_WORLD);
            source = left;
            msgtype = LTAG;
            MPI_Recv(&u[iz][offset-1][0], NYPROB, MPI_FLOAT, source,
                      msgtype, MPI_COMM_WORLD, &status);
         }
         if (right != NONE)
         {
            MPI_Send(&u[iz][offset+rows-1][0], NYPROB, MPI_FLOAT, right,
                      LTAG, MPI_COMM_WORLD);
            source = right;
            msgtype = RTAG;
            MPI_Recv(&u[iz][offset+rows][0], NYPROB, MPI_FLOAT, source, msgtype,
                      MPI_COMM_WORLD, &status);
         }
         /* Now call update to update the value of grid points */
         update(start,end,NYPROB,&u[iz][0][0],&u[1-iz][0][0]);
         iz = 1 - iz;
      }
      /* Finally, send my portion of final results back to master */
      MPI_Send(&offset, 1, MPI_INT, MASTER, DONE, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, MASTER, DONE, MPI_COMM_WORLD);
      MPI_Send(&u[iz][offset][0], rows*NYPROB, MPI_FLOAT, MASTER, DONE, 
               MPI_COMM_WORLD);
      MPI_Finalize();
   }
}
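update() is only declared in this heat2D example. A typical explicit finite-difference body consistent with how it is called, written under the assumption of diffusion parameters CX and CY (values made up here), is:

#define CX 0.1f   /* assumed x diffusion coefficient */
#define CY 0.1f   /* assumed y diffusion coefficient */

/* One explicit time step over rows start..end of a row-major (NXPROB x ny) grid:
 * u2 = u1 + CX * d2u/dx2 + CY * d2u/dy2, interior columns only. */
void update(int start, int end, int ny, float *u1, float *u2)
{
   int ix, iy;
   for (ix = start; ix <= end; ix++)
      for (iy = 1; iy <= ny-2; iy++)
         *(u2 + ix*ny + iy) = *(u1 + ix*ny + iy) +
            CX * (*(u1 + (ix+1)*ny + iy) + *(u1 + (ix-1)*ny + iy)
                  - 2.0f * *(u1 + ix*ny + iy)) +
            CY * (*(u1 + ix*ny + iy+1) + *(u1 + ix*ny + iy-1)
                  - 2.0f * *(u1 + ix*ny + iy));
}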
Esempio n. 21
0
/* STUB */
int PMPI_Graphdims_get ( MPI_Comm comm, int *nnodes, int *nedges )
{
    fprintf(stderr,"%s:%d: NOT IMPLEMENTED\n",__FILE__,__LINE__);
    return MPI_Abort((MPI_Comm)NULL, MPI_UNDEFINED);
}
Esempio n. 22
0
/* STUB */
int PMPIO_Test(MPIO_Request *request, int *flag, MPI_Status *status)
{
  fprintf(stderr,"%s:%d: NOT IMPLEMENTED\n",__FILE__,__LINE__);
  return MPI_Abort((MPI_Comm)0, MPI_UNDEFINED); 
}
Esempio n. 23
0
int main (int argc,char*argv[])
{
	int numtasks, taskid, rc, dest, offset, i, j, tag1, tag2, source, chunksize;
	float mysum, sum;
	float update(int myoffset,int chunk,int myid);

	MPI_Init(&argc,&argv);	
	MPI_Status status;

	/***** Initializations *****/
	MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
	if(numtasks%4 != 0){
		printf("Quitting. Number of MPI tasks must be divisible by 4.\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
		exit(0);
	}
	MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
	printf("MPI task %d has started...\n", taskid);
	chunksize = (ARRAYSIZE / numtasks);
	tag2 = 1;
	tag1 = 2;

	/***** Master task only ******/
	if(taskid == MASTER){
		/* Initialize the array */
		sum = 0;
		for(i=0; i<ARRAYSIZE; i++){
			data[i] =  i *1.0;
			sum = sum + data[i];
		}
		printf("Initialized array sum = %e\n",sum);
			
		/* Send each task its portion of the array - master keeps 1st part */
		offset = chunksize;
		for(dest=1; dest<numtasks; dest++){
			MPI_Send(&offset,1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
			MPI_Send(&data[offset], chunksize, MPI_FLOAT, dest, tag2, MPI_COMM_WORLD);
			printf("Sent   %d   elements   to   task   %d   offset= %d\n",chunksize,dest,offset);
			offset = offset + chunksize;
		}
		/* Master does its part of the work */
		offset = 0;
		mysum = update(offset, chunksize, taskid);
			
		/* Wait to receive results from each task */
		for(i=1; i<numtasks; i++){
			source = i;
			MPI_Recv(&offset,1, MPI_INT, source, tag1, MPI_COMM_WORLD,&status);
			MPI_Recv(&data[offset], chunksize, MPI_FLOAT, source, tag2,
			MPI_COMM_WORLD,&status);
		}

		// An MPI_Reduce is needed here so that the sum is computed correctly
		MPI_Reduce(&mysum, &sum, 1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
		
		printf("Sample results: \n");
		offset = 0;
		for(i=0; i<numtasks; i++){
			for(j=0; j<5; j++)
				printf("  %e",data[offset+j]);
			printf("\n");
			offset = offset + chunksize;
		}
		printf("*** Final sum= %e ***\n",sum);
	}/* end of master section */

	/***** Non-master tasks only *****/
	if(taskid > MASTER){
		/* Receive my portion of array from the master task */
		source = MASTER;
		MPI_Recv(&offset,1, MPI_INT, source, tag1, MPI_COMM_WORLD,&status);
		MPI_Recv(&data[offset], chunksize, MPI_FLOAT, source, tag2,
		MPI_COMM_WORLD,&status);
		mysum = update(offset, chunksize, taskid);

		/* Send my results back to the master task */
		dest = MASTER;
		MPI_Send(&offset,1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
		MPI_Send(&data[offset], chunksize, MPI_FLOAT, MASTER, tag2, MPI_COMM_WORLD);
        MPI_Reduce(&mysum,&sum,1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
	}/* end of non-master */
	
	MPI_Finalize();
	
}/* end of main */
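Here too, update() is called but not shown. A minimal sketch consistent with the calls above, assuming `data` is the global float array of length ARRAYSIZE referenced in main():

/* Sketch only: add i to each of my chunk's elements and return my partial sum. */
float update(int myoffset, int chunk, int myid)
{
	int i;
	float mysum = 0.0;
	for (i = myoffset; i < (myoffset + chunk); i++) {
		data[i] = data[i] + i * 1.0;
		mysum = mysum + data[i];
	}
	printf("Task %d mysum = %e\n", myid, mysum);
	return mysum;
}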
Esempio n. 24
0
File: chunk.c Progetto: kraused/pvo
int main( int argc, char** argv )
{
    int ni;
    long N;
    pvo_cookie_t  cookie;
    pvo_float3_t* pts;
    int*          cia;
    int*          cja;
    uint8_t*      types;
    pvo_vtu_file_t fh;
    double* U;
    float*  V;
    int*    ranks;
    double t0, t1, t2, t3;
    long i, nnodes, ncells;
    int N_u, N_v, N_r;
    double r_min, r_max, gamma;

    MPI_Init( &argc, &argv );
    pvo_init( MPI_COMM_WORLD );


    MPI_Barrier( MPI_COMM_WORLD );
    t0 = MPI_Wtime();

    // Minimal and maximal value for the radius
    r_min = parse_cmdline_double();
    r_max = parse_cmdline_double();

    gamma = parse_cmdline_double();

    // Number of grid cells for each direction
    N_u = parse_cmdline_double();
    N_v = parse_cmdline_double();
    N_r = parse_cmdline_double();

    /* The data is decomposed by slicing in the radial
       direction
     */
    decompose( &N_r, &r_min, &r_max );

    nnodes = (N_u+1)*(N_v+1)*(N_r+1);
    ncells =  N_u   * N_v   * N_r   ;

    pts   = malloc( nnodes*sizeof(pvo_float3_t) );
    cia   = malloc( (ncells+1)*sizeof(int) );
    cja   = malloc( 8*ncells*sizeof(int) );
    U     = malloc( nnodes*3*sizeof(double) );
    V     = malloc( ncells*sizeof(float) );
    types = malloc( ncells*sizeof(uint8_t) );
    ranks = malloc( ncells*sizeof(int) );

    ni = parse_cmdline_int();

    if( 0 == pvo_world_rank() )
    {
        printf( " +-----------------------------------------+\n" );
        printf( " |         PVO CHUNK VTU BENCHMARK         |\n" );
        printf( " +-----------------------------------------+\n" );
        printf( "\n");
        printf( " PVO_DEFAULT_LOW_IO_LAYER   : \"%s\"\n", str_low_io_layer[PVO_DEFAULT_LOW_IO_LAYER] );
        printf( " no of cores                : %d\n", pvo_world_size() );
        printf( " no of nodes                : %d\n", nnodes );
        printf( " no of cells                : %d\n", ncells );
        printf( " no of islands              : %d\n", ni );
    }

    create_chunk( N_u, N_v, N_r, r_min, r_max, gamma, nnodes, pts, ncells, cia, cja, types, U, V );
    for( i = 0; i < ncells; ++i )
       ranks[i] = pvo_world_rank();

    if( -1 == pvo_cookie_create( pvo_world_rank()%ni, &cookie ))
        MPI_Abort( MPI_COMM_WORLD, __LINE__ );

    if( -1 == pvo_cookie_insert_var( cookie, PVO_VAR_NODEDATA, PVO_VAR_FLOAT64, 3, "U", U ))
        MPI_Abort( MPI_COMM_WORLD, __LINE__ );

    if( -1 == pvo_cookie_insert_var( cookie, PVO_VAR_CELLDATA, PVO_VAR_FLOAT32, 1, "V", V ))
        MPI_Abort( MPI_COMM_WORLD, __LINE__ );

    if( -1 == pvo_cookie_insert_var( cookie, PVO_VAR_CELLDATA, PVO_VAR_INT32, 1, "ranks", ranks ))
        MPI_Abort( MPI_COMM_WORLD, __LINE__ );

    if( -1 == pvo_vtu_file_open( "chunk", cookie, nnodes, pts, ncells, cia, cja, types, &fh ))
        MPI_Abort( MPI_COMM_WORLD, __LINE__ );

    MPI_Barrier( MPI_COMM_WORLD );
    t1 = MPI_Wtime();

    pvo_file_write( (pvo_file_t )fh );

    MPI_Barrier( MPI_COMM_WORLD );
    t2 = MPI_Wtime();

    if( 0 == pvo_world_rank() )
        printf( " time [sec]                 : %f\n", t2-t1 );

     N = (3*4/*pts*/ + 3*8/*U*/)*nnodes + (4/*cia*/ + 8*4/*cja*/ + 1/*types*/ + 4/*V*/ + 4/*ranks*/)*ncells;
     MPI_Allreduce( MPI_IN_PLACE, &N, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD );
    if( 0 == pvo_world_rank() )
        printf( " bandwidth [MB/sec]         : %f\n", (1.0*N)/(1048576*(t2-t1)) );

    if( -1 == pvo_vtu_file_close( fh ))
        MPI_Abort( MPI_COMM_WORLD, __LINE__ );

    if( -1 == pvo_cookie_delete( cookie ))
        MPI_Abort( MPI_COMM_WORLD, __LINE__ );

    MPI_Barrier( MPI_COMM_WORLD );
    t3 = MPI_Wtime();

    if( 0 == pvo_world_rank() )
    {
        printf( " total execution time [sec] : %f\n", t3-t0 );
        printf( "\n" );
    }

    pvo_quit();
    return MPI_Finalize();
}
Esempio n. 25
0
unsigned long CSysSolve::CG_LinSolver(const CSysVector & b, CSysVector & x, CMatrixVectorProduct & mat_vec,
                                           CPreconditioner & precond, su2double tol, unsigned long m, bool monitoring) {
	
int rank = 0;

#ifdef HAVE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
  
  /*--- Check the subspace size ---*/
  if (m < 1) {
    if (rank == MASTER_NODE) cerr << "CSysSolve::ConjugateGradient: illegal value for subspace size, m = " << m << endl;
#ifndef HAVE_MPI
    exit(EXIT_FAILURE);
#else
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Abort(MPI_COMM_WORLD,1);
    MPI_Finalize();
#endif
  }
  
  CSysVector r(b);
  CSysVector A_p(b);
  
  /*--- Calculate the initial residual, compute norm, and check if system is already solved ---*/
  mat_vec(x, A_p);
  
  r -= A_p; // recall, r holds b initially
  su2double norm_r = r.norm();
  su2double norm0 = b.norm();
  if ( (norm_r < tol*norm0) || (norm_r < eps) ) {
    if (rank == MASTER_NODE) cout << "CSysSolve::ConjugateGradient(): system solved by initial guess." << endl;
    return 0;
  }
  
  su2double alpha, beta, r_dot_z;
  CSysVector z(r);
  precond(r, z);
  CSysVector p(z);
  
  /*--- Set the norm to the initial residual value ---*/
  norm0 = norm_r;
  
  /*--- Output header information including initial residual ---*/
  int i = 0;
  if ((monitoring) && (rank == MASTER_NODE)) {
    WriteHeader("CG", tol, norm_r);
    WriteHistory(i, norm_r, norm0);
  }
  
  /*---  Loop over all search directions ---*/
  for (i = 0; i < (int)m; i++) {
    
    /*--- Apply matrix to p to build Krylov subspace ---*/
    mat_vec(p, A_p);
    
    /*--- Calculate step-length alpha ---*/
    r_dot_z = dotProd(r, z);
    alpha = dotProd(A_p, p);
    alpha = r_dot_z / alpha;
    
    /*--- Update solution and residual: ---*/
    x.Plus_AX(alpha, p);
    r.Plus_AX(-alpha, A_p);
    
    /*--- Check if solution has converged, else output the relative residual if necessary ---*/
    norm_r = r.norm();
    if (norm_r < tol*norm0) break;
    if (((monitoring) && (rank == MASTER_NODE)) && ((i+1) % 5 == 0)) WriteHistory(i+1, norm_r, norm0);
    
    precond(r, z);
    
    /*--- Calculate Gram-Schmidt coefficient beta,
		 beta = dotProd(r_{i+1}, z_{i+1}) / dotProd(r_{i}, z_{i}) ---*/
    beta = 1.0 / r_dot_z;
    r_dot_z = dotProd(r, z);
    beta *= r_dot_z;
    
    /*--- Gram-Schmidt orthogonalization; p = beta *p + z ---*/
    p.Equals_AX_Plus_BY(beta, p, 1.0, z);
  }
  

  
  if ((monitoring) && (rank == MASTER_NODE)) {
    cout << "# Conjugate Gradient final (true) residual:" << endl;
    cout << "# Iteration = " << i << ": |res|/|res0| = "  << norm_r/norm0 << ".\n" << endl;
  }
  
//  /*--- Recalculate final residual (this should be optional) ---*/
//  mat_vec(x, A_p);
//  r = b;
//  r -= A_p;
//  su2double true_res = r.norm();
//  
//  if (fabs(true_res - norm_r) > tol*10.0) {
//    if (rank == MASTER_NODE) {
//      cout << "# WARNING in CSysSolve::ConjugateGradient(): " << endl;
//      cout << "# true residual norm and calculated residual norm do not agree." << endl;
//      cout << "# true_res - calc_res = " << true_res - norm_r << endl;
//    }
//  }
	
	return (unsigned long) i;
  
}
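In equation form, the loop above is the standard preconditioned conjugate gradient recurrence with preconditioner $M$ (the code's z holds $M^{-1} r$):

\[
\alpha_k = \frac{(r_k, z_k)}{(p_k, A p_k)}, \qquad
x_{k+1} = x_k + \alpha_k p_k, \qquad
r_{k+1} = r_k - \alpha_k A p_k,
\]
\[
z_{k+1} = M^{-1} r_{k+1}, \qquad
\beta_k = \frac{(r_{k+1}, z_{k+1})}{(r_k, z_k)}, \qquad
p_{k+1} = z_{k+1} + \beta_k p_k .
\]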
Esempio n. 26
0
static void DTAR_abort(int code)
{
    MPI_Abort(MPI_COMM_WORLD, code);
    exit(code);
}
Esempio n. 27
0
unsigned long CSysSolve::BCGSTAB_LinSolver(const CSysVector & b, CSysVector & x, CMatrixVectorProduct & mat_vec,
                                 CPreconditioner & precond, su2double tol, unsigned long m, su2double *residual, bool monitoring) {
	
  int rank = 0;
#ifdef HAVE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
  
  /*--- Check the subspace size ---*/
  
  if (m < 1) {
    if (rank == MASTER_NODE) cerr << "CSysSolve::BCGSTAB: illegal value for subspace size, m = " << m << endl;
#ifndef HAVE_MPI
    exit(EXIT_FAILURE);
#else
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Abort(MPI_COMM_WORLD,1);
    MPI_Finalize();
#endif
  }
	
  CSysVector r(b);
  CSysVector r_0(b);
  CSysVector p(b);
	CSysVector v(b);
  CSysVector s(b);
	CSysVector t(b);
	CSysVector phat(b);
	CSysVector shat(b);
  CSysVector A_x(b);
  
  /*--- Calculate the initial residual, compute norm, and check if system is already solved ---*/
  
	mat_vec(x, A_x);
  r -= A_x; r_0 = r; // recall, r holds b initially
  su2double norm_r = r.norm();
  su2double norm0 = b.norm();
  if ( (norm_r < tol*norm0) || (norm_r < eps) ) {
    if (rank == MASTER_NODE) cout << "CSysSolve::BCGSTAB(): system solved by initial guess." << endl;
    return 0;
  }
	
	/*--- Initialization ---*/
  
  su2double alpha = 1.0, beta = 1.0, omega = 1.0, rho = 1.0, rho_prime = 1.0;
	
  /*--- Set the norm to the initial residual value ---*/
  
  norm0 = norm_r;
  
  /*--- Output header information including initial residual ---*/
  
  int i = 0;
  if ((monitoring) && (rank == MASTER_NODE)) {
    WriteHeader("BCGSTAB", tol, norm_r);
    WriteHistory(i, norm_r, norm0);
  }
	
  /*---  Loop over all search directions ---*/
  
  for (i = 0; i < (int)m; i++) {
		
		/*--- Compute rho_prime ---*/
    
		rho_prime = rho;
		
		/*--- Compute rho_i ---*/
    
		rho = dotProd(r, r_0);
		
		/*--- Compute beta ---*/
    
		beta = (rho / rho_prime) * (alpha /omega);
		
		/*--- p_{i} = r_{i-1} + beta * p_{i-1} - beta * omega * v_{i-1} ---*/
    
		su2double beta_omega = -beta*omega;
		p.Equals_AX_Plus_BY(beta, p, beta_omega, v);
		p.Plus_AX(1.0, r);
		
		/*--- Preconditioning step ---*/
    
		precond(p, phat);
		mat_vec(phat, v);

		/*--- Calculate step-length alpha ---*/
    
    su2double r_0_v = dotProd(r_0, v);
    alpha = rho / r_0_v;
    
		/*--- s_{i} = r_{i-1} - alpha * v_{i} ---*/
    
		s.Equals_AX_Plus_BY(1.0, r, -alpha, v);
		
		/*--- Preconditioning step ---*/
    
		precond(s, shat);
		mat_vec(shat, t);
    
		/*--- Calculate step-length omega ---*/
    
    omega = dotProd(t, s) / dotProd(t, t);
    
		/*--- Update solution and residual: ---*/
    
    x.Plus_AX(alpha, phat); x.Plus_AX(omega, shat);
		r.Equals_AX_Plus_BY(1.0, s, -omega, t);
    
    /*--- Check if solution has converged, else output the relative residual if necessary ---*/
    
    norm_r = r.norm();
    if (norm_r < tol*norm0) break;
    if ((monitoring) && (rank == MASTER_NODE) && ((i+1) % 50 == 0)) WriteHistory(i+1, norm_r, norm0);
    
  }
	  
  if ((monitoring) && (rank == MASTER_NODE)) {
    cout << "# BCGSTAB final (true) residual:" << endl;
    cout << "# Iteration = " << i << ": |res|/|res0| = "  << norm_r/norm0 << ".\n" << endl;
  }
	
//  /*--- Recalculate final residual (this should be optional) ---*/
//	mat_vec(x, A_x);
//  r = b; r -= A_x;
//  su2double true_res = r.norm();
//  
//  if ((fabs(true_res - norm_r) > tol*10.0) && (rank == MASTER_NODE)) {
//    cout << "# WARNING in CSysSolve::BCGSTAB(): " << endl;
//    cout << "# true residual norm and calculated residual norm do not agree." << endl;
//    cout << "# true_res - calc_res = " << true_res <<" "<< norm_r << endl;
//  }
	
  (*residual) = norm_r;
	return (unsigned long) i;
}
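For reference, the loop above follows the standard right-preconditioned BiCGSTAB recurrence, with $\hat r_0$ the frozen initial residual and $M$ the preconditioner (the code's phat and shat hold $M^{-1} p_i$ and $M^{-1} s$):

\[
\rho_i = (r_{i-1}, \hat r_0), \qquad
\beta = \frac{\rho_i}{\rho_{i-1}}\,\frac{\alpha}{\omega_{i-1}}, \qquad
p_i = r_{i-1} + \beta\,(p_{i-1} - \omega_{i-1} v_{i-1}),
\]
\[
\hat p = M^{-1} p_i, \quad v_i = A \hat p, \quad
\alpha = \frac{\rho_i}{(\hat r_0, v_i)}, \quad
s = r_{i-1} - \alpha v_i, \quad
\hat s = M^{-1} s, \quad t = A \hat s,
\]
\[
\omega_i = \frac{(t, s)}{(t, t)}, \qquad
x_i = x_{i-1} + \alpha \hat p + \omega_i \hat s, \qquad
r_i = s - \omega_i t .
\]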
Esempio n. 28
0
void
tw_net_abort(void)
{
  MPI_Abort(MPI_COMM_ROSS, 1);
  exit(1);
}
Esempio n. 29
0
void ss_abort(int error)
{
	MPI_Abort(MPI_COMM_WORLD, error);
}
Esempio n. 30
0
/* STUB */
int PMPI_File_sync(MPI_File fh)
{
  fprintf(stderr,"%s:%d: NOT IMPLEMENTED\n",__FILE__,__LINE__);
  return MPI_Abort((MPI_Comm)NULL, MPI_UNDEFINED); 
}