Example #1
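/** Copy a vector field between send and receive halo buffers.
 *  Each of the count elements occupies extent bytes and consists of vblocks
 *  blocks of vstride sub-elements of the subtype, with vskip bytes between
 *  the starts of consecutive blocks; if vflag is set, vskip is given in units
 *  of the subtype extent and converted to bytes first.
 */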
MDINLINE void halo_copy_vector(void *r_buffer, void *s_buffer, int count, Fieldtype type, int vflag) {
  int i, j;
  /* the offset arithmetic below is byte-wise, so work on char pointers
     (arithmetic on void* is a compiler extension, not standard C) */
  char *dest, *src;
  char *r_ptr = (char *)r_buffer;
  char *s_ptr = (char *)s_buffer;

  int vblocks = type->vblocks;
  int vstride = type->vstride;
  int vskip   = type->vskip;
  int extent  = type->extent;

  HALO_TRACE(fprintf(stderr, "%d: halo_copy_vector %p %p vblocks=%d vstride=%d vskip=%d extent=%d subtype_extent=%d\n",this_node,r_buffer,s_buffer,vblocks,vstride,vskip,extent,type->subtype->extent));

  if (vflag) {
    vskip *= type->subtype->extent;
  }

  for (i = 0; i < count; i++, s_ptr += extent, r_ptr += extent) {
    for (j = 0, dest = r_ptr, src = s_ptr; j < vblocks; j++, dest += vskip, src += vskip) {
      halo_dtcopy(dest, src, vstride, type->subtype);
    }
  }

}
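The vblocks/vstride/vskip triple describes the same kind of strided layout that MPI_Type_vector uses: vblocks runs of vstride elements each, separated by a fixed skip. The following self-contained sketch illustrates that access pattern with plain doubles and memcpy standing in for Fieldtype and halo_dtcopy; the function name and element type are illustrative only, not part of the original code.

#include <string.h>
#include <stdio.h>

/* Illustrative strided copy: vblocks runs of vstride doubles each,
 * with vskip doubles between the starts of consecutive runs. */
static void copy_vector_sketch(double *dst, const double *src,
                               int vblocks, int vstride, int vskip) {
  for (int j = 0; j < vblocks; j++)
    memcpy(dst + (size_t)j * vskip, src + (size_t)j * vskip,
           (size_t)vstride * sizeof(double));
}

int main(void) {
  /* copy one column-like stripe out of a 4x4 block of doubles:
     4 blocks of 1 element each, spaced 4 elements apart */
  double src[16], dst[16] = {0.0};
  for (int i = 0; i < 16; i++) src[i] = (double)(i + 1);
  copy_vector_sketch(dst, src, 4, 1, 4);
  for (int i = 0; i < 16; i++) printf("%g%c", dst[i], (i % 4 == 3) ? '\n' : ' ');
  return 0;
}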
Example #2
/** Perform communication according to the parallelization scheme
 *  described by the halo communicator.
 *  @param hc   halo communicator describing the parallelization scheme
 *  @param base base pointer of the lattice data whose halo regions are
 *              exchanged; the s_offset/r_offset entries in hc are relative to it
 */
void halo_communication(HaloCommunicator *hc, void *base) {
  int n, comm_type, s_node, r_node;
  void *s_buffer, *r_buffer;

  Fieldtype fieldtype;
  MPI_Datatype datatype;
  MPI_Request request;
  MPI_Status status;

    HALO_TRACE(fprintf(stderr, "%d: halo_comm base=%p num=%d\n", this_node, base, hc->num)) ;

    for (n = 0; n < hc->num; n++) {

	HALO_TRACE(fprintf(stderr, "%d: halo_comm round %d\n", this_node, n)) ;

	comm_type = hc->halo_info[n].type ;
	s_buffer = (char *)base + hc->halo_info[n].s_offset;
	r_buffer = (char *)base + hc->halo_info[n].r_offset;

	switch (comm_type) {

	    case HALO_LOCL:
	      fieldtype = hc->halo_info[n].fieldtype;
	      halo_dtcopy(r_buffer,s_buffer,1,fieldtype);
	      break ;

	    case HALO_SENDRECV:
	      datatype = hc->halo_info[n].datatype;
	      s_node = hc->halo_info[n].source_node ;
	      r_node = hc->halo_info[n].dest_node ;
	      
	      HALO_TRACE(fprintf(stderr,"%d: halo_comm sendrecv %d to %d (%d) (%p)\n",this_node,s_node,r_node,REQ_HALO_SPREAD,&datatype));

	      MPI_Sendrecv(s_buffer, 1, datatype, r_node, REQ_HALO_SPREAD,
			   r_buffer, 1, datatype, s_node, REQ_HALO_SPREAD,
			   MPI_COMM_WORLD, &status);
	      break ;

	    case HALO_SEND:
	      datatype = hc->halo_info[n].datatype;
	      fieldtype = hc->halo_info[n].fieldtype;
	      s_node = hc->halo_info[n].source_node ;
	      r_node = hc->halo_info[n].dest_node ;
	      
	      HALO_TRACE(fprintf(stderr,"%d: halo_comm send to %d.\n",this_node,r_node));

	      MPI_Isend(s_buffer, 1, datatype, r_node, REQ_HALO_SPREAD, MPI_COMM_WORLD, &request);
	      halo_dtset(r_buffer,0,fieldtype);
	      MPI_Wait(&request,&status);
	      break;

	    case HALO_RECV:
	      datatype = hc->halo_info[n].datatype;
	      s_node = hc->halo_info[n].source_node ;
	      r_node = hc->halo_info[n].dest_node ;

	      HALO_TRACE(fprintf(stderr,"%d: halo_comm recv from %d.\n",this_node,s_node));

	      MPI_Irecv(r_buffer, 1, datatype, s_node, REQ_HALO_SPREAD, MPI_COMM_WORLD, &request);
	      MPI_Wait(&request,&status);
	      break;

	    case HALO_OPEN:
	      fieldtype = hc->halo_info[n].fieldtype;

	      HALO_TRACE(fprintf(stderr,"%d: halo_comm open boundaries\n",this_node));

	      /* \todo this does not work for the n_i - <n_i> */
	      halo_dtset(r_buffer,0,fieldtype);
	      break;
	      
	}

    }

}
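In the HALO_SENDRECV case the actual exchange is a single MPI_Sendrecv per round, with the strided halo layer described by the prepared hc->halo_info[n].datatype. The standalone sketch below reproduces that exchange on a one-dimensional ring of ranks, building the column datatype directly with MPI_Type_vector; the buffer sizes and the ring topology are assumptions for illustration, not taken from the original code.

#include <mpi.h>
#include <stdio.h>

#define NX 8   /* local lattice extent, illustrative */
#define NY 8

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  /* local field with one halo column on each side: (NX+2) x NY, row-major */
  double field[(NX + 2) * NY];
  for (int i = 0; i < (NX + 2) * NY; i++) field[i] = rank;

  /* one column of the field: NY blocks of 1 double, NX+2 doubles apart */
  MPI_Datatype column;
  MPI_Type_vector(NY, 1, NX + 2, MPI_DOUBLE, &column);
  MPI_Type_commit(&column);

  int right = (rank + 1) % size;
  int left  = (rank + size - 1) % size;

  /* send the rightmost real column to the right neighbour and receive the
     left neighbour's rightmost column into the local left halo column */
  MPI_Sendrecv(&field[NX], 1, column, right, 0,
               &field[0],  1, column, left,  0,
               MPI_COMM_WORLD, MPI_STATUS_IGNORE);

  printf("rank %d: left halo now holds %g\n", rank, field[0]);

  MPI_Type_free(&column);
  MPI_Finalize();
  return 0;
}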
Example #3
/** Perform communication according to the parallelization scheme
 *  described by the halo communicator.
 *  @param hc halo communicator describing the parallelization scheme
 */
void halo_communication(HaloCommunicator *hc) {
  int n, comm_type, s_node, r_node;
  void *s_buffer, *r_buffer;

  Fieldtype fieldtype;
  MPI_Datatype datatype;
  MPI_Request request;
  MPI_Status status;

    HALO_TRACE(fprintf(stderr, "%d: halo_comm %p (num=%d)\n", this_node, hc, hc->num)) ;

    for (n = 0; n < hc->num; n++) {

	HALO_TRACE(fprintf(stderr, "%d: halo_comm round %d\n", this_node, n)) ;

	comm_type = hc->halo_info[n].type ;
	s_buffer = hc->halo_info[n].send_buffer ;
	r_buffer = hc->halo_info[n].recv_buffer ;

	switch (comm_type) {

	    case HALO_LOCL:
	      fieldtype = hc->halo_info[n].fieldtype;
	      halo_dtcopy(r_buffer,s_buffer,fieldtype);
	      break ;

	    case HALO_SENDRECV:
	      datatype = hc->halo_info[n].datatype;
	      s_node = hc->halo_info[n].source_node ;
	      r_node = hc->halo_info[n].dest_node ;
	      
	      HALO_TRACE(fprintf(stderr,"%d: halo_comm sendrecv %d to %d (%d) (%p)\n",this_node,s_node,r_node,REQ_HALO_SPREAD,&datatype));

	      MPI_Sendrecv(s_buffer, 1, datatype, r_node, REQ_HALO_SPREAD,
			   r_buffer, 1, datatype, s_node, REQ_HALO_SPREAD,
			   MPI_COMM_WORLD, &status);
	      break ;

	    case HALO_SEND:
	      datatype = hc->halo_info[n].datatype;
	      fieldtype = hc->halo_info[n].fieldtype;
	      s_node = hc->halo_info[n].source_node ;
	      r_node = hc->halo_info[n].dest_node ;
	      
	      HALO_TRACE(fprintf(stderr,"%d: halo_comm send to %d.\n",this_node,r_node));

	      MPI_Isend(s_buffer, 1, datatype, r_node, REQ_HALO_SPREAD, MPI_COMM_WORLD, &request);
	      halo_dtset(r_buffer,0,fieldtype);
	      MPI_Wait(&request,&status);
	      break;

	    case HALO_RECV:
	      datatype = hc->halo_info[n].datatype;
	      s_node = hc->halo_info[n].source_node ;
	      r_node = hc->halo_info[n].dest_node ;

	      HALO_TRACE(fprintf(stderr,"%d: halo_comm recv from %d.\n",this_node,s_node));

	      MPI_Irecv(r_buffer, 1, datatype, s_node, REQ_HALO_SPREAD, MPI_COMM_WORLD, &request);
	      MPI_Wait(&request,&status);
	      break;

	    case HALO_OPEN:
	      fieldtype = hc->halo_info[n].fieldtype;

	      HALO_TRACE(fprintf(stderr,"%d: halo_comm open boundaries\n",this_node));

	      halo_dtset(r_buffer,0,fieldtype);
	      break;
	      
	}

    }

    HALO_TRACE(fprintf(stderr, "%d: halo_comm %p finished\n", this_node, hc));

}
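The HALO_SEND and HALO_RECV branches are the asymmetric counterpart used where only one direction communicates: one side posts a non-blocking send and zeroes its own receive region (nothing arrives there), while the other side only posts a receive. The two-rank sketch below imitates that pairing with MPI_Isend/MPI_Irecv and a plain memset standing in for halo_dtset; the buffer layout and tag are assumptions for illustration.

#include <mpi.h>
#include <string.h>
#include <stdio.h>

#define N 16  /* halo buffer length, illustrative */

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  double s_buffer[N], r_buffer[N];
  for (int i = 0; i < N; i++) s_buffer[i] = rank + 1.0;

  MPI_Request request;
  if (rank == 0) {
    /* HALO_SEND-like side: send, zero the local receive region, then wait */
    MPI_Isend(s_buffer, N, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD, &request);
    memset(r_buffer, 0, sizeof(r_buffer));   /* stands in for halo_dtset() */
    MPI_Wait(&request, MPI_STATUS_IGNORE);
  } else if (rank == 1) {
    /* HALO_RECV-like side: just receive */
    MPI_Irecv(r_buffer, N, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &request);
    MPI_Wait(&request, MPI_STATUS_IGNORE);
    printf("rank 1 received %g\n", r_buffer[0]);
  }

  MPI_Finalize();
  return 0;
}

Run with at least two ranks, e.g. mpirun -n 2; any additional ranks simply stay idle.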