Example #1
void NEKTAR_MEX::MEX_max_fabs(double *val){

  double a,b;
  double *dp;
  int *map;
  int i, j,partner,index;

  MEX_post_recv();


  for (partner = 0; partner < Npartners; ++partner){
    dp = send_buffer[partner];
    map = message_send_map[partner];
    for (i = 0; i < message_size[partner]; ++i)
       dp[i] = val[map[i]];
  }

  MEX_post_send();


  for (partner = 0; partner < Npartners; partner++){
     MPI_Waitany(Npartners,request_recv,&index,MPI_STATUS_IGNORE);
     dp = recv_buffer[index];
     map = message_recv_map[index];
     for (i = 0; i < message_size[index]; ++i){
        j = map[i];
        a = fabs(val[j]);
        b = fabs(dp[i]);
        if (b>a)
           val[j] = dp[i];
     }
  }
  MPI_Waitall(Npartners,request_send,MPI_STATUSES_IGNORE);
}
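MEX_post_recv() and MEX_post_send() are not shown above. A minimal sketch of what they are assumed to do, namely post one nonblocking receive/send per partner into request_recv/request_send (hypothetical helper name, argument list, and tag, not the NEKTAR_MEX source):

#include <mpi.h>

/* Sketch: post one MPI_Irecv per partner; the send side is the mirror
 * image with MPI_Isend over send_buffer/request_send.                 */
void post_partner_recvs(int npartners, double **recv_buffer,
                        const int *message_size, const int *partner_rank,
                        MPI_Comm comm, MPI_Request *request_recv)
{
  int p;
  for (p = 0; p < npartners; ++p)
    MPI_Irecv(recv_buffer[p], message_size[p], MPI_DOUBLE,
              partner_rank[p], 0, comm, &request_recv[p]);
}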
Example #2
    /*
      We only output once every output_interval time unit, at most.
      Without that restriction, we can easily create a huge output
      file.  Printing a record for ten fish takes about 300 bytes, so
      for every 1000 steps, we could dump 300K of info.  Now scale the
      number of fish by 1000...
     */
    trace_begin(TRACE_OUTPUT);
    if (outputp && curr_time >= output_time) {
      if (0 == rank)
        output_fish (output_fp, curr_time, dt, fish, n_fish);
      output_time = curr_time + output_interval;
    }
    trace_end(TRACE_OUTPUT);

    trace_begin (TRACE_LOCAL_COMP);
    //interact_fish (local_fish, n_local_fish, fish, n_fish);

    local_max_norm = compute_norm (local_fish, n_local_fish);
    trace_end (TRACE_LOCAL_COMP);

    trace_begin (TRACE_MAX_NORM);
    start_mpi_timer (&mpi_timer);
printf("rank[%d], iter[%d] ------- Allreduce max_norm, \n", rank, iter);
	MPI_Allreduce (&local_max_norm, &max_norm, 1, MPI_DOUBLE, MPI_MAX, comm);
printf("rank[%d], iter[%d] ------- local_max_norm: %g, max_norm: %g\n", local_max_norm, max_norm);
    stop_mpi_timer (&mpi_timer);
    trace_end (TRACE_MAX_NORM);

    trace_begin (TRACE_LOCAL_COMP);
    dt = max_norm_change / max_norm;
    dt = f_max(dt, min_dt);
    dt = f_min(dt, max_dt);

printf("rank[%d], iter[%d] ------- moving [%d] local_fish, \n", rank, iter, n_local_fish);
    move_fish(local_fish, n_local_fish, dt);
printf("rank[%d], iter[%d] ------- finished moving.\n", rank, iter);
    
    trace_end (TRACE_LOCAL_COMP);
iter++;
  }

  stop_mpi_timer(&total_timer);

#ifdef TRACE_WITH_VAMPIR
    VT_traceoff();
#endif

  if (outputp) {
    MPI_Allgatherv (local_fish, n_local_fish, fishtype,
		    fish, n_fish_split, fish_off, fishtype, comm);
    if (0 == rank) {
      output_fish (output_fp, curr_time, dt, fish, n_fish);
      printf("\tEnded at %g (%g), %d (%d) steps\n",
	     curr_time, end_time, steps, max_steps);
    }
  }

printf("rank[%d], ------- 39, \n", rank);
  MPI_Reduce (&total_timer, &sum_total_timer, 1, MPI_DOUBLE,
	      MPI_SUM, 0, comm);
printf("rank[%d], ------- 40, \n", rank);
  MPI_Reduce (&gather_timer, &sum_gather_timer, 1, MPI_DOUBLE,
	      MPI_SUM, 0, comm);
printf("rank[%d], ------- 41, \n", rank);
  MPI_Reduce (&mpi_timer, &sum_mpi_timer, 1, MPI_DOUBLE,
	      MPI_SUM, 0, comm);
printf("rank[%d], ------- 42, \n", rank);

  if (0 == rank) {
    printf("Number of PEs: %d\n"
	   "Time taken on 0: %g (avg. %g)\n"
	   "Time in gathers on 0: %g (avg %g)\n"
	   "Time in MPI on 0: %g (avg %g)\n",
	   n_proc,
	   total_timer, sum_total_timer / n_proc,
	   gather_timer, sum_gather_timer / n_proc,
	   mpi_timer, sum_mpi_timer / n_proc);
  }

printf("rank[%d], ------- 43, \n", rank);
  MPI_Barrier (comm);
printf("rank[%d], ------- 44, \n", rank);
  MPI_Finalize ();
printf("rank[%d], ------- done!!, \n", rank);
  return 0;
}

//*************************************************************************************/
////////////////////////////////////////////////////////////////////////////////////////
// Isend_receive_fish()
////////////////////////////////////////////////////////////////////////////////////////
//*************************************************************************************/
void Isend_receive_fish(fish_t** send_fish, int* n_send_fish, 
						fish_t receive_fish[NUM_NEIGHBOR][n_fish], int n_receive_fish,
						MPI_Request* sendReqArray, MPI_Request* recvReqArray) {
	int mesTag = 0;
	int rankNeighbor[NUM_NEIGHBOR];
    rankNeighbor[0] = rank - row - 1;
    rankNeighbor[1] = rank - row;
    rankNeighbor[2] = rank - row + 1;
    rankNeighbor[3] = rank - 1;
    rankNeighbor[4] = rank + 1;
    rankNeighbor[5] = rank + row - 1;
    rankNeighbor[6] = rank + row;
    rankNeighbor[7] = rank + row + 1;						
						
	assert(n_proc >= 9);
    //Index for receive_fish is the rank of the source the message's coming from.
    
	// sendrecv from neighbor 0, 7
    if (!(rank < column) && !(rank % column == 0)) {
dbg++;
printf("rank[%d], iter[%d] ------- send [%d] fish to neig[0], \n", rank, iter, n_send_fish[0]);
        // neighbor 0
        MPI_Isend(send_fish[0], n_send_fish[0], fishtype, rankNeighbor[0], mesTag, comm, &sendReqArray[0]);
        // receive from neighbor 0
        MPI_Irecv(receive_fish[0], n_receive_fish, fishtype, rankNeighbor[0], MPI_ANY_TAG, comm, &recvReqArray[0]);
	} else {
		recvReqArray[0] = MPI_REQUEST_NULL;
	}

    if (!(rank >= n_proc - column) && ((rank + 1) % column != 0)) {
dbg++;
printf("rank[%d], iter[%d] ------- send [%d] fish to neig[7], \n", rank, iter, n_send_fish[7]);
        // receive from neighbor 7
        MPI_Irecv(receive_fish[7], n_receive_fish, fishtype, rankNeighbor[7], MPI_ANY_TAG, comm, &recvReqArray[7]);
        // neighbor 7
        MPI_Isend(send_fish[7], n_send_fish[7], fishtype, rankNeighbor[7], mesTag, comm, &sendReqArray[7]);
    } else {
		recvReqArray[7] = MPI_REQUEST_NULL;
	}

    //j++;
    // sendrecv from neighbor 1, 6
    if (!(rank < column)) {
dbg++;
printf("rank[%d], iter[%d] ------- send [%d] fish to neig[1], \n", rank, iter, n_send_fish[1]);
        // neighbor 1
        MPI_Isend(send_fish[1], n_send_fish[1], fishtype, rankNeighbor[1], mesTag, comm, &sendReqArray[1]);
        // receive from neighbor 1
        MPI_Irecv(receive_fish[1], n_receive_fish, fishtype, rankNeighbor[1], MPI_ANY_TAG, comm, &recvReqArray[1]);
    } else {
		recvReqArray[1] = MPI_REQUEST_NULL;
	}


    if (!(rank >= n_proc - column)){
        // receive from neighbor 6
dbg++;
printf("rank[%d], iter[%d] ------- send [%d] fish to neig[6], \n", rank, iter, n_send_fish[6]);
        MPI_Irecv(receive_fish[6], n_receive_fish, fishtype, rankNeighbor[6], MPI_ANY_TAG, comm, &recvReqArray[6]);
        // neighbor 6
        MPI_Isend(send_fish[6], n_send_fish[6], fishtype, rankNeighbor[6], mesTag, comm, &sendReqArray[6]);
    } else {
		recvReqArray[6] = MPI_REQUEST_NULL;
	}


     // sendrecv from neighbor 2, 5
    if (!(rank < column) && ((rank + 1) % column != 0)) {
dbg++;
printf("rank[%d], iter[%d] ------- send [%d] fish to neig[2], \n", rank, iter, n_send_fish[2]);
        // neighbor 2
        MPI_Isend(send_fish[2], n_send_fish[2], fishtype, rankNeighbor[2], mesTag, comm, &sendReqArray[2]);
        // receive from neighbor 2
        MPI_Irecv(receive_fish[2], n_receive_fish, fishtype, rankNeighbor[2], MPI_ANY_TAG, comm, &recvReqArray[2]);
    } else {
		recvReqArray[2] = MPI_REQUEST_NULL;
	}


    if (!(rank >= n_proc - column) && (rank % column != 0)) {
dbg++;
printf("rank[%d], iter[%d] ------- send [%d] fish to neig[5], \n", rank, iter, n_send_fish[5]);
        // receive from neighbor 5
        MPI_Irecv(receive_fish[5], n_receive_fish, fishtype, rankNeighbor[5], MPI_ANY_TAG, comm, &recvReqArray[5]);
        // neighbor 5
        MPI_Isend(send_fish[5], n_send_fish[5], fishtype, rankNeighbor[5], mesTag, comm, &sendReqArray[5]);
    } else {
		recvReqArray[5] = MPI_REQUEST_NULL;
	}


    //j++;
    // sendrecv from neighbor 3, 4
    if (rank % column != 0) {
dbg++;
printf("rank[%d], iter[%d] ------- send [%d] fish to neig[3], \n", rank, iter, n_send_fish[3]);
        // neighbor 3
        MPI_Isend(send_fish[3], n_send_fish[3], fishtype, rankNeighbor[3], mesTag, comm, &sendReqArray[3]);
        // receive from neighbor 3
        MPI_Irecv(receive_fish[3], n_receive_fish, fishtype, rankNeighbor[3], MPI_ANY_TAG, comm, &recvReqArray[3]);
    } else {
		recvReqArray[3] = MPI_REQUEST_NULL;
	}


    if ((rank + 1) % column != 0){
dbg++;
printf("rank[%d], iter[%d] ------- send [%d] fish to neig[4], \n", rank, iter, n_send_fish[4]);
        // receive from neighbor 4
        MPI_Irecv(receive_fish[4], n_receive_fish, fishtype, rankNeighbor[4], MPI_ANY_TAG, comm, &recvReqArray[4]);
        // neighbor 4
        MPI_Isend(send_fish[4], n_send_fish[4], fishtype, rankNeighbor[4], mesTag, comm, &sendReqArray[4]);
    }    else {
		recvReqArray[4] = MPI_REQUEST_NULL;
	}
}


//*************************************************************************************/
////////////////////////////////////////////////////////////////////////////////////////
// wait_for_fish()
////////////////////////////////////////////////////////////////////////////////////////
//*************************************************************************************/
void wait_for_fish(MPI_Request* recvReqArray, int* n_fish) {
    // Now wait for any recv to come back.
    int k;
    int arrayIndex;
	int numNeighbor = NUM_NEIGHBOR;
    
	if ((rank < column) || ((rank + 1) % column == 0) || (rank >= n_proc - column) || (rank % column == 0))
        numNeighbor = 5;
    if (rank == 0 || rank == column - 1 || rank == n_proc - 1 || rank == n_proc - column)
        numNeighbor = 3;

	MPI_Status statusArray[numNeighbor];

	// Zero the count array.
	for (k = 0; k < NUM_NEIGHBOR; k++) {
		n_fish[k] = 0;
	}
	
    for (k = 0; k < numNeighbor; ++k) {
        MPI_Waitany(NUM_NEIGHBOR, recvReqArray, &arrayIndex, &statusArray[k]);
		//recvReqArray[arrayIndex] = MPI_REQUEST_NULL;

		if (arrayIndex != MPI_UNDEFINED) {
			assert(arrayIndex >= 0 && arrayIndex <= 7);
dbg--;
			MPI_Get_count(&statusArray[k], fishtype, &n_fish[arrayIndex]);
		} 
	}
}
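wait_for_fish() relies on two MPI guarantees: MPI_Waitany ignores MPI_REQUEST_NULL entries, and a completed (non-persistent) request is reset to MPI_REQUEST_NULL, so calling it repeatedly drains the array and eventually returns MPI_UNDEFINED. A self-contained sketch of that drain loop with hypothetical buffers:

#include <mpi.h>

/* Sketch: complete every active request in reqs[0..n), recording how
 * many elements of `type` arrived for each; inactive slots stay 0.   */
void drain_recvs(int n, MPI_Request *reqs, MPI_Datatype type, int *counts)
{
  int i, which;
  MPI_Status st;

  for (i = 0; i < n; ++i)
    counts[i] = 0;

  for (;;) {
    MPI_Waitany(n, reqs, &which, &st);
    if (which == MPI_UNDEFINED)     /* no active requests remain */
      break;
    MPI_Get_count(&st, type, &counts[which]);
  }
}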
Example #3
SEXP spmd_waitany(SEXP R_count, SEXP R_status){
	int index;
	spmd_errhandler(
		MPI_Waitany(INTEGER(R_count)[0], request, &index,
			&status[INTEGER(R_status)[0]]));
	return(AsInt(index));
} /* End of spmd_waitany(). */
Example #4
static void event_loop(event_queue_t queue,int block){
    while(queue->pending){
        Debug("MPI waiting for %d events",queue->pending);
        int index[queue->pending];
        int completed;
        MPI_Status status[queue->pending];
        if (block) {
            Debug("MPI_Waitsome");
            //int res = MPI_Waitsome(queue->pending,queue->request,&completed,index,status);
            int res = MPI_Waitany(queue->pending,queue->request,index,status);
            completed=1;
            Debug("MPI_Waitsome : %d",res);
            if (res != MPI_SUCCESS) Abort("MPI_Waitsome");
            queue->wait_some_calls++;
            if (completed>1) queue->wait_some_multi++;
            block=0;
        } else {
            Debug("MPI_Testsome");
            //int res = MPI_Testsome(queue->pending,queue->request,&completed,index,status);
            int flag;
            int res = MPI_Testany(queue->pending,queue->request,index,&flag,status);
            completed=flag?1:0;
            Debug("MPI_Testsome : %d",res);
            if (res != MPI_SUCCESS) Abort("MPI_Testsome");
            queue->test_some_calls++;
            if (completed==0) {
                queue->test_some_none++;
                Debug("MPI exit event loop");
                return;
            }
            if (completed>1) queue->test_some_multi++;
        }
        Debug("MPI completion of %d events",completed);
        event_callback cb[completed];
        void *ctx[completed];
        for(int i=0;i<completed;i++){
            cb[i]=queue->cb[index[i]];
            queue->cb[index[i]]=NULL;
            ctx[i]=queue->context[index[i]];
        }
        int k=0;
        for(int i=0;i<queue->pending;i++){
            if (queue->cb[i]) {
                if (k<i) {
                    queue->request[k]=queue->request[i];
                    queue->cb[k]=queue->cb[i];
                    queue->context[k]=queue->context[i];
                }
                k++;
            }
        }
        queue->pending=k;
        for(int i=0;i<completed;i++) {
            Debug("MPI call back");
            cb[i](ctx[i],&status[i]);
            Debug("MPI call back done");
        }
    }
    Debug("MPI exit loop");
}
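event_loop() still carries variables sized for the commented-out MPI_Waitsome call, but the live path completes one request per iteration with MPI_Waitany and then compacts the request/callback arrays so the pending entries stay dense. A self-contained sketch of just that compaction step, using hypothetical plain-array types in place of event_queue_t:

#include <stddef.h>
#include <mpi.h>

typedef void (*event_cb_t)(void *ctx, MPI_Status *status);

/* Squeeze out entries whose callback has been consumed (cb == NULL)
 * and return the new number of pending requests.                    */
int compact_pending(int pending, MPI_Request *request,
                    event_cb_t *cb, void **context)
{
  int i, k = 0;
  for (i = 0; i < pending; ++i) {
    if (cb[i] != NULL) {
      if (k < i) {                  /* shift left over the freed slot */
        request[k] = request[i];
        cb[k]      = cb[i];
        context[k] = context[i];
      }
      k++;
    }
  }
  return k;
}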
Example #5
int       Zoltan_Comm_Do_Wait(
ZOLTAN_COMM_OBJ * plan,		/* communication data structure */
int tag,			/* message tag for communicating */
char *send_data,		/* array of data I currently own */
int nbytes,			/* multiplier for sizes */
char *recv_data)		/* array of data I'll own after comm */
{
    MPI_Status status;		/* return from Waitany */
    int       my_proc;		/* processor ID */
    int       self_num;		/* where in send list my_proc appears */
    int       i, j, k, jj;	/* loop counters */

    MPI_Comm_rank(plan->comm, &my_proc);    
    
    /* Wait for messages to arrive & unpack them if necessary. */
    /* Note: since request is in plan, could wait in later routine. */

    if (plan->indices_from == NULL) {	/* No copying required */
        if (plan->nrecvs > 0) {
	    MPI_Waitall(plan->nrecvs, plan->request, plan->status);
	}
    }

    else {			 	/* Need to copy into recv_data. */
	if (plan->self_msg) {		/* Unpack own data before waiting */
	    for (self_num = 0; self_num < plan->nrecvs + plan->self_msg; self_num++) 
		if (plan->procs_from[self_num] == my_proc) break;
	    k = plan->starts_from[self_num];
            if (!plan->sizes_from || plan->sizes_from[self_num]) {
	        for (j = plan->lengths_from[self_num]; j; j--) {
		    memcpy(&recv_data[plan->indices_from[k] * nbytes],
		        &plan->recv_buff[k * nbytes], nbytes);
		    k++;
	        }
	    }
	}
	else
	    self_num = plan->nrecvs;

	for (jj = 0; jj < plan->nrecvs; jj++) {

	    MPI_Waitany(plan->nrecvs, plan->request, &i, &status);

            if (i == MPI_UNDEFINED) break;  /* No more receives */

	    if (i >= self_num) i++;

	    k = plan->starts_from[i];
	    for (j = plan->lengths_from[i]; j; j--) {
		memcpy(&recv_data[plan->indices_from[k] * nbytes],
		    &plan->recv_buff[k * nbytes], nbytes);
		k++;
	    }
	}

	ZOLTAN_FREE(&plan->recv_buff);
    }

    return (ZOLTAN_OK);
}
Example #6
/*!
    Waits for any receive to complete and returns the associated rank.

    If there are no active receives, the call returns MPI_UNDEFINED.

    \result The rank of the completed receive, or MPI_UNDEFINED if there
    were no active receives.
*/
int DataCommunicator::waitAnyRecv()
{
    // Wait for a receive to complete
    int id;
    MPI_Waitany(m_recvRequests.size(), m_recvRequests.data(), &id, MPI_STATUS_IGNORE);
    if (id == MPI_UNDEFINED) {
        return MPI_UNDEFINED;
    }

    // If the buffer is a double buffer, swap it
    RecvBuffer &recvBuffer = m_recvBuffers[id];
    if (recvBuffer.isDouble()) {
        recvBuffer.swap();
    }

    // Rank of the request
    int rank = m_recvRanks[id];

    // Restart the receive
    if (areRecvsContinuous()) {
        startRecv(rank);
    }

    // Return the rank associated to the completed receive
    return rank;
}
Example #7
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  char processor_name[128];
  int namelen = 128;
  int buf0[buf_size];
  int buf1[buf_size];
  int buf2[buf_size];
  int i, flipbit, done;
  MPI_Status status;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (MPI_COMM_WORLD);

  if (nprocs < 2)
    {
      printf ("not enough tasks\n");
    }
  else if (rank == 0)
    {
      MPI_Request reqs[3];

      MPI_Irecv (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &reqs[0]);
      MPI_Irecv (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &reqs[1]);
      MPI_Irecv (buf2, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &reqs[2]);

      for (i = 3; i > 0; i--) {
	MPI_Waitany (i, reqs, &done, &status);

	assert (done == (i - 1));

	/* don't let next one start until after waitany call... */
	MPI_Send (&flipbit, 1, MPI_INT, 1, i, MPI_COMM_WORLD);
      }
    }
  else if (rank == 1)
    {
      memset (buf0, 1, buf_size*sizeof(int));

      for (i = 3; i > 0; i--) {
	MPI_Recv (&flipbit, 1, MPI_INT, 0, i, MPI_COMM_WORLD, &status);
	
	MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
      }
    }

  MPI_Barrier (MPI_COMM_WORLD);

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);
}
Example #8
int MPI_Waitany_Wrapper(int count, MPI_Request *array_of_requests, int *index, MPI_Status *status)
{
#ifdef COMMPI
  char *me = ft_mpi_routine_names[MPI_Waitany_cntr];
  int ierr;
  FT_INITIALIZE(me, ft_global_ht)
  ft_mpi_cntrs[MPI_Total_cntr]++;
  ft_mpi_cntrs[MPI_Waitany_cntr]++;
#ifdef TERRY_TRACE
  if (terry_trace_flag == TRUE) {
  TERRY_MPI_Waitany_cntr++;
  TRCHKGT(BEFORE_MPI_Waitany, cycle, TERRY_MPI_Waitany_cntr, 0, 0, 0); 
  }
#endif
  ierr = MPI_Waitany(count, array_of_requests, index, status);
#ifdef TERRY_TRACE
  if (terry_trace_flag == TRUE) {
  TRCHKGT(AFTER_MPI_Waitany, cycle, TERRY_MPI_Waitany_cntr, 0, 0, 0); 
  }
#endif
  FT_FINALIZE(me, ft_global_ht, 1)
  return(ierr);
#else
  return(0);
#endif
}
Example #9
void  mpi_waitany (int *count, int *request, int *index, int *status, int *ierr)
{
    int c_index;

    *ierr = MPI_Waitany(*count, request, &c_index, (MPI_Status *)status);
    *index = c_index + 1; /* Fortran counts from one not from zero */
    return;
}
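One detail worth noting about the wrapper above: when every request is inactive, MPI_Waitany sets the index to MPI_UNDEFINED, and adding one to that sentinel would hand Fortran a meaningless value. A hedged variant that passes the sentinel through unchanged (a sketch, not the original project's binding):

#include <mpi.h>

void mpi_waitany_checked(int *count, MPI_Request *request, int *index,
                         MPI_Status *status, int *ierr)
{
    int c_index;

    *ierr = MPI_Waitany(*count, request, &c_index, status);
    if (c_index == MPI_UNDEFINED)
        *index = MPI_UNDEFINED;   /* no active request: keep the sentinel */
    else
        *index = c_index + 1;     /* Fortran counts from one, not zero    */
}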
Example #10
File: waitany.c Project: 8l/rose
int main(int argc, char **argv) {
  int a;

  MPI_Request reqs[2];

  MPI_Waitany(2, reqs, &a, MPI_STATUS_IGNORE);

  return 0;
}
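The waitany.c test above is a static-analysis fixture: the two requests are never initialized and MPI is never started. A version that is safe to actually run initializes the handles to MPI_REQUEST_NULL, in which case MPI_Waitany returns immediately with MPI_UNDEFINED (a sketch, assuming nothing beyond standard MPI):

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
  int a;
  MPI_Request reqs[2] = { MPI_REQUEST_NULL, MPI_REQUEST_NULL };

  MPI_Init(&argc, &argv);
  MPI_Waitany(2, reqs, &a, MPI_STATUS_IGNORE);   /* no active requests */
  printf("index is %s\n", a == MPI_UNDEFINED ? "MPI_UNDEFINED" : "a completed request");
  MPI_Finalize();
  return 0;
}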
Example #11
static PetscErrorCode MatStashScatterGetMesg_Ref(MatStash *stash,PetscMPIInt *nvals,PetscInt **rows,PetscInt **cols,PetscScalar **vals,PetscInt *flg)
{
  PetscErrorCode ierr;
  PetscMPIInt    i,*flg_v = stash->flg_v,i1,i2;
  PetscInt       bs2;
  MPI_Status     recv_status;
  PetscBool      match_found = PETSC_FALSE;

  PetscFunctionBegin;
  *flg = 0; /* When a message is discovered this is reset to 1 */
  /* Return if no more messages to process */
  if (stash->nprocessed == stash->nrecvs) PetscFunctionReturn(0);

  bs2 = stash->bs*stash->bs;
  /* If a matching pair of receives are found, process them, and return the data to
     the calling function. Until then keep receiving messages */
  while (!match_found) {
    if (stash->reproduce) {
      i    = stash->reproduce_count++;
      ierr = MPI_Wait(stash->recv_waits+i,&recv_status);CHKERRQ(ierr);
    } else {
      ierr = MPI_Waitany(2*stash->nrecvs,stash->recv_waits,&i,&recv_status);CHKERRQ(ierr);
    }
    if (recv_status.MPI_SOURCE < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Negative MPI source!");

    /* Now pack the received message into a structure which is usable by others */
    if (i % 2) {
      ierr = MPI_Get_count(&recv_status,MPIU_SCALAR,nvals);CHKERRQ(ierr);

      flg_v[2*recv_status.MPI_SOURCE] = i/2;

      *nvals = *nvals/bs2;
    } else {
      ierr = MPI_Get_count(&recv_status,MPIU_INT,nvals);CHKERRQ(ierr);

      flg_v[2*recv_status.MPI_SOURCE+1] = i/2;

      *nvals = *nvals/2; /* This message has both row indices and col indices */
    }

    /* Check if we have both messages from this proc */
    i1 = flg_v[2*recv_status.MPI_SOURCE];
    i2 = flg_v[2*recv_status.MPI_SOURCE+1];
    if (i1 != -1 && i2 != -1) {
      *rows = stash->rindices[i2];
      *cols = *rows + *nvals;
      *vals = stash->rvalues[i1];
      *flg  = 1;
      stash->nprocessed++;
      match_found = PETSC_TRUE;
    }
  }
  PetscFunctionReturn(0);
}
Example #12
void mpi_waitany_(int* count, int* requests, int* index, MPI_Status* status, int* ierr) {
  MPI_Request* reqs;
  int i;

  reqs = xbt_new(MPI_Request, *count);
  for(i = 0; i < *count; i++) {
    reqs[i] = find_request(requests[i]);
  }
  *ierr = MPI_Waitany(*count, reqs, index, status);
  free(reqs);
}
Example #13
/**
  * \brief Waits for any socket to complete an operation (\b irecv or \b isend). Used to process data in arrival order.
  * <b>Unlocking of the socket must be done by the client to free the socket</b>. For performance reasons, tests of the ID may be omitted.
  */
socket_t *socket_seekWait(const channel_t * ch, int direction)
{
	int num;
	MPI_Status status;
	MPI_Waitany(ch->socketsN[direction], ch->requests[direction], &num, &status);
	if(num != MPI_UNDEFINED) {
		socket_t *s = ch->sockets[direction] + num;
		if(!s->locked) error("socket_seekWait: MPI_Waitany pointed to the unlocked socket (cpu = %d, direction = %s).", s->cpu, (s->direction) ? "outcome" : "income");
		return s;
	}
	return NULL;
}
Example #14
void sync_cells_direct(void (*copy_func)(int, int, int, int, int, int, vektor),
				void (*pack_func)(msgbuf*, int, int, int, vektor),
				void (*unpack_func)(msgbuf*, int, int, int), int all) {
	int i,k;

	int sendCells;
	int recvCells;
	int totalOperations;

	if (all){
		sendCells = lb_nTotalComms;
		recvCells = lb_nTotalComms;
	} else {
		sendCells = lb_nTotalComms-lb_nForceComms;
		recvCells = lb_nForceComms;
	}
	totalOperations = sendCells + recvCells;

	MPI_Status stat;

	empty_mpi_buffers();

	for (i = 0; i<sendCells;++i){
		/*Send data away*/
		lb_copyCellDataToSend(&lb_send_buf[i], lb_sendCells[i], lb_nSendCells[i], pack_func, lb_commIndexToCpu[i]);
		isend_buf(&lb_send_buf[i], lb_commIndexToCpu[i], &lb_req_send[i]);
		lb_requests[i] = lb_req_send[i];
		lb_request_indices[i] = -1; /* Indicates no processing required */
	}

	for (i = 0; i<recvCells;++i){
		/*Start receiving data*/
		k = (lb_nTotalComms-1)-i;
		irecv_buf(&lb_recv_buf[k], lb_commIndexToCpu[k], &lb_req_recv[k]);
		lb_requests[i+sendCells] = lb_req_recv[k];
		lb_request_indices[i+sendCells] = k;
	}

	/*Receive and process data as soon as something is available*/
	for (i = totalOperations; i>0; i--){
		int finished;
		MPI_Waitany(i, lb_requests, &finished, &stat);
		int ind = lb_request_indices[finished];
		if (ind != -1){
			MPI_Get_count(&stat, REAL, &lb_recv_buf[ind].n);

			lb_unpackCellDataFromBuffer(&lb_recv_buf[ind], lb_commIndexToCpu[ind], (*unpack_func));
		}
		lb_requests[finished] = lb_requests[i-1];
		lb_request_indices[finished] = lb_request_indices[i-1];
	}
}
Example #15
JNIEXPORT jint JNICALL Java_mpi_Request_waitAny(
        JNIEnv *env, jclass clazz, jlongArray requests)
{
    int count = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    int index;
    int rc = MPI_Waitany(count, cReq, &index, MPI_STATUS_IGNORE);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    return index;
}
Example #16
HYPRE_Int
hypre_MPI_Waitany( HYPRE_Int          count,
                   hypre_MPI_Request *array_of_requests,
                   HYPRE_Int         *index,
                   hypre_MPI_Status  *status )
{
   hypre_int mpi_index;
   HYPRE_Int ierr;
   ierr = (HYPRE_Int) MPI_Waitany((hypre_int)count, array_of_requests,
                                  &mpi_index, status);
   *index = (HYPRE_Int) mpi_index;
   return ierr;
}
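The temporary mpi_index exists because HYPRE_Int may be wider than the plain int that MPI_Waitany writes through, so the wrapper cannot simply pass the caller's pointer. The same widening pattern applies to any MPI out-parameter of type int; a minimal sketch with hypothetical names:

#include <stdint.h>
#include <mpi.h>

/* Complete one request and return its index widened to 64 bits. */
int64_t waitany_wide(int count, MPI_Request *reqs, MPI_Status *status)
{
    int narrow_index;                      /* MPI writes a plain int here */
    MPI_Waitany(count, reqs, &narrow_index, status);
    return (int64_t) narrow_index;         /* widen for the 64-bit caller */
}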
Example #17
//------------------------------------------------------------------------
int mirrorProcs(MPI_Comm comm, std::vector<int>& toProcs, std::vector<int>& fromProcs)
{
  fromProcs.resize(0);
#ifdef FEI_SER
  fromProcs.push_back(0);
  return(0);
#else
  int num_procs = fei::numProcs(comm);
  std::vector<int> tmpIntData(num_procs*3, 0);

  int* buf = &tmpIntData[0];
  int* recvbuf = buf+num_procs;

  for(unsigned i=0; i<toProcs.size(); ++i) {
    buf[toProcs[i]] = 1;
  }

  for(int ii=2*num_procs; ii<3*num_procs; ++ii) {
    buf[ii] = 1;
  }

  CHK_MPI( MPI_Reduce_scatter(buf, &(buf[num_procs]), &(buf[2*num_procs]),
                              MPI_INT, MPI_SUM, comm) );

  int numRecvProcs = buf[num_procs];

  int tag = 11116;
  std::vector<MPI_Request> mpiReqs(numRecvProcs);

  int offset = 0;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    CHK_MPI( MPI_Irecv(&(recvbuf[ii]), 1, MPI_INT, MPI_ANY_SOURCE, tag,
                       comm, &(mpiReqs[offset++])) );
  }

  for(unsigned i=0; i<toProcs.size(); ++i) {
    CHK_MPI( MPI_Send(&(toProcs[i]), 1, MPI_INT, toProcs[i], tag, comm) );
  }

  MPI_Status status;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status);
    fromProcs.push_back(status.MPI_SOURCE);
  }

  std::sort(fromProcs.begin(), fromProcs.end());

  return(0);
#endif
}
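The MPI_Reduce_scatter call is the key step in mirrorProcs: every rank contributes a 0/1 indicator vector over all ranks, the element-wise sums are scattered one entry per rank (the recvcounts are all 1), and each rank therefore learns how many processes will send to it before posting the MPI_ANY_SOURCE receives. A stripped-down sketch of just that counting step, with hypothetical names:

#include <stdlib.h>
#include <mpi.h>

/* Return how many ranks list the calling rank among their destinations.
 * dest[0..ndest) are this rank's own destination ranks.                 */
int count_incoming(MPI_Comm comm, const int *dest, int ndest)
{
  int i, nprocs, nrecv = 0;
  MPI_Comm_size(comm, &nprocs);

  int *indicator  = calloc(nprocs, sizeof(int));  /* 1 where we will send      */
  int *recvcounts = malloc(nprocs * sizeof(int)); /* one result entry per rank */
  for (i = 0; i < nprocs; ++i) recvcounts[i] = 1;
  for (i = 0; i < ndest;  ++i) indicator[dest[i]] = 1;

  /* Sum the indicator vectors element-wise and scatter one entry to each rank. */
  MPI_Reduce_scatter(indicator, &nrecv, recvcounts, MPI_INT, MPI_SUM, comm);

  free(indicator);
  free(recvcounts);
  return nrecv;
}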
Example #18
int main(int argc, char **argv)
{
	MPI_Init(&argc, &argv);

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Request r[2];
	int d;
	int d2[2];
	if (rank == 0) {
		MPI_Isend(&d, 1, MPI_INT, 1, 10, MPI_COMM_WORLD, &r[0]);
		MPI_Isend(&d, 1, MPI_INT, 3, 10, MPI_COMM_WORLD, &r[1]);
		int i;
		MPI_Waitany(2, r, &i, MPI_STATUS_IGNORE);
		//MPI_Waitall(2, r, MPI_STATUSES_IGNORE);
		MPI_Recv(&d, 1, MPI_INT, 1, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		MPI_Recv(&d2, 1, MPI_INT, 3, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		MPI_Waitany(2, r, &i, MPI_STATUS_IGNORE);
	}

	if (rank == 1) {
		MPI_Ssend(&d, 1, MPI_INT, 0, 10, MPI_COMM_WORLD);
		MPI_Recv(&d, 1, MPI_INT, 0, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	}

	if (rank == 2) {
		MPI_Ssend(&d, 1, MPI_INT, 3, 20, MPI_COMM_WORLD);
	}

	if (rank == 3) {
		MPI_Recv(&d, 1, MPI_INT, 2, 20, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		MPI_Send(&d, 1, MPI_INT, 0, 10, MPI_COMM_WORLD);
		MPI_Recv(&d, 1, MPI_INT, 0, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	}

	MPI_Finalize();
	return 0;
}
Example #19
JNIEXPORT void JNICALL Java_mpi_Request_waitAnyStatus(
        JNIEnv *env, jclass clazz, jlongArray requests, jobject stat)
{
    int count = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    int index;
    MPI_Status status;
    int rc = MPI_Waitany(count, cReq, &index, &status);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    ompi_java_status_setIndex(env, stat, &status, index);
}
Example #20
void remap_2d(double *in, double *out, double *buf,
	      struct remap_plan_2d *plan)

{
  MPI_Status status;
  int i,isend,irecv;
  double *scratch;

  if (plan->memory == 0)
    scratch = buf;
  else
    scratch = plan->scratch;

/* post all recvs into scratch space */

  for (irecv = 0; irecv < plan->nrecv; irecv++)
    MPI_Irecv(&scratch[plan->recv_bufloc[irecv]],plan->recv_size[irecv],
	      MPI_DOUBLE,plan->recv_proc[irecv],0,
	      plan->comm,&plan->request[irecv]);

/* send all messages to other procs */

  for (isend = 0; isend < plan->nsend; isend++) {
    plan->pack(&in[plan->send_offset[isend]],
	       plan->sendbuf,&plan->packplan[isend]);
    MPI_Send(plan->sendbuf,plan->send_size[isend],MPI_DOUBLE,
	     plan->send_proc[isend],0,plan->comm);
  }       

/* copy in -> scratch -> out for self data */

  if (plan->self) {
    isend = plan->nsend;
    irecv = plan->nrecv;
    plan->pack(&in[plan->send_offset[isend]],
	       &scratch[plan->recv_bufloc[irecv]],
	       &plan->packplan[isend]);
    plan->unpack(&scratch[plan->recv_bufloc[irecv]],
		 &out[plan->recv_offset[irecv]],&plan->unpackplan[irecv]);
  }

/* unpack all messages from scratch -> out */

  for (i = 0; i < plan->nrecv; i++) {
    MPI_Waitany(plan->nrecv,plan->request,&irecv,&status);
    plan->unpack(&scratch[plan->recv_bufloc[irecv]],
		 &out[plan->recv_offset[irecv]],&plan->unpackplan[irecv]);
  }
}
Example #21
/*!
    Waits for any send to complete and returns the associated rank.

    If there are no active sends, the call returns MPI_UNDEFINED.

    \result The rank of the completed send, or MPI_UNDEFINED if there
    were no active sends.
*/
int DataCommunicator::waitAnySend()
{
    // Wait for a send to complete
    int id;
    MPI_Waitany(m_sendRequests.size(), m_sendRequests.data(), &id, MPI_STATUS_IGNORE);
    if (id == MPI_UNDEFINED) {
        return MPI_UNDEFINED;
    }

    // Reset the position of the buffer
    m_sendBuffers[id].seekg(0);

    // Return the rank associated to the completed send
    return m_sendRanks[id];
}
Example #22
//------------------------------------------------------------------------
int exchangeIntData(MPI_Comm comm,
                    const std::vector<int>& sendProcs,
                    std::vector<int>& sendData,
                    const std::vector<int>& recvProcs,
                    std::vector<int>& recvData)
{
  if (sendProcs.size() == 0 && recvProcs.size() == 0) return(0);
  if (sendProcs.size() != sendData.size()) return(-1);
#ifndef FEI_SER
  recvData.resize(recvProcs.size());
  std::vector<MPI_Request> mpiReqs;
  mpiReqs.resize(recvProcs.size());

  int tag = 11114;
  MPI_Datatype mpi_dtype = MPI_INT;

  //launch Irecv's for recvData:

  int localProc = fei::localProc(comm);
  int numRecvProcs = recvProcs.size();
  int req_offset = 0;
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    if (recvProcs[i] == localProc) {--numRecvProcs; continue; }

    CHK_MPI( MPI_Irecv(&(recvData[i]), 1, mpi_dtype, recvProcs[i], tag,
                       comm, &mpiReqs[req_offset++]) );
  }

  //send the sendData:

  for(unsigned i=0; i<sendProcs.size(); ++i) {
    if (sendProcs[i] == localProc) continue;

    CHK_MPI( MPI_Send(&(sendData[i]), 1, mpi_dtype,
                      sendProcs[i], tag, comm) );
  }

  //complete the Irecv's:

  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Status status;
    CHK_MPI( MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status) );
  }

#endif
  return(0);
}
Example #23
  void recv( const vector_type & v )
  {
    const size_t recv_msg_count = m_recv_request.size();
    const std::pair<unsigned,unsigned> recv_range( m_map.count_owned , m_map.count_owned + m_map.count_receive );

    const vector_type vrecv = subview<vector_type>( v , recv_range );

    // Wait for receives and verify:

    for ( size_t i = 0 ; i < recv_msg_count ; ++i ) {
      MPI_Status recv_status ;
      int recv_which = 0 ;
      int recv_size  = 0 ;

      MPI_Waitany( recv_msg_count , & m_recv_request[0] , & recv_which , & recv_status );

      const int recv_proc = recv_status.MPI_SOURCE ;

      MPI_Get_count( & recv_status , MPI_BYTE , & recv_size );

      // Verify message properly received:

      const int  expected_proc = m_map.host_recv(recv_which,0);
      const int  expected_size = m_map.host_recv(recv_which,1) *
                                 m_chunk * sizeof(scalar_type);

      if ( ( expected_proc != recv_proc ) ||
           ( expected_size != recv_size ) ) {
        std::ostringstream msg ;
        msg << "MatrixMultiply communication error:"
            << " P" << comm::rank( m_map.machine )
            << " received from P" << recv_proc
            << " size "     << recv_size
            << " expected " << expected_size
            << " from P"    << expected_proc ;
        throw std::runtime_error( msg.str() );
      }
    }

    // Copy received data to device memory.

    Impl::DeepCopy<typename Device::memory_space,HostSpace>( vrecv.ptr_on_device() ,
                                                             m_host_recv_buffer.ptr_on_device() ,
                                                             m_map.count_receive * m_chunk * sizeof(scalar_type) );
  }
Example #24
void NEKTAR_MEX::MEX_plus(double *val){

  double *dp;
  int *map;
  int i,j,partner,index;

#ifdef MEX_TIMING
  double time_start,time_end,time_MAX, time_MIN;  
  time_start = MPI_Wtime();
#endif

  MEX_post_recv();

  for (partner = 0; partner < Npartners; ++partner){
    dp = send_buffer[partner];
    map = message_send_map[partner];
    for (i = 0; i < message_size[partner]; ++i)
       dp[i] = val[map[i]]; 
  }


  MEX_post_send();


  for (i = 0; i < Npartners; i++){
     MPI_Waitany(Npartners,request_recv,&index,MPI_STATUS_IGNORE);
     dp = recv_buffer[index];
     map = message_recv_map[index];
     for (j = 0; j < message_size[index]; ++j)
        val[map[j]] += dp[j];
  } 
  MPI_Waitall(Npartners,request_send,MPI_STATUSES_IGNORE);
 
#ifdef MEX_TIMING  
  time_end = MPI_Wtime();
  MPI_Barrier(comm);
  time_end = time_end - time_start;
  MPI_Reduce ( &time_end, &time_MAX, 1, MPI_DOUBLE, MPI_MAX, 0, comm);
  MPI_Reduce ( &time_end, &time_MIN, 1, MPI_DOUBLE, MPI_MIN, 0, comm);
  if (my_rank == 0)
    fprintf(stdout,"MEX_plus: time_MAX = %e, time_MIN = %e\n",time_MAX, time_MIN);
#endif


}
Example #25
task_t *wait_completion(task_list_t *tlist, proc_list_t *plist,
			req_list_t *rlist) {
  task_t *ptr;
  MPI_Status status;
  int index;

  MPI_Waitany(rlist->nreqs, rlist->reqs, &index, &status);
  if(index!=MPI_UNDEFINED) {
    int nelem;
    MPI_Get_elements(&status, MPI_INT, &nelem);
    assert(nelem == 2);
    ptr = rlist->tasks[index];
    assert(ptr->v[1] == ptr->tskid);
      
    req_list_return_as_idle(rlist, index);
    return ptr;
  }
  return NULL;
}
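wait_completion() uses MPI_Get_elements rather than MPI_Get_count: for a derived datatype, MPI_Get_count reports whole datatype instances while MPI_Get_elements reports the underlying basic elements, so the assert(nelem == 2) above checks that exactly two integer elements arrived regardless of the receive datatype. A small hedged illustration with a hypothetical pair-of-ints datatype:

#include <mpi.h>

/* For a message of complete pairs, ints == 2 * pairs. */
void inspect_counts(MPI_Status *st, MPI_Datatype pair_of_ints)
{
  int pairs, ints;
  MPI_Get_count(st, pair_of_ints, &pairs);   /* whole derived-type instances */
  MPI_Get_elements(st, MPI_INT, &ints);      /* underlying basic MPI_INTs    */
}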
Example #26
PetscErrorCode VecStashScatterGetMesg_Private(VecStash *stash,PetscMPIInt *nvals,PetscInt **rows,PetscScalar **vals,PetscInt *flg)
{
  PetscErrorCode ierr;
  PetscMPIInt    i;
  PetscInt       *flg_v;
  PetscInt       i1,i2,bs=stash->bs;
  MPI_Status     recv_status;
  PetscTruth     match_found = PETSC_FALSE;

  PetscFunctionBegin;

  *flg = 0; /* When a message is discovered this is reset to 1 */
  /* Return if no more messages to process */
  if (stash->nprocessed == stash->nrecvs) { PetscFunctionReturn(0); } 

  flg_v = stash->nprocs;
  /* If a matching pair of receives are found, process them, and return the data to
     the calling function. Until then keep receiving messages */
  while (!match_found) {
    ierr = MPI_Waitany(2*stash->nrecvs,stash->recv_waits,&i,&recv_status);CHKERRQ(ierr);
    /* Now pack the received message into a structure which is usable by others */
    if (i % 2) { 
      ierr = MPI_Get_count(&recv_status,MPIU_INT,nvals);CHKERRQ(ierr);
      flg_v[2*recv_status.MPI_SOURCE+1] = i/2; 
    } else { 
      ierr = MPI_Get_count(&recv_status,MPIU_SCALAR,nvals);CHKERRQ(ierr);
      flg_v[2*recv_status.MPI_SOURCE] = i/2; 
      *nvals = *nvals/bs; 
    }
    
    /* Check if we have both the messages from this proc */
    i1 = flg_v[2*recv_status.MPI_SOURCE];
    i2 = flg_v[2*recv_status.MPI_SOURCE+1];
    if (i1 != -1 && i2 != -1) {
      *rows       = stash->rindices + i2*stash->rmax;
      *vals       = stash->rvalues + i1*bs*stash->rmax;
      *flg        = 1;
      stash->nprocessed ++;
      match_found = PETSC_TRUE;
    }
  }
  PetscFunctionReturn(0);
}
Example #27
void send_forces(void (*add_func)   (int, int, int, int, int, int),
                 void (*pack_func)  (msgbuf*, int, int, int),
                 void (*unpack_func)(msgbuf*, int, int, int)) {
	int i,k;

	int sendForces = lb_nForceComms;
	int recvForces = lb_nTotalComms - lb_nForceComms;

	MPI_Status stat;

	empty_mpi_buffers();
	int offset = lb_nTotalComms - lb_nForceComms;

	for (i = 0; i<sendForces;++i){
		/*Send data away*/
		k= i+offset;
		lb_copyForcesDataToSend(&lb_send_buf[k], lb_sendForces[k], lb_nSendForces[k], pack_func);
		isend_buf(&lb_send_buf[k], lb_commIndexToCpu[k], &lb_req_send[k]);
		lb_requests[k] = lb_req_send[k];
		lb_request_indices[k] = -1; /* Indicates no processing required */
	}

	for (i = 0; i<recvForces;++i){
		/*Start receiving data*/
		irecv_buf(&lb_recv_buf[i], lb_commIndexToCpu[i], &lb_req_recv[i]);
		lb_requests[i] = lb_req_recv[i];
		lb_request_indices[i] = i;
	}

	/*Receive and process data as soon as something is available*/
	for (i = lb_nTotalComms; i>0; i--){
		int finished;
		MPI_Waitany(i, lb_requests, &finished, &stat);
		int ind = lb_request_indices[finished];
		if (ind != -1){
			MPI_Get_count(&stat, REAL, &lb_recv_buf[ind].n);
			lb_unpackForcesDataFromBuffer(&lb_recv_buf[ind], (*unpack_func));
		}
		lb_requests[finished] = lb_requests[i-1];
		lb_request_indices[finished] = lb_request_indices[i-1];
	}
}
Example #28
int
hypre_thread_MPI_Waitany( int          count,
             MPI_Request *array_of_requests,
             int         *index,
             MPI_Status  *status            )
{
  int returnval;
  int unthreaded = pthread_equal(initial_thread,pthread_self());
  int I_call_mpi = unthreaded || pthread_equal(hypre_thread[0],pthread_self());
  if (I_call_mpi)
  {
    returnval=MPI_Waitany(count,array_of_requests,index,status);
  }
  else
  {
   returnval=0;
  }
  hypre_barrier(&mpi_mtx, unthreaded);
  return returnval;
}
Example #29
void event_while(event_queue_t queue,int *condition){
    while(*condition){
        int index[queue->pending];
        int completed=1;
        MPI_Status status[queue->pending];
        Debug("MPI_Waitsome in while");
        //int res = MPI_Waitsome(queue->pending,queue->request,&completed,index,status);
        int res = MPI_Waitany(queue->pending,queue->request,index,status);
        // The Waitsome version led to deadlocks in case of multiple requests and nested call-backs.
        // To use Waitsome the callback queue must be moved to the queue data structure.
        Debug("MPI_Waitsome : %d/%d",res,completed);
        if (res != MPI_SUCCESS) Abort("MPI_Waitsome");
        queue->wait_some_calls++;
        if (completed>1) queue->wait_some_multi++;
        event_callback cb[completed];
        void *ctx[completed];
        for(int i=0;i<completed;i++){
            cb[i]=queue->cb[index[i]];
            queue->cb[index[i]]=NULL;
            ctx[i]=queue->context[index[i]];
        }
        int k=0;
        for(int i=0;i<queue->pending;i++){
            if (queue->cb[i]) {
                if (k<i) {
                    queue->request[k]=queue->request[i];
                    queue->cb[k]=queue->cb[i];
                    queue->context[k]=queue->context[i];
                }
                k++;
            }
        }
        queue->pending=k;
        for(int i=0;i<completed;i++) {
            Debug("MPI call back");
            cb[i](ctx[i],&status[i]);
            Debug("MPI call back done");
        }
    }
}
Example #30
void Grid3D::Wait_and_Unload_MPI_Comm_Buffers_BLOCK(int dir, int *flags)
{
  int iwait;
  int index = 0;
  int wait_max=0;
  MPI_Status status;

  //find out how many recvs we need to wait for
  if (dir==0) {
    if(flags[0] == 5) //there is communication on this face
      wait_max++;   //so we'll need to wait for its comm
    if(flags[1] == 5) //there is communication on this face
      wait_max++;   //so we'll need to wait for its comm
  }
  if (dir==1) {
    if(flags[2] == 5) //there is communication on this face
      wait_max++;   //so we'll need to wait for its comm
    if(flags[3] == 5) //there is communication on this face
      wait_max++;   //so we'll need to wait for its comm
  }
  if (dir==2) {
    if(flags[4] == 5) //there is communication on this face
      wait_max++;   //so we'll need to wait for its comm
    if(flags[5] == 5) //there is communication on this face
      wait_max++;   //so we'll need to wait for its comm
  }

  //wait for any receives to complete
  for(iwait=0;iwait<wait_max;iwait++)
  {
    //wait for recv completion
    MPI_Waitany(wait_max,recv_request,&index,&status);
    //if (procID==1) MPI_Get_count(&status, MPI_CHREAL, &count);
    //if (procID==1) printf("Process 1 unloading direction %d, source %d, index %d, length %d.\n", status.MPI_TAG, status.MPI_SOURCE, index, count);
    //depending on which face arrived, load the buffer into the ghost grid
    Unload_MPI_Comm_Buffers(status.MPI_TAG);
  }
}