Code example #1
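This C++ member function, apparently from a grid/mesh library, fills an empty enumerated_subrange from the cell iterator c: after asserting the precondition that the cell list is empty, it appends each visited cell handle until the iterator reports IsDone(), then finalizes the range with init() and init_counts().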
void enumerated_subrange<GRID,GT>::construct(CELLIT c)
{
  REQUIRE_ALWAYS(cells.empty(), "enumerated_subrange<GRID>::construct: cells must be empty!", 1);
  // Append every cell handle the iterator visits, then finalize the range.
  while (!c.IsDone()) {
    append_cell(c.handle());
    ++c;
  }
  init();
  init_counts();
}
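The pattern here is generic: collect handles from an IsDone()-style iterator into a container guarded by an emptiness precondition. A minimal self-contained sketch of the same pattern follows; SimpleRange and CountingIterator are hypothetical stand-ins for illustration, not part of the library above.

#include <cassert>
#include <iostream>
#include <vector>

// Hypothetical stand-in for a cell iterator with IsDone()/handle().
struct CountingIterator {
    int cur, last;
    bool IsDone() const { return cur > last; }
    int  handle() const { return cur; }
    CountingIterator& operator++() { ++cur; return *this; }
};

// Hypothetical stand-in for enumerated_subrange: collect each handle once.
struct SimpleRange {
    std::vector<int> cells;
    void construct(CountingIterator c) {
        assert(cells.empty() && "construct: cells must be empty!");
        while (!c.IsDone()) {
            cells.push_back(c.handle());
            ++c;
        }
    }
};

int main() {
    SimpleRange r;
    r.construct(CountingIterator{0, 4});
    std::cout << r.cells.size() << " cells collected\n";  // prints "5 cells collected"
}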
Code example #2
File: m-align.c Project: drpowell/Alignment_Prob
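Only the preamble of doAlign is shown: it declares a dynamic-programming table of two rows over seqB (struct cell D[2][lenB+1]), optionally compiles in count accumulators under DO_COUNTS, and initializes the best final value to INFINITY before the main alignment recurrence, which the listing cuts off.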
DOUBLE doAlign(unsigned char *seqA, unsigned char *seqB, int lenA, int lenB,
               DOUBLE seqA_enc[][ALPHA_SIZE], DOUBLE seqA_cum[],
               DOUBLE seqB_enc[][ALPHA_SIZE], DOUBLE seqB_cum[],
               DOUBLE *final_counts /* If the caller wants the final counts */
               )
{
  DOUBLE mdlCost;
  struct cell D[2][lenB+1];   /* two rolling rows of the DP matrix */
  int i, j;

  struct val_counts empty, final;

#ifdef DO_COUNTS
  init_counts(empty.counts);
  init_counts(final.counts);
#endif
  final.val = INFINITY;       /* from <math.h>: no finished alignment yet */
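The D[2][lenB+1] declaration is the classic two-row (rolling) DP layout: only the current and previous rows are kept, so memory is O(lenB) instead of O(lenA*lenB). A minimal self-contained sketch of that layout, computing plain edit distance rather than the probabilistic cost above:

#include <algorithm>
#include <cstring>
#include <iostream>
#include <vector>

// Edit distance with two rolling rows instead of a full (lenA+1) x (lenB+1) table.
int editDistance(const char *a, const char *b) {
    int lenA = (int)std::strlen(a), lenB = (int)std::strlen(b);
    std::vector<int> D[2] = { std::vector<int>(lenB + 1), std::vector<int>(lenB + 1) };
    for (int j = 0; j <= lenB; j++) D[0][j] = j;           // row 0: insert j chars of b
    for (int i = 1; i <= lenA; i++) {
        int cur = i % 2, prev = 1 - cur;                   // alternate between the two rows
        D[cur][0] = i;                                     // column 0: delete i chars of a
        for (int j = 1; j <= lenB; j++) {
            int sub = D[prev][j-1] + (a[i-1] != b[j-1]);   // match or substitute
            int del = D[prev][j] + 1;                      // delete from a
            int ins = D[cur][j-1] + 1;                     // insert from b
            D[cur][j] = std::min(sub, std::min(del, ins));
        }
    }
    return D[lenA % 2][lenB];
}

int main() {
    std::cout << editDistance("kitten", "sitting") << "\n";  // prints 3
}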
Code example #3
File: osu_latency_rdp.cpp Project: fthaler/uDeviceX
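This full listing is a latency micro-benchmark in the style of the OSU benchmarks: a root rank exchanges a nonblocking message pair with every other rank per iteration, skips a number of warm-up iterations before timing, and reports the mean time per iteration in microseconds. The globals root_id, send_counts, loop, skip, LOOP_LARGE, SKIP_LARGE and the HPM counter class are defined elsewhere in the project.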
int main (int argc, char *argv[])
{
    int myid, numprocs, i;
    float **s_buf, **r_buf;
    double t_start = 0.0, t_end = 0.0;

    if (argc == 2) {
        root_id = atoi(argv[1]);   /* optional command-line override of the root rank */
    }

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    if (myid == root_id) {
        fprintf(stderr, "Running test with root id %d and %d processes\n", root_id, numprocs);
        fflush(stderr);
    }

    init_counts(myid);

    s_buf = (float **)malloc(numprocs*sizeof(float *));
    r_buf = (float **)malloc(numprocs*sizeof(float *));
    if (myid == root_id) {
        /* One buffer pair per peer rank; the sizes cycle through send_counts[].
           Note that this loop assumes the root is rank 0. */
        for (i = 1; i < numprocs; i++) {
            s_buf[i] = (float *)calloc(1, send_counts[(i-1)%26]*sizeof(float));
            r_buf[i] = (float *)calloc(1, send_counts[(i-1)%26]*sizeof(float));
        }
    }
    else {
        /* Non-root ranks need only a single buffer pair for the root exchange. */
        s_buf[0] = (float *)calloc(1, send_counts[(myid-1)%26]*sizeof(float));
        r_buf[0] = (float *)calloc(1, send_counts[(myid-1)%26]*sizeof(float));
    }

    loop = LOOP_LARGE;
    skip = SKIP_LARGE;

    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Request req[2*numprocs];
    MPI_Status statuses[2*numprocs];

    HPM hpm;

    if (myid == root_id) {
        for (i = 0; i < loop + skip; i++) {
            if (i == skip) t_start = MPI_Wtime();   /* start timing after the warm-up iterations */
            int rq, n;
            if (i >= skip) hpm.HPM_Start("loop");

            /* Post all receives first, then the matching sends, and wait on both sets. */
            rq = 0;
            for (n = 0; n < numprocs; n++) {
                if (n == root_id) continue;
                MPI_Irecv(r_buf[n], send_counts[(n-1)%26], MPI_FLOAT, n, 1, MPI_COMM_WORLD, &req[rq]);
                rq++;
            }

            for (n = 0; n < numprocs; n++) {
                if (n == root_id) continue;
                MPI_Isend(s_buf[n], send_counts[(n-1)%26], MPI_FLOAT, n, 1, MPI_COMM_WORLD, &req[rq]);
                rq++;
            }
            MPI_Waitall(rq, req, statuses);
            if (i >= skip) hpm.HPM_Stop("loop");
        }

        t_end = MPI_Wtime();
    }
    else {
        for (i = 0; i < loop + skip; i++) {
            int rq;

            /* Each non-root rank exchanges one message pair with the root per iteration. */
            rq = 0;
            MPI_Irecv(r_buf[0], send_counts[(myid-1)%26], MPI_FLOAT, root_id, 1, MPI_COMM_WORLD, &req[rq]);
            rq++;
#if 1
            MPI_Isend(s_buf[0], send_counts[(myid-1)%26], MPI_FLOAT, root_id, 1, MPI_COMM_WORLD, &req[rq]);
            rq++;
#else
            MPI_Send(s_buf[0], send_counts[(myid-1)%26], MPI_FLOAT, root_id, 1, MPI_COMM_WORLD);
#endif
            MPI_Waitall(rq, req, statuses);
        }
    }

    if (myid == root_id) {
        /* Mean time per full exchange round, converted to microseconds. */
        double latency = (t_end - t_start) * 1e6 / (1.0 * loop);
        fprintf(stdout, "latency = %f\n", latency);
        fflush(stdout);
        hpm.HPM_Stats();
    }

    MPI_Finalize();

    return 0;
}
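To read the output: with loop timed iterations between t_start and t_end, latency = (t_end - t_start) * 1e6 / loop is the mean time per full exchange round in microseconds. For instance, if the timed phase took 0.8 s over a hypothetical loop of 10000 iterations, the program would report latency = 80.0. Build details are project-specific, but a program like this would typically be compiled with an MPI C++ wrapper such as mpicxx and launched as, say, mpirun -np 4 ./osu_latency_rdp 0, where the optional argument overrides root_id.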