Example #1
/* Headers needed by this fragment (elided in the excerpt);
 * posixtest.h provides the PTS_* result codes. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <errno.h>
#include "posixtest.h"

int main(int argc, char *argv[])
{
#if _POSIX_CPUTIME == -1
	printf("_POSIX_CPUTIME unsupported\n");
	return PTS_UNSUPPORTED;
#else
#ifdef CLOCK_PROCESS_CPUTIME_ID
	struct timespec ts1, ts2, ts3, ts4;

	if (sysconf(_SC_CPUTIME) == -1) {
		printf("_POSIX_CPUTIME unsupported\n");
		return PTS_UNSUPPORTED;
	}

	if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts1) != 0) {
		printf("clock_gettime() failed: errno %d\n", errno);
		return PTS_UNRESOLVED;
	}

	dosomething();

	if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts2) != 0) {
		printf("clock_gettime() failed: errno %d\n", errno);
		return PTS_UNRESOLVED;
	}

	dosomething();

	if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts3) != 0) {
		printf("clock_gettime() failed: errno %d\n", errno);
		return PTS_UNRESOLVED;
	}

	dosomething();

	if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts4) != 0) {
		printf("clock_gettime() failed: errno %d\n", errno);
		return PTS_UNRESOLVED;
	}

	if ((ts1.tv_sec <= ts2.tv_sec) &&
	    (ts2.tv_sec <= ts3.tv_sec) && (ts3.tv_sec <= ts4.tv_sec)) {
		printf("Test PASSED\n");
		return PTS_PASS;
	} else {
		printf("Test FAILED - ts1=%ld,ts2=%ld,ts3=%ld,ts4=%ld\n",
		       ts1.tv_sec, ts2.tv_sec, ts3.tv_sec, ts4.tv_sec);
		return PTS_FAIL;
	}

	printf("This code should not be executed.\n");
	return PTS_UNRESOLVED;
#else
	printf("CLOCK_PROCESS_CPUTIME_ID unsupported\n");
	return PTS_UNSUPPORTED;
#endif
#endif

}
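Note that the test compares only tv_sec, so two samples taken within the same second would pass even if tv_nsec ran backwards between them. A stricter full-resolution check could use a helper like this hypothetical one (not part of the test suite):

#include <time.h>

/* Nonzero if *a <= *b at nanosecond resolution (hypothetical helper). */
static int timespec_le(const struct timespec *a, const struct timespec *b)
{
	return (a->tv_sec < b->tv_sec) ||
	       (a->tv_sec == b->tv_sec && a->tv_nsec <= b->tv_nsec);
}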
Example #2
/* Headers needed by this fragment (elided in the excerpt);
 * posixtest.h provides the PTS_* result codes. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include "posixtest.h"

int main(int argc, char *argv[])
{
#ifndef _POSIX_CPUTIME
    printf("_POSIX_CPUTIME unsupported\n");
    return PTS_UNSUPPORTED;
#else
    clockid_t clockid;
    struct timespec tp1 = {.tv_sec = 0, .tv_nsec = 0};

    dosomething();

    if (clock_getcpuclockid(getpid(), &clockid) != 0) {
        printf("clock_getcpuclockid() failed\n");
        return PTS_FAIL;
    }

    /* Verify that it returned a valid clockid_t that can be used in other functions */
    if (clock_gettime(clockid, &tp1) != 0) {
        printf("clock_getcpuclockid() returned an invalid clockid_t: %d\n", clockid);
        return PTS_FAIL;
    }

    printf("Test PASSED\n");
    return PTS_PASS;
#endif
}
Example #3
/* stdio.h/stdlib.h added for printf()/exit(); b, c, flag, PASS_A()
 * and dosomething() are defined elsewhere in the original file. */
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int i = 0;
    int a = 1234;

    //for (i = 0; i < 100000000L; i++) {
    for (i = 0; i < 10; i++) {
        printf("PRE : a %d b %d c %d, flag %s\n", a,b,c, flag & 0x01 ? "SET":"NOT SET");

        a = dosomething(PASS_A(a));
        
        printf("POST: a %d b %d c %d, flag %s\n", a,b,c, flag & 0x01 ? "SET":"NOT SET");

//        a = ntohs(a);
    }

    exit(0);
}
Example #4
/* fd1 and fd2 must already be open in nonblocking mode; r_read(),
   docommand() and dosomething() are defined elsewhere. */
void process_or_do_work(int fd1, int fd2) {
   char buf[1024];
   ssize_t bytesread;

   for ( ; ; ) {
      bytesread = r_read(fd1, buf, sizeof(buf));
      if ((bytesread == -1) && (errno != EAGAIN))
         return;                                    /* a real error on fd1 */
      else if (bytesread > 0) {
         docommand(buf, bytesread);
         continue;
      }
      bytesread = r_read(fd2, buf, sizeof(buf));
      if ((bytesread == -1) && (errno != EAGAIN))
         return;                                    /* a real error on fd2 */
      else if (bytesread > 0) 
         docommand(buf, bytesread);
      else
         dosomething();          /* input not available, do something else */
   }
}
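This polling loop only works if read() can return -1 with errno set to EAGAIN, i.e. the descriptors are nonblocking, and if r_read() restarts reads interrupted by signals. A minimal sketch of both pieces, assuming nothing beyond POSIX fcntl() and read() (the real r_read() may differ):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Put an already-open descriptor into nonblocking mode. */
int setnonblock(int fd) {
   int flags = fcntl(fd, F_GETFL, 0);
   if (flags == -1)
      return -1;
   return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

/* Retry read() when it is interrupted by a signal (EINTR). */
ssize_t r_read(int fd, void *buf, size_t size) {
   ssize_t retval;
   while (((retval = read(fd, buf, size)) == -1) && (errno == EINTR)) ;
   return retval;
}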
Example #5
// returns 1 if a key is pressed.
// the key value/index is stored in the global variable NewKey.
// ColCnt, colnum, rownum, keybuf, LastKey and NewKey are globals
// declared elsewhere; PORTD/PORTE are the MCU's keypad port registers.
char kbhit(){
	unsigned char KeyDATA[] =
	 {'1','2','3','A','4','5','6','B','7','8','9','C','*','0','#','D'};

	for(ColCnt = 0, colnum = 0x04; colnum < 0x40; ColCnt = ColCnt + 4){
		PORTD = colnum;	// set column value
		dosomething();	// delay
		rownum = PORTE;	// get row value
		rownum = (rownum & 0x0f);	// mask all but rows
		if(rownum){	// if a row in this column is high
			if(rownum == 0x04) rownum = 3;
			if(rownum == 0x08) rownum = 4;	/* figure row number */
			rownum = rownum - 1;
			keybuf = ColCnt + rownum;	/* calculate key index */
			if(keybuf == LastKey)	return 0; /* ignore if same key pressed */
			LastKey = keybuf;	/* save this key for repeat */
			NewKey = KeyDATA[keybuf];
			return 1;			/* return YES  */
		}
		colnum = colnum << 1;	// shift column left
	}
	LastKey = 0xff;	/* initialize key repeat compare value*/
	return 0;	/* no key pressed, return 0 */
}
Example #6
/* rank, size and prd are globals, and usage(), set_strict_affinity(),
 * initprd() and dosomething() are defined elsewhere in the original file. */
int main(int argc, char *argv[])
{
	double *rbuf = NULL;	/* gather buffer; allocated on rank 0 only */
	FILE *fp = stdout;
	char fn[BUFSIZ];
	/* for an output in the json format */
	char strbuf[BUFSIZ];
	char *ptr;
	int remain;
	int len;
	double timeout_sec = 600.0, st;
	int opt;
	/* If seq_rank >=0, only seq_rank does a real work
	 * other ranks do nothing; waiting until the next sync point
	 */
	int seq_rank = -1;

	fn[0] = 0;

	MPI_Init(NULL, NULL);

	while ((opt = getopt(argc, argv, "ht:s:o:")) != -1) {
		switch (opt) {
		case 'h':
			usage(argv[0]);
			exit(0);
		case 't':
			timeout_sec = atof(optarg);	/* allow fractional seconds */
			break;
		case 's':
			seq_rank = atoi(optarg);
			break;
		case 'o':
			snprintf(fn, BUFSIZ, "%s", optarg);
			break;
		}
	}


	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	if (rank == 0) {
		printf("# timeout_sec=%lf\n", timeout_sec);
		printf("# mpisize=%d\n", size);

		if (strlen(fn) > 0) {
			fp = fopen(fn, "w");
			if (!fp) {
				fprintf(stderr, "Unable to open %s", fn);
				exit(1);
			}
		}
	}

	set_strict_affinity(size, rank);
	if (rank == 0)
		printf("# set affinity\n");

	if (rank == 0) {
		rbuf = (double *)malloc(size*sizeof(double));
		assert(rbuf);
	}

	MPI_Barrier(MPI_COMM_WORLD);

	initprd();
	st = MPI_Wtime();

	while (1) {
		MPI_Barrier(MPI_COMM_WORLD);

		if ((MPI_Wtime()-st) >= timeout_sec)
			break;

		dosomething(rank, seq_rank);

		MPI_Gather(&prd.gflops, 1, MPI_DOUBLE,
			   rbuf, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);


		if (rank == 0) {
			int j;
			double agg;

			ptr = strbuf;
			remain = BUFSIZ - 1;

			snprintf(ptr, remain, "{\"sample\":\"dgemm\",\"time\":%.2lf",
				 MPI_Wtime());
			len = strlen(ptr);
			ptr += len;
			remain -= len;

			snprintf(ptr, remain, ", \"mpisize\":%d ", size);
			len = strlen(ptr);
			ptr += len;
			remain -= len;

			/* */
			agg = 0.0;
			for (j = 0; j < size; j++) {
				agg += rbuf[j];
				snprintf(ptr, remain, ", \"dgemm%d\":%.3lf ",
					 j, rbuf[j]);
				len = strlen(ptr);
				ptr += len;
				remain -= len;
			}

			snprintf(ptr, remain, ", \"dgemm_agg\":%.3lf ", agg);
			len = strlen(ptr);
			ptr += len;
			remain -= len;

			snprintf(ptr, remain, "}");
			fprintf(fp, "%s\n", strbuf);
			fflush(fp);
			if (0) {	/* disabled debug dump of the last sample */
				FILE *fp;

				fp = fopen("dgemmlast.log", "w");
				if (fp) {
					fprintf(fp, "%s\n", strbuf);
					fflush(fp);
					fclose(fp);
				}
			}
		}
	}


	if (rank == 0) {
		if (fp != stdout)
			fclose(fp);
		free(rbuf);
	}

	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Finalize();
	return 0;
}
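The seq_rank option described in the comment gates which rank does real work between barriers. Inside dosomething() that gate might look like the following hypothetical sketch (run_dgemm_kernel() is a placeholder, not from the original):

/* Hypothetical sketch of the seq_rank gate: when seq_rank >= 0, only
 * that rank does real work; every other rank returns at once and just
 * waits at the next MPI_Barrier in main(). */
void run_dgemm_kernel(void);	/* placeholder for the real DGEMM kernel */

void dosomething(int rank, int seq_rank)
{
	if (seq_rank >= 0 && rank != seq_rank)
		return;
	run_dgemm_kernel();	/* updates prd.gflops in the original */
}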
Example #7
/* Exercises the combined OpenMP target/teams/distribute constructs
   with a wide range of clause combinations; dosomething() is declared
   elsewhere. */
void
test (int n, int o, int p, int q, int r, int s, int *pp)
{
  int a[o], i, j;
  #pragma omp target data device (n + 1) if (n != 6) map (tofrom: n, r)
  {
    #pragma omp target device (n + 1) if (n != 6) map (from: n) map (alloc: a[2:o-2])
      dosomething (a, n, 0);
    #pragma omp target teams device (n + 1) num_teams (n + 4) thread_limit (n * 2) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r)
    {
      r = r + 1;
      p = q;
      dosomething (a, n, p + q);
    }
    #pragma omp target teams distribute device (n + 1) num_teams (n + 4) collapse (2) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target teams distribute device (n + 1) num_teams (n + 4) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	    #pragma omp ordered
	      p = q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	{
	  for (j = 0; j < 10; j++)
	    {
	      r = r + 1;
	      p = q;
	      dosomething (a, n, p + q);
	    }
	  #pragma omp ordered
	    p = q;
	  s = i * 10;
	}
    #pragma omp target teams distribute parallel for simd device (n + 1) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	schedule (static, 8) num_teams (n + 4) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams distribute parallel for simd device (n + 1) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
    	proc_bind (master) lastprivate (s) schedule (static, 8) \
    	num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    #pragma omp target teams distribute simd device (n + 1) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	lastprivate (s) num_teams (n + 4) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams distribute simd device (n + 1) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
    	num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams num_teams (n + 4) thread_limit (n * 2) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r)
    {
      r = r + 1;
      p = q;
      dosomething (a, n, p + q);
    }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute num_teams (n + 4) collapse (2) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute num_teams (n + 4) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	    #pragma omp ordered
	      p = q;
	    s = i * 10 + j;
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	{
	  for (j = 0; j < 10; j++)
	    {
	      r = r + 1;
	      p = q;
	      dosomething (a, n, p + q);
	    }
	  #pragma omp ordered
	    p = q;
	  s = i * 10;
	}
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	schedule (static, 8) num_teams (n + 4) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
    	proc_bind (master) lastprivate (s) schedule (static, 8) \
    	num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute simd default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	lastprivate (s) num_teams (n + 4) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute simd default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
    	num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)default(shared) shared(n) \
	private (p) reduction (+: r)
    #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2) shared(n) private(p) reduction (+ : r) \
	default(shared)
    #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	collapse (2) dist_schedule (static, 4) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	    #pragma omp ordered
	      p = q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	num_threads (n + 4) dist_schedule (static, 4) \
    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	{
	  for (j = 0; j < 10; j++)
	    {
	      r = r + 1;
	      p = q;
	      dosomething (a, n, p + q);
	    }
	  #pragma omp ordered
	    p = q;
	  s = i * 10;
	}
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	collapse (2) dist_schedule (static, 4) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	schedule (static, 8) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	num_threads (n + 4) dist_schedule (static, 4) \
    	proc_bind (master) lastprivate (s) schedule (static, 8) \
    	safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
	reduction(+:r)
    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
    	collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
	reduction(+:r)
    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
    	lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
  }
}
Example #8
void
test2 (int n, int o, int p, int r, int s, int *pp)
{
  int a[o];
  /* i, j and q come from the enclosing file in the original;
     declared here so the fragment is self-contained. */
  int i, j, q = 0;
    #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp distribute parallel for if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	collapse (2) dist_schedule (static, 4) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	    #pragma omp ordered
	      p = q;
	    s = i * 10 + j;
	  }
    #pragma omp distribute parallel for if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	num_threads (n + 4) dist_schedule (static, 4) \
    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	{
	  for (j = 0; j < 10; j++)
	    {
	      r = r + 1;
	      p = q;
	      dosomething (a, n, p + q);
	    }
	  #pragma omp ordered
	    p = q;
	  s = i * 10;
	}
    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	collapse (2) dist_schedule (static, 4) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	schedule (static, 8) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	num_threads (n + 4) dist_schedule (static, 4) \
    	proc_bind (master) lastprivate (s) schedule (static, 8) \
    	safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
    	collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
    	lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
}
Example #9
// call operator of a functor; i_ is a data member of the enclosing class.
void operator() (){
    for(int j = 0; j < 100; j++) {
        dosomething(i_);
    }
}
Example #10
// waits for a key press and returns it; kbhit() and the NewKey global
// are the keypad-scanning routine and variable from Example #5.
char getkey(){
	while( !kbhit()){		// wait for key press
		dosomething();
	}
	return(NewKey);
}
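Together, kbhit() and getkey() form a simple polled keypad driver: getkey() busy-waits, letting dosomething() run useful work between scans. A hypothetical main loop wiring them together (handle_key() is a placeholder, not from the original):

// Hypothetical use of the keypad driver from Examples #5 and #10.
void handle_key(char key);	// placeholder for the application's handler

void main_loop(void){
	for(;;){
		char key = getkey();	// blocks until a new key is pressed
		handle_key(key);
	}
}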