Example #1
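/* Open every file named on the command line, start an asynchronous read on each one,
   then loop: call dowork() and poll the outstanding reads, restarting a read once its
   buffer has been processed. */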
int main(int argc, char *argv[]) {
   char *buf;
   int done[NUMOPS];
   int fd[NUMOPS];
   int i;
   int numbytes, numfiles;

   if (argc < 2) {
      fprintf(stderr, "Usage: %s filename1 filename2 ...\n", argv[0]);
      return 1;
   } else if (argc > NUMOPS + 1) {
      fprintf(stderr, "%s: only supports %d simultaneous operations\n",
              argv[0],  NUMOPS);
      return 1;
   }
   numfiles = argc - 1;

   for (i = 0; i < numfiles; i++)  {            /* set up the I/O operations */
      done[i] = 0;
      if ((fd[i] = open(argv[i+1], O_RDONLY)) == -1) {
         fprintf(stderr, "Failed to open %s:%s\n", argv[i+1], strerror(errno));
         return 1;
      }
      if (initaio(fd[i], i) == -1) {
         fprintf(stderr, "Failed to setup I/O op %d:%s\n", i, strerror(errno));
         return 1;
      }
      if (readstart(i) == -1) {
         fprintf(stderr, "Failed to start read %d:%s\n", i, strerror(errno));
         return 1;
      }
   }
   for (  ;  ;  ) {                                         /* loop and poll */
      dowork();
      for (i = 0; i < numfiles; i++) {
         if (done[i])
            continue;
         numbytes = readcheck(i, &buf);
         if ((numbytes == -1) && (errno == EINPROGRESS))
            continue;
         if (numbytes <= 0) {
            if (numbytes == 0)
               fprintf(stderr, "End of file on %d\n", i);
            else
               fprintf(stderr, "Failed to read %d:%s\n", i, strerror(errno));
            done[i] = 1;
            continue;
         }
         processbuffer(i, buf, numbytes);
         reinit(i);
         if (readstart(i) == -1) {
            fprintf(stderr, "Failed to start read %d:%s\n", i, strerror(errno));
            done[i] = 1;
         }
      }
   }
}
Example #2
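/* On every iteration each rank refills its array; ranks other than 0 send it to rank 0
   with a per-iteration tag, and every rank calls dowork() after its send or receive. */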
int main(int argc, char ** argv)
{

  int nprocs,rank;

  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD,&nprocs);  
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);

  printf("nprocs: %d rank: %d\n",nprocs,rank);  

  int * array = malloc( ARRAY_SIZE * sizeof(int) );
  int i,j,jmax = 50;

  for ( j=0; j<jmax; j++) {

    int tag = j + 1;
    for (i=0;i<ARRAY_SIZE;i++) {
      array[i] = rank * i + j;
    }

    //printf("process: %d j: %d\n",rank,j);    

    if ( rank == 0 && nprocs > 1 ) {
       for (i=1; i<nprocs; i++) {
          MPI_Recv(array,ARRAY_SIZE,MPI_INT,i,tag,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
          float junk = dowork();
          if ( junk == 0.00 ) printf("Broken\n");
       }
    }
    else if ( rank != 0 ) {   /* skip the self-send that would deadlock a single-rank run */
       MPI_Send(array,ARRAY_SIZE,MPI_INT,0,tag,MPI_COMM_WORLD);
       float junk = dowork();
       if ( junk == 0.00 ) printf("Broken\n");
    }

  }

  free(array);
  MPI_Finalize();
  return 0;

}
Example #3
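/* Build a libdivecomputer context and device descriptor from the program options,
   delegate the actual dive download to dowork(), then release the resources. */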
int dump_dives(program_options_t *options) {
    dc_family_t backend = DC_FAMILY_NULL;
    dc_loglevel_t loglevel = DC_LOGLEVEL_WARNING;
    const char *logfile = "output.log";
    const char *name = NULL;
    const char *fingerprint = NULL;
    unsigned int model = 0;

    if (options->backend != NULL) {
        backend = lookup_type(options->backend);
    }
    signal (SIGINT, sighandler);

    message_set_logfile(logfile);

    dc_context_t *context = NULL;

    /* create a new context */
    dc_status_t rc = dc_context_new(&context);
    if (rc != DC_STATUS_SUCCESS) {
        message_set_logfile(NULL);
        return EXIT_FAILURE;
    }

    dc_context_set_loglevel(context, loglevel);
    dc_context_set_logfunc(context, logfunc, NULL);

    dc_descriptor_t *descriptor = NULL;
    rc = search(&descriptor, name, backend, model);
    if (rc != DC_STATUS_SUCCESS) {
        dc_context_free(context);   /* release the context on the error path */
        message_set_logfile(NULL);
        return EXIT_FAILURE;
    }

    /* fail if no device descriptor found */
    if (descriptor == NULL) {
        WARNING("No matching device found");
        /* FIXME: bail out to usage information */
        dc_context_free(context);   /* release the context on the error path */
        message_set_logfile(NULL);
        return EXIT_FAILURE;
    }

    dc_buffer_t *fp = fpconvert(fingerprint);
    rc = dowork(context, descriptor, options, fp);
    dc_buffer_free(fp);
    /* FIXME: why aren't calls to errmsg working? */
    // message("Result: %s\n", errmsg(rc));

    dc_descriptor_free(descriptor);
    dc_context_free(context);

    message_set_logfile(NULL);

    return rc != DC_STATUS_SUCCESS ? EXIT_FAILURE : EXIT_SUCCESS;
}
Example #4
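// TMB objective function: accumulate dowork() over the columns of the parameter array x
// and return the sum of the accumulated vector.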
Type objective_function<Type>::operator() ()
{
  PARAMETER_ARRAY(x);
  int m=x.cols();
  Type res=0;
  int n=400;
  vector<Type> tmp(n);
  tmp.setZero();
  for(int i=0;i<m;i++)tmp+=dowork(vector<Type>(x.col(i)));
  res=tmp.sum();
  return res;
}
Example #5
int main(int argc, char **argv)
{
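  /* Scatter a scrambled index permutation across the MPI ranks, run dowork() five
     times, and report the total count and the average time of the five runs. */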
  nptsside = atoi(argv[1]);
  print_node = atoi(argv[2]);
  side2 = nptsside / 2.0;
  side4 = nptsside / 4.0;

  

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nnodes);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  mpi_chunksize = nptsside/nnodes;

  struct timespec bgn,nd;
  clock_gettime(CLOCK_REALTIME, &bgn);

  scram = rpermute(nptsside, 0);
  MPI_Scatter(scram, mpi_chunksize, MPI_INT, scram, mpi_chunksize, MPI_INT, 0, MPI_COMM_WORLD);

  
  dowork();
  dowork();
  dowork();
  dowork();
  dowork();

  

  clock_gettime(CLOCK_REALTIME, &nd);

  if (my_rank == print_node) 
    printf("The total count is %d,\t and the average time of 5 runs is %f\n", tot_count,timediff(bgn,nd)/5);
  

  MPI_Finalize();

}
Example #6
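// Same objective as Example #4, but each column's dowork() call is wrapped in
// PARALLEL_REGION so TMB can evaluate the columns in parallel.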
Type objective_function<Type>::operator() ()
{
  #ifdef _OPENMP
  this->max_parallel_regions=omp_get_max_threads();
  #endif

  PARAMETER_ARRAY(x);
  int m=x.cols();
  Type res=0;
  int n=400;
  vector<Type> tmp(n);
  tmp.setZero();
  for(int i=0;i<m;i++){
    PARALLEL_REGION tmp+=dowork(vector<Type>(x.col(i)));
  }
  res=tmp.sum();
  return res;
}
Example #7
/* The recursive routine, dirty talker, directory walker */
static void dowork(const char *path)
{
	DIR *dir=NULL;
	struct dirent *d=NULL;
	struct stat s;
	char buf[PATH_MAX];

	assert(path);

	/* See what the thing passed in is */
	if(lstat(path, &s)<0) {
		perror(path);
		return;
	}

	/* We want to do something different for directories and other things. */
	if(S_ISDIR(s.st_mode)) {
		dir=opendir(path);
		if(dir==NULL) {
			perror(path);
			return;
		}
		while((d=readdir(dir)) != NULL) {
			if(! (strcmp(d->d_name, ".")==0||strcmp(d->d_name, "..")==0) ) {
				if(strlen(path)+strlen(d->d_name)+2<PATH_MAX) {
					if(path[strlen(path)-1]=='/') {
						sprintf(buf, "%s%s", path, d->d_name);
					} else {
						sprintf(buf, "%s/%s", path, d->d_name);
					}
					dowork(buf);
				} else {
		    		printf("ERROR!!!  Filename too long for buffer: %s/%s\n",
                       	path, d->d_name);
				}
			} 
		}
		closedir(dir);
	} else {
		check(path, s);
	}
}
Example #8
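/* Each PVM task looks up its own index in the sibling list; task 0 prints the ring
   order, then every task participates in passing the token via dowork(). */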
int main(void)
{
    int mytid;                  /* my task id */
    int *tids;                  /* array of task ids */
    int me;                     /* my process number */
    int i;
	int ntids;

    /* enroll in pvm */
    mytid = pvm_mytid();

	/* determine the size of my sibling list */

	ntids = pvm_siblings(&tids);

	for (i = 0; i < ntids; i ++)
		if ( tids[i] == mytid)
		{
			me = i;
			break;
		}
		
	if (me == 0)
	{
		printf("Pass a token through the %3d tid ring:\n", ntids);
		for (i = 0; i < ntids; i ++)
		{
			printf( "%6d -> ", tids[i]);
			if (i % 6 == 0 && i > 0)
				printf("\n");	
		}
		printf("%6d \n", tids[0]);
	}
/*--------------------------------------------------------------------------*/
     
     dowork( me, ntids, tids );

     /* program finished, exit pvm */
     pvm_exit();
     exit(0);                   /* exit status 0 signals success */
}
Example #9
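// Run DoWork on 1, 2, 4, ... MAX_THREADS_TO_RUN threads, sum the per-thread partial
// results, and report pi = sqrt(6 * sum) together with the elapsed wall time.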
int main(void)
{
	pool.preload_threads(8); // reserve 8 threads for work
	printf("This PC has %d processors\n", get_processor_count());
	for (int numThreads = 1; numThreads <= MAX_THREADS_TO_RUN; numThreads *= 2) {
		printf("Using %d processors for internal calculations.\n", numThreads);
		DoWork dowork(numThreads);
		time_t xt = time(NULL);
		pool.run(&dowork, numThreads);
		xt = time(NULL) - xt;
		
		double sum = 0;
		for (int i = 0; i < numThreads; i++) {
			printf("Processor %d gave %.5lf as result\n", i, dowork.data[i]);
			sum += dowork.data[i];
		}
		printf("pi = %.9lf\n", sqrt(sum * 6.0));
		printf("Result produced in %u sec\n", (unsigned)xt);
	}
	return 0;
}
Example #10
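/* Copy argv[1] to argv[2] using asynchronous I/O driven by SIGRTMAX, checking for
   completion or an error between calls to dowork(). */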
int main(int argc, char *argv[]) {
   char buf[BLKSIZE];
   int done = 0;
   int error;
   int fd1;
   int fd2;
                                        /* open the file descriptors for I/O */
   if (argc != 3) {
      fprintf(stderr, "Usage: %s filename1 filename2\n", argv[0]);
      return 1; 
   }
   if ((fd1 = open(argv[1], O_RDONLY)) == -1) {
      fprintf(stderr, "Failed to open %s:%s\n", argv[1], strerror(errno));
      return 1; 
   }
   if ((fd2 = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, MODE)) == -1) {
      fprintf(stderr, "Failed to open %s: %s\n", argv[2], strerror(errno));
      return 1;
   }
   if (initsignal(SIGRTMAX) == -1) {
      perror("Failed to initialize signal");
      return 1;
   }
   if (initread(fd1, fd2, SIGRTMAX, buf, BLKSIZE) == -1) {
      perror("Failed to initate the first read");
      return 1;
   }
   for ( ; ; ) {
      dowork();
      if (!done)
         if ((done = getdone())) {        /* extra parentheses make the assignments explicit */
            if ((error = geterror()))
               fprintf(stderr, "Failed to copy file:%s\n", strerror(error));
            else
               fprintf(stderr, "Copy successful, %d bytes\n", getbytes());
         }
   }
}
Example #11
int main(int argc, char *argv[])
{
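  /* Hybrid benchmark: distribute rows either as a scattered random permutation (when RC
     is defined) or as contiguous per-rank ranges, time one dowork() run, and append the
     measurements to OMP_MPI_mand.txt on print_node. */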

  nptsside = atoi(argv[1]);
  print_node = atoi(argv[2]);
  nodenum = (int) atoi(argv[3]);
  ppnnum = (int) atoi(argv[4]);
  tasktype =  argv[5][0];
  side2 = nptsside / 2.0;
  side4 = nptsside / 4.0;


  int provided, claimed;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided );
  MPI_Query_thread( &claimed );

//  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nnodes);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  if (my_rank == print_node) {
    printf( "Query thread level= %d  Init_thread level= %d\n", claimed, provided );
    printf( "Defined LEVEL= %d  (ompi_info | grep -i thread) \n", MPI_THREAD_MULTIPLE);
  }
	        

  mpi_chunksize = nptsside/nnodes;

  struct timespec bgn,nd;
  clock_gettime(CLOCK_REALTIME, &bgn);

  #ifdef RC
  scram = rpermute(nptsside, 0);

  MPI_Scatter(scram, mpi_chunksize, MPI_INT, scram, mpi_chunksize, MPI_INT, 0, MPI_COMM_WORLD);

  #else
    findmyrange(nptsside, nnodes, my_rank, myrange);
    scram = rpermute(mpi_chunksize, myrange[0]);
    printf("My range is %d %d \n", myrange[0], myrange[1]);
  #endif

  
  dowork();

  //implied barrier

  clock_gettime(CLOCK_REALTIME, &nd);

  if (my_rank == print_node) {
#ifdef RC
    printf("Random chunk RC is defined\n");
#endif
    printf("time:%g ::  maxiteration:%d ::  Pixel:%d :: threadnum:%d :: mpi_chunksize:%d :: tot_count:%d :: count:%d :: nodenum:%d :: ppnnum:%d :: tasktype:%c\n",
        timediff(bgn,nd),MAXITERS, nptsside,nnodes,mpi_chunksize,tot_count,count,nodenum,ppnnum,tasktype);
    FILE *fp;
    fp = fopen("OMP_MPI_mand.txt","a");
    fprintf(fp, "time:%g :: maxiteration:%d :: Pixel:%d :: threadnum:%d :: mpi_chunksize:%d :: tot_count:%d :: count:%d :: nodenum:%d :: ppnnum:%d :: tasktype:%c\n",
        timediff(bgn,nd),MAXITERS,nptsside,nnodes,mpi_chunksize,tot_count,count,nodenum,ppnnum,tasktype);
    fclose(fp);

  }

  MPI_Finalize();

}
Example #12
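/* MPI+OpenMP affinity probe: every thread reports the core range it may run on; the -s
   option makes the threads spin forever, and -w times a fixed amount of dowork() split
   evenly across the MPI ranks. */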
int main (int argc, char **argv)
{
  char hnbuf[64];
  int rank;
  int thread;
  int lo;
  int hi;
  cpu_set_t coremask;
  int ret;

  int c;                    // for spinning
  int spin = 0;             // for spinning: default is do not
  int timework = 0;         // time a fixed amount of work
  const int totiter = 1024; // total iterations for all tasks
  int niter;                // number of iterations for this task
  float results[totiter];   // sized for the maximum; niter is only computed later as totiter/nranks
  int nranks;               // size of MPI_COMM_WORLD

  void dowork (const int, float *);

  if ((ret = MPI_Init (&argc, &argv)) != 0) {
    printf ("MPI_Init failed\n");
    return -1;
  }

  (void) MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  (void) MPI_Comm_size (MPI_COMM_WORLD, &nranks);

  // begin additions for spinning
  while ((c = getopt (argc, argv, "sw")) != -1) {
    switch (c) {
    case 's':
      spin = 1;
      break;
    case 'w':
      timework = 1;
      break;
    default:
      printf ("unknown option %c\n", c);
      (void) MPI_Abort (MPI_COMM_WORLD, 1);
      break;
    }
  }

  if (spin && timework) {
    printf ("spin and timework may not both be true\n");
    (void) MPI_Abort (MPI_COMM_WORLD, 1);
  }

  (void) gethostname (hnbuf, sizeof (hnbuf));
  // lo and hi hold per-thread results from runnable(), so keep them private as well
#pragma omp parallel private (thread, coremask, lo, hi)
  {
    thread = omp_get_thread_num ();
    // Passing zero means use the calling process
    (void) sched_getaffinity (0, sizeof (coremask), &coremask);
    runnable (&coremask, &lo, &hi);
    #pragma omp barrier
    printf ("Rank %d thread %d on %s. (Runnable range: lo=%d hi=%d)\n",
	    rank, thread, hnbuf, lo, hi);
    // Put in a spin loop
    if (spin)
      while (1);
  }

  if (timework) {
    if (totiter % nranks != 0) {
      printf ("nranks=%d needs to divide evenly into totiter=%d\n", nranks, totiter);
      (void) MPI_Abort (MPI_COMM_WORLD, 1);
    }
    niter = totiter / nranks;
    dowork (niter, results);
  }

  MPI_Finalize ();
  return 0;
}
Example #13
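/* Launch nthreads print_server threads guarded by per-thread mutexes, wait for each to
   signal that it has started, call dowork(), then ask the threads to terminate, join
   them, and report how much work each one did. */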
int
test_eyal1(void)
{
  int		i;

  assert(NULL != (tcs = (TC *) calloc (nthreads, sizeof (*tcs))));

  /* 
   * Launch threads
   */
  for (i = 0; i < nthreads; ++i)
    {
      tcs[i].id = i;

      assert(pthread_mutex_init (&tcs[i].mutex_start, NULL) == 0);
      assert(pthread_mutex_init (&tcs[i].mutex_started, NULL) == 0);
      assert(pthread_mutex_init (&tcs[i].mutex_end, NULL) == 0);
      assert(pthread_mutex_init (&tcs[i].mutex_ended, NULL) == 0);

      tcs[i].work = 0;  

      assert(pthread_mutex_lock (&tcs[i].mutex_start) == 0);
      assert((tcs[i].stat = 
	      pthread_create (&tcs[i].thread,
			      NULL,
                  (void *(*)(void *))print_server,
                (void *) &tcs[i])
	      ) == 0);

      /* 
       * Wait for thread initialisation
       */
      {
	int trylock = 0;

	while (trylock == 0)
	  {
	    trylock = pthread_mutex_trylock(&tcs[i].mutex_started);
	    assert(trylock == 0 || trylock == EBUSY);

	    if (trylock == 0)
	      {
		assert(pthread_mutex_unlock (&tcs[i].mutex_started) == 0);
	      }
	  }
      }
    }

  dowork ();

  /*
   * Terminate threads
   */
  todo = -2;	/* please terminate */
  dosync();

  for (i = 0; i < nthreads; ++i)
    {
      if (0 == tcs[i].stat)
	assert(pthread_join (tcs[i].thread, NULL) == 0);
    }

  /* 
   * destroy locks
   */
  assert(pthread_mutex_destroy (&mutex_stdout) == 0);
  assert(pthread_mutex_destroy (&mutex_todo) == 0);

  /*
   * Cleanup
   */
  printf ("\n");

  /*
   * Show results
   */
  for (i = 0; i < nthreads; ++i)
    {
      printf ("%2d ", i);
      if (0 == tcs[i].stat)
	printf ("%10ld\n", tcs[i].work);
      else
	printf ("failed %d\n", tcs[i].stat);

      assert(pthread_mutex_unlock(&tcs[i].mutex_start) == 0);

      assert(pthread_mutex_destroy (&tcs[i].mutex_start) == 0);
      assert(pthread_mutex_destroy (&tcs[i].mutex_started) == 0);
      assert(pthread_mutex_destroy (&tcs[i].mutex_end) == 0);
      assert(pthread_mutex_destroy (&tcs[i].mutex_ended) == 0);
    }

  die (0);

  return (0);
}
Example #14
int main(int argc, char **argv)
{
	/* require a path argument before handing it to dowork() */
	if (argc < 2)
		return 1;
	dowork(argv[1]);
	return 0;
}
Example #15
int
main(int argc, char *argv[])
{
    dowork();
    return 0;
}