Example #1
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[]){
    int sequence[SEQUENCE_CNT];  /* SEQUENCE_CNT and the global 'seed' are defined elsewhere in the project */
    if (argc < 2){
        printf("Usage %s <seed>\n", argv[0]);
        exit(1);
    } else {
        seed = atoi(argv[1]);
    }

#if PAPI
   // printf("PAPI enabled.\n");
#else
   // printf("PAPI disabled.\n");
#endif
    generate_random_numbers(sequence);
    compute(sequence);
    return 0;
}
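The example calls generate_random_numbers(sequence) without showing its definition. A minimal sketch of what such a helper could look like, assuming it fills the SEQUENCE_CNT-element array with values from the C standard rand() generator seeded by the global seed parsed in main(); only the names come from the example, the body and the concrete value of SEQUENCE_CNT are assumptions:

#include <stdlib.h>

#define SEQUENCE_CNT 1024   /* assumed size; the real project defines this elsewhere */
static int seed;            /* assumed global, set from argv[1] in main() */

/* Fill 'sequence' with SEQUENCE_CNT pseudo-random integers from the C
 * standard generator, seeded once with the globally stored 'seed'. */
static void generate_random_numbers(int sequence[])
{
    srand((unsigned int) seed);
    for (int i = 0; i < SEQUENCE_CNT; i++)
        sequence[i] = rand();
}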
Example #2
static void
init(void)
{
   if (piglit_is_extension_supported("GL_ARB_vertex_shader")) {
      glGetIntegerv(GL_MAX_TEXTURE_COORDS, &MaxTextureCoordUnits);
      glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &MaxTextureImageUnits);
      glGetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, &MaxTextureVertexUnits);
      glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &MaxTextureCombinedUnits);
   }
   else if (piglit_is_extension_supported("GL_ARB_fragment_shader") ||
            piglit_is_extension_supported("GL_ARB_fragment_program")) {
      glGetIntegerv(GL_MAX_TEXTURE_COORDS, &MaxTextureCoordUnits);
      glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &MaxTextureImageUnits);
      MaxTextureVertexUnits = 0;
      MaxTextureCombinedUnits = MaxTextureImageUnits;
   }
   else {
      glGetIntegerv(GL_MAX_TEXTURE_UNITS, &MaxTextureCoordUnits);
      MaxTextureImageUnits =
      MaxTextureCombinedUnits = MaxTextureCoordUnits;
      MaxTextureVertexUnits = 0;
   }

   report_info();

   if (MaxTextureCombinedUnits > MAX_UNITS) {
      /* Need to increase the MAX_UNITS limit */
      piglit_report_result(PIGLIT_WARN);
   }

   generate_random_numbers();

   glMatrixMode(GL_PROJECTION);
   glLoadIdentity();
   glOrtho(0.0, 1.0, 0.0, 1.0, -1.0, 1.0);
   glMatrixMode(GL_MODELVIEW);
   glLoadIdentity();
}
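init() calls report_info() after querying the limits. A minimal sketch of what that reporting step could look like, assuming the four globals from the example and plain printf output (the function body and format are assumptions, not the test's actual code):

/* Hypothetical report_info(): print the texture-unit limits queried in init(). */
static void
report_info(void)
{
   printf("GL_MAX_TEXTURE_COORDS: %d\n", MaxTextureCoordUnits);
   printf("GL_MAX_TEXTURE_IMAGE_UNITS: %d\n", MaxTextureImageUnits);
   printf("GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS: %d\n", MaxTextureVertexUnits);
   printf("GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS: %d\n", MaxTextureCombinedUnits);
}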
Example #3
int SpikeGenerator(double *synouttmp, double tdres, int totalstim, int nrep, double *sptime)
{
    double  c0, s0, c1, s1, dead;
    int     nspikes, k, NoutMax, Nout, deadtimeIndex, randBufIndex;
    double  deadtimeRnd, endOfLastDeadtime, refracMult0, refracMult1, refracValue0, refracValue1;
    double  Xsum, unitRateIntrvl, countTime, DT;

    double *randNums;

    c0      = 0.5;
    s0      = 0.001;
    c1      = 0.5;
    s1      = 0.0125;
    dead    = 0.00075;

    DT = totalstim * tdres * nrep;  /* Total duration of the rate function */
    Nout = 0;
    NoutMax = (long) ceil(totalstim*nrep*tdres/dead);

    randNums = generate_random_numbers(NoutMax+1);
    randBufIndex = 0;

	/* Calculate useful constants */
	deadtimeIndex = (long) floor(dead/tdres);  /* Integer number of discrete time bins within deadtime */
	deadtimeRnd = deadtimeIndex*tdres;		   /* Deadtime rounded down to length of an integer number of discrete time bins */

	refracMult0 = 1 - tdres/s0;  /* If y0(t) = c0*exp(-t/s0), then y0(t+tdres) = y0(t)*refracMult0 */
	refracMult1 = 1 - tdres/s1;  /* If y1(t) = c1*exp(-t/s1), then y1(t+tdres) = y1(t)*refracMult1 */

	/* Calculate effects of a random spike before t=0 on refractoriness and the time-warping sum at t=0 */
    endOfLastDeadtime = __max(0,log(randNums[randBufIndex++]) / synouttmp[0] + dead);  /* End of last deadtime before t=0 */
    refracValue0 = c0*exp(endOfLastDeadtime/s0);     /* Value of first exponential in refractory function */
	refracValue1 = c1*exp(endOfLastDeadtime/s1);     /* Value of second exponential in refractory function */
	Xsum = synouttmp[0] * (-endOfLastDeadtime + c0*s0*(exp(endOfLastDeadtime/s0)-1) + c1*s1*(exp(endOfLastDeadtime/s1)-1));
        /* Value of time-warping sum */
		/*  ^^^^ This is the "integral" of the refractory function ^^^^ (normalized by 'tdres') */

	/* Calculate first interspike interval in a homogeneous, unit-rate Poisson process (normalized by 'tdres') */
    unitRateIntrvl = -log(randNums[randBufIndex++])/tdres;
	    /* NOTE: Both 'unitRateInterval' and 'Xsum' are divided (or normalized) by 'tdres' in order to reduce calculation time.
		This way we only need to divide by 'tdres' once per spike (when calculating 'unitRateInterval'), instead of
		multiplying by 'tdres' once per time bin (when calculating the new value of 'Xsum').                         */

	countTime = tdres;
	for (k=0; (k<totalstim*nrep) && (countTime<DT); ++k, countTime+=tdres, refracValue0*=refracMult0, refracValue1*=refracMult1)  /* Loop through rate vector */
	{
		if (synouttmp[k]>0)  /* Nothing to do for non-positive rates, i.e. Xsum += 0 for non-positive rates. */
		{
		  Xsum += synouttmp[k]*(1 - refracValue0 - refracValue1);  /* Add synout*(refractory value) to time-warping sum */

			if ( Xsum >= unitRateIntrvl )  /* Spike occurs when time-warping sum exceeds interspike "time" in unit-rate process */
			{
				sptime[Nout] = countTime;
				Nout = Nout + 1;
				unitRateIntrvl = -log(randNums[randBufIndex++]) / tdres;
				Xsum = 0;

			    /* Increase index and time to the last time bin in the deadtime, and reset (relative) refractory function */
				k += deadtimeIndex;
				countTime += deadtimeRnd;
				refracValue0 = c0;
				refracValue1 = c1;
			}
		}
	} /* End of rate vector loop */

	free(randNums);
	nspikes = Nout;  /* Number of spikes that occurred. */
	return(nspikes);
}
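SpikeGenerator() takes log() of every value returned by generate_random_numbers() and free()s the buffer afterwards, so the helper must return a heap-allocated array of strictly positive uniform variates. A minimal sketch under those assumptions (the use of rand() is an assumption, not taken from the original model code):

#include <stdlib.h>

/* Return a malloc'd array of 'count' uniform random numbers in (0, 1],
 * so that log() of every entry is finite and non-positive. */
static double *generate_random_numbers(int count)
{
    double *buf = (double *) malloc(count * sizeof(double));
    for (int i = 0; i < count; i++)
        buf[i] = ((double) rand() + 1.0) / ((double) RAND_MAX + 1.0);
    return buf;
}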
Example #4
game_core::game_core(int board_size) : map_size(board_size), game_over(false),
                                       num_container(board_size)
{
    reset_board();
    generate_random_numbers();
}
Example #5
int main( int argc, char *argv[] )
{
  // MPI variables and initialization
  int rank, size;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

  // Timing checkpoints
  double time_start, time_gen, time_local_sort, time_since_last;
  double time_comm_1, time_comm_2, time_comm_3, time_comm_4, time_comm_5, time_comm_6, time_comm_7, time_comm_8;
  double time_org_1, time_org_2, time_org_3, time_org_4, time_org_5, time_org_6, time_org_7, time_org_8;

  // (minimal) sanity check of the command-line arguments
  if ( argc < 2 )
  {
    if ( rank == 0 )
      printf( "Synopsis: psrs <size of random array> [<output mode=(silent|table|full)>]\n" );
    MPI_Finalize();
    return 1;
  }

  // Determine the size of the array of numbers to be generated
  int numbers_size = atoi( argv[1] );

  // Determine the level of detail for output on stdout
  int output_level = 0;
  if ( argc == 3 )
  {
    if ( strcmp( argv[2], "full" ) == 0 )
      output_level = 0;
    if ( strcmp( argv[2], "table" ) == 0 )
      output_level = 1;
    if ( strcmp( argv[2], "silent" ) == 0 )
      output_level = 2;
  }

  // Initialize timing
  time_start = MPI_Wtime();
  time_since_last = time_start;

  // Generate random numbers
  int numbers[ numbers_size ];
  if (rank == 0) {
    // the actual generation
    generate_random_numbers(numbers, numbers_size);
    // print the invocation parameters
    if ( output_level == 0 )
      printf("starting to sort %d values on %d nodes\n\n", numbers_size, size);
    if ( output_level == 1 )
      printf("%d %d ", numbers_size, size);
  }

  // Measure the time taken to generate the random numbers
  time_gen = MPI_Wtime() - time_since_last;
  time_since_last += time_gen;

  // Distribute the random numbers evenly
  int temp_numbers_per_processor_size = numbers_size / size;
  // Number of values left over after rounding down ( numbers_size / size )
  int spare_numbers = numbers_size - temp_numbers_per_processor_size * size;
  int numbers_per_processor_sizes[ size ];
  for ( int pos = 0; pos < size; pos++ )
  {
    numbers_per_processor_sizes[ pos ] = temp_numbers_per_processor_size;
    // distribute the leftover values over the first nodes
    if (spare_numbers > 0)
    {
      numbers_per_processor_sizes[ pos ]++;
      spare_numbers--;
    }
  }
  
  // Organization time 1
  time_org_1 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_1;

  int numbers_per_processor_size;
  MPI_Scatter(numbers_per_processor_sizes, 1, MPI_INT, &numbers_per_processor_size, 1, MPI_INT, 0, MPI_COMM_WORLD); // TODO avoid by accessing numbers_per_processor_sizes[rank] since each processor knows arguments and cluster size
  
  // Communication time 1
  time_comm_1 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_1;

  int scatter_displacements[size];
  displacements(scatter_displacements, numbers_per_processor_sizes, size); // TODO root process only
  
  // Organization time 2
  time_org_2 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_2;

  int numbers_per_processor[ numbers_per_processor_size ];
  MPI_Scatterv(numbers, numbers_per_processor_sizes, scatter_displacements, MPI_INT, numbers_per_processor,
    numbers_per_processor_size, MPI_INT, 0, MPI_COMM_WORLD);

  // Communication time 2
  time_comm_2 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_2;

  // sort locally
  quicksort( numbers_per_processor, 0, numbers_per_processor_size-1 );

  // Time for local sorting
  time_local_sort = MPI_Wtime() - time_since_last;
  time_since_last += time_local_sort;

  // build a representative sample
  int w = numbers_size / ( size * size );
  int representative_selection[ size ];
  for( int pos=0; pos < size; pos++ )
    representative_selection[ pos ] = numbers_per_processor[ pos * w ]; // FIXME should be pos * w + 1

  // Organization time 3
  time_org_3 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_3;

  // gather the sample on one node
  int selected_numbers[ size * size ];
  MPI_Gather( representative_selection, size, MPI_INT, selected_numbers, size, MPI_INT, 0, MPI_COMM_WORLD );

  // Communication time 3
  time_comm_3 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_3;

  // sort the sample
  int pivots[ size - 1 ];
  if ( rank == 0 )
  {
    quicksort( selected_numbers, 0, size * size - 1 );
    // select the pivots
    int t = size / 2;
    for ( int pos = 1; pos < size; pos++ )
      pivots[ pos - 1 ] = selected_numbers[ pos * size + t ];
  }

  // Organization time 4
  time_org_4 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_4;

  // distribute the pivots
  MPI_Bcast( pivots, size - 1, MPI_INT, 0, MPI_COMM_WORLD );

  // Communication time 4
  time_comm_4 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_4;

  // Partition into blocks
  int block_sizes[ size ];
  divide_into_blocks( block_sizes, size, numbers_per_processor, numbers_per_processor_size, pivots );

  // Organization time 5
  time_org_5 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_5;

  // send the blocks to the nodes according to rank
  int receive_block_sizes[ size ];
  MPI_Alltoall( block_sizes, 1, MPI_INT, receive_block_sizes, 1, MPI_INT, MPI_COMM_WORLD );
  
  // Communication time 5
  time_comm_5 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_5;

  int receive_block_displacements[ size ];
  displacements( receive_block_displacements, receive_block_sizes, size );
  int block_displacements[ size ];
  displacements( block_displacements, block_sizes, size );

  // Organization time 6
  time_org_6 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_6;

  int blocksize = sum( receive_block_sizes, size );
  int blocks_per_processor[ blocksize ];
  MPI_Alltoallv( numbers_per_processor, block_sizes, block_displacements, MPI_INT, blocks_per_processor,
    receive_block_sizes, receive_block_displacements, MPI_INT, MPI_COMM_WORLD );

  // Communication time 6
  time_comm_6 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_6;

  // each node sorts its blocks
  quicksort( blocks_per_processor, 0, blocksize -1 );

  // Organization time 7
  time_org_7 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_7;

  // gather the sorted blocks
  // TODO could be avoided by using MPI_Allgather instead of MPI_Alltoall
  int blocksizes[ size ];
  MPI_Gather( &blocksize, 1, MPI_INT, blocksizes, 1, MPI_INT, 0, MPI_COMM_WORLD );

  // Communication time 7
  time_comm_7 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_7;

  int receive_sorted_displacements[ size ];
  displacements( receive_sorted_displacements, blocksizes, size );

  // Organization time 8
  time_org_8 = MPI_Wtime() - time_since_last;
  time_since_last += time_org_8;

  int sorted[ numbers_size ];
  MPI_Gatherv( blocks_per_processor, blocksize, MPI_INT, sorted, blocksizes, receive_sorted_displacements,
    MPI_INT, 0, MPI_COMM_WORLD );

  // Communication time 8
  time_comm_8 = MPI_Wtime() - time_since_last;
  time_since_last += time_comm_8;

  // add up the per-rank timing contributions
  double time_comm = time_comm_1 + time_comm_2 + time_comm_3 + time_comm_4
    + time_comm_5 + time_comm_6 + time_comm_7 + time_comm_8;
  double time_org = time_org_1 + time_org_2 + time_org_3 + time_org_4
    + time_org_5 + time_org_6 + time_org_7 + time_org_8;
  double time_local_sort_total = 0.0;
  MPI_Reduce( &time_local_sort, &time_local_sort_total, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
  double time_comm_total = 0.0;
  MPI_Reduce( &time_comm, &time_comm_total, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
  double time_org_total = 0.0;
  MPI_Reduce( &time_org, &time_org_total, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );

  // Done!
  if ( rank == 0 )
  {
    // Averages over all ranks (use the reduced totals, not rank 0's local values)
    double time_local_sort_mean = time_local_sort_total / size;
    double time_comm_mean = time_comm_total / size;
    double time_org_mean = time_org_total / size;
    // Convert to milliseconds
    time_gen *= 1000;
    time_local_sort_mean *= 1000;
    time_comm_mean *= 1000;
    time_org_mean *= 1000;
    // Output
    if ( output_level == 0 )
    {
      print_array( sorted, numbers_size );
      printf( "\ntime measurement:\n" );
      printf( "number generation   = %.15f msec\n", time_gen );
      printf( "local sort (avg)    = %.15f msec\n", time_local_sort_mean );
      printf( "communication (avg) = %.15f msec\n", time_comm_mean );
      printf( "organization (avg)  = %.15f msec\n", time_org_mean );
      printf( "-------------------\n" );
      printf( "total time (no gen) = %.15f msec\n", time_local_sort_mean + time_comm_mean + time_org_mean );
      printf( "total time          = %.15f msec\n", time_local_sort_mean + time_comm_mean + time_org_mean + time_gen );
    }
    if ( output_level == 1 )
    {
      // Same order as at output level 0
      printf( "%.15f %.15f %.15f %.15f %.15f %.15f ",
	      time_gen,
	      time_local_sort_mean,
	      time_comm_mean,
	      time_org_mean,
	      time_local_sort_mean + time_comm_mean + time_org_mean,
	      time_local_sort_mean + time_comm_mean + time_org_mean + time_gen
      );
      if ( is_sorted( sorted, numbers_size ) == 1 )
	printf( "valid\n" );
      else
	printf( "invalid\n" );
    }
  }
  MPI_Finalize();
  return 0;
}
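The PSRS code above relies on a displacements() helper to build the offset arrays for MPI_Scatterv, MPI_Alltoallv and MPI_Gatherv. A minimal sketch, assuming it computes the exclusive prefix sum of the per-rank counts (the signature matches the calls in the example, the body is an assumption):

// Compute displacements as the exclusive prefix sum of 'sizes':
// element i is the offset of block i in the packed send/receive buffer,
// as expected by MPI_Scatterv, MPI_Alltoallv and MPI_Gatherv.
void displacements(int displs[], const int sizes[], int count)
{
  displs[0] = 0;
  for (int pos = 1; pos < count; pos++)
    displs[pos] = displs[pos - 1] + sizes[pos - 1];
}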