Пример #1
0
// Walks every registered timer shot; any shot whose interval has expired
// fires its callback and is restamped with the current elapsed time.
void Timer::shotTimers()
{
  for ( auto it = m_timerShots.begin(); it != m_timerShots.end(); ++it )
  {
    const bool due = it->lastShot + it->length < getElapsed();
    if ( due )
    {
      it->simpleCallback();
      it->lastShot = getElapsed();
    }
  }
}
Пример #2
0
// Schedules `sleeper(context)` to run once after `ms` milliseconds.
// Returns false when ms is 0 or all MAXSLEEPERS bunks are occupied;
// returns true once the sleeper is stored and the heartbeat is started.
// Interrupts are masked (SREG saved, cli) while the bunk array and the
// sleeper count are mutated, since the timer ISR also reads them.
// NOTE(review): a negative `ms` is accepted — sleepDuration keeps the sign
// while timeToWake stores the magnitude (ternary below); presumably the
// sign encodes a mode for timerISR — confirm against the ISR code.
boolean WAKEUP::wakeMeAfter( void (*sleeper)(void*), long ms, void *context, boolean treatAsISR) {
  unsigned long timeSinceLast;
   
  // Check the time requested and if there is a free bunk to store the sleeper
  if (ms == 0 || _numSleepers >= MAXSLEEPERS) return false;
 
  // Enter critical section: remember interrupt state, then mask interrupts.
  _oldSREG = SREG;
  cli();
  
  // If any already sleeping, then get the count from start of heartbeat and deduct from all active timers
  if (_numSleepers > 0 && (timeSinceLast = getElapsed()) > 0) {
    for (int i = 0; i < _numSleepers; i++) _bunks[i].timeToWake -= timeSinceLast; 
  }

  // Put new sleeper into top bunk and set alarm clock
  _bunks[_numSleepers].sleepDuration = ms;					// Save the delay (sign preserved) to inform timerISR
  _bunks[_numSleepers].callback = sleeper;					// Put sleeper into bunk
  _bunks[_numSleepers].treatAsISR = treatAsISR;				// Interrupt on wake or put on pending queue
  _bunks[_numSleepers].context = context;					// Save its context
  _bunks[_numSleepers].timeToWake = (ms > 0) ? ms : -ms;	// Set the delay (magnitude of ms)

  _numSleepers++; 
  
  // Leave critical section: restore interrupt state exactly as on entry.
  SREG = _oldSREG;  
      
  startHeartbeat();    // Set counter going with an appropriate heartbeat
     
  return true;  
}
Пример #3
0
// Publishes pressure-sensor readings from an I2C device over a ZeroMQ
// PUB socket bound to tcp://*:6002, one tab-separated line per sample:
// "2\t<pressure>\t<elapsed>". Runs forever; returns 1 on I2C setup failure.
int main(void)
{
    zmq::context_t context(1);
    zmq::socket_t socket(context, ZMQ_PUB);
    socket.bind("tcp://*:6002");

    // Open the I2C bus device.
    int file;
    const char *filename = "/dev/i2c-1";  // string literals are const; binding to char* is ill-formed in C++11+
    if ((file = open(filename, O_RDWR)) < 0)
    {
        std::cout << "Failed to open the bus" << std::endl;
        return 1;
    }

    // Address the pressure board (slave address 0x29) on the bus.
    if (ioctl(file, I2C_SLAVE, 0x29) < 0)
    {
        std::cout << "Failed to talk to slave" << std::endl;
        close(file);  // release the descriptor on the error path
        return 1;
    }

    pressure pboard(file);

    startTime();

    while (true)
    {
        std::ostringstream pss;
        // BUG FIX: the original concatenated pressure and elapsed time with no
        // delimiter; insert the tab the "2\t" prefix implies the format uses.
        pss << "2\t" << pboard.getPressure() << "\t" << getElapsed() << std::endl;
        s_send(socket, pss.str());  // BUG FIX: missing ';' in the original
    }
}
Пример #4
0
			double getElapsedSeconds() const 
			{
				rtc_u_int64_t const t = getElapsed();
				rtc_u_int64_t const s = t / 1000000;
				rtc_u_int64_t const u = t % 1000000;
				double ds = static_cast<double>(s);
				double du = static_cast<double>(u)/1000000.0;
				return (ds+du);
			}
Пример #5
0
const uint64_t Timer::Stop()
{
    uint64_t Elapsed = getElapsed();

	started = false;
	paused = false;
	StartTime = 0;
	PausedTime = 0;
    SampleIndex = 0;

    return Elapsed;
}
Пример #6
0
void RotateAround::update(float time)
{
	CC_UNUSED_PARAM(time);
    
    if(_target) {
        const float elapsed = getElapsed();
        const float percent = elapsed / _duration;
        const float angle = _reversed ? 360.f * -percent : 360.f * percent;
        
        const PolarCoord polarCoord {_startPolarCoord.r, _startPolarCoord.a + angle};
        Point pos = convertPolarToCartesian(polarCoord);
        pos = Point(_centerOfRotation.x + pos.x, _centerOfRotation.y + pos.y);
        _target->setPosition(pos);
    }
}
Пример #7
0
/*
 * Accepts one connection on the listening connection `lconn`.
 * Blocks until a SYN arrives, then performs the SYN / SYN+ACK / ACK
 * handshake, retransmitting the SYN+ACK up to RUSP_RETR times.
 * The RTT of the successful exchange is sampled (CLOCK_MONOTONIC) and
 * passed to setupConnection. Returns the new connection's id, or -1 if
 * the listener leaves the RUSP_LISTEN state without completing a handshake.
 */
ConnectionId passiveOpen(Connection *lconn) {
	Connection *aconn = NULL;
	Segment syn, synack, acksynack;
	char ssyn[RUSP_SGMS + 1], ssynack[RUSP_SGMS + 1], sacksynack[RUSP_SGMS + 1];
	int asock, synackretrans;
	struct sockaddr_in caddr;
	struct timespec start, end;
	long double sampleRTT;

	while (getConnectionState(lconn) == RUSP_LISTEN) {

		readUSocket(lconn->sock.fd, &caddr, ssyn, RUSP_SGMS);

		deserializeSegment(ssyn, &syn);

		DBGFUNC(RUSP_DEBUG, printInSegment(caddr, syn));

		/* Ignore anything that is not a bare SYN. */
		if (syn.hdr.ctrl != RUSP_SYN)
			continue;

		setConnectionState(lconn, RUSP_SYNRCV);

		asock = openSocket();

		synack = createSegment(RUSP_SYN | RUSP_SACK, 0, 10, RUSP_NXTSEQN(syn.hdr.seqn, 1), NULL);

		serializeSegment(synack, ssynack);

		for (synackretrans = 0; synackretrans < RUSP_RETR; synackretrans++) {

			clock_gettime(CLOCK_MONOTONIC, &start);

			writeUSocket(asock, caddr, ssynack, strlen(ssynack));

			DBGFUNC(RUSP_DEBUG, printOutSegment(caddr, synack));

			setConnectionState(lconn, RUSP_SYNSND);

			/* No reply within the sampling window: retransmit the SYN+ACK. */
			if (!selectSocket(asock, RUSP_SAMPLRTT))
				continue;

			readUSocket(asock, &caddr, sacksynack, RUSP_SGMS);

			clock_gettime(CLOCK_MONOTONIC, &end);

			sampleRTT = getElapsed(start, end);

			deserializeSegment(sacksynack, &acksynack);

			DBGFUNC(RUSP_DEBUG, printInSegment(caddr, acksynack));

			/* BUG FIX: use logical && (short-circuit) rather than bitwise &
			 * when validating the final ACK of the handshake. */
			if ((acksynack.hdr.ctrl == RUSP_SACK) &&
				(acksynack.hdr.seqn == synack.hdr.ackn) &&
				(acksynack.hdr.ackn == RUSP_NXTSEQN(synack.hdr.seqn, 1))) {

				aconn = createConnection();

				setConnectionState(aconn, RUSP_SYNSND);

				setupConnection(aconn, asock, caddr, acksynack.hdr.ackn, acksynack.hdr.seqn, sampleRTT);

				/* Handshake complete: put the listener back into LISTEN. */
				setConnectionState(lconn, RUSP_LISTEN);

				return aconn->connid;
			}
		}

		/* All retransmissions exhausted: drop the half-open socket. */
		closeSocket(asock);

		setConnectionState(lconn, RUSP_LISTEN);
	}

	return -1;
}
Пример #8
0
/*
 * Actively opens `conn` toward the listener at `laddr`.
 * Sends a SYN (retransmitted up to RUSP_SYN_RETR times), waits for the
 * SYN+ACK, replies with the final ACK, and installs the connection via
 * setupConnection with the sampled RTT (CLOCK_MONOTONIC).
 * Exits the process if `conn` is not in RUSP_CLOSED on entry.
 * Returns the connection id on success, -1 when all retries fail.
 */
int activeOpen(Connection *conn, const struct sockaddr_in laddr) {
	Segment syn, synack, acksynack;
	char ssyn[RUSP_SGMS + 1], ssynack[RUSP_SGMS + 1], sacksynack[RUSP_SGMS + 1];
	int asock, synretrans;
	struct sockaddr_in aaddr;
	struct timespec start, end;
	long double sampleRTT;

	if (getConnectionState(conn) != RUSP_CLOSED)
		ERREXIT("Cannot synchronize connection: connection not closed.");

	asock = openSocket();

	syn = createSegment(RUSP_SYN, 0, 0, 0, NULL);

	serializeSegment(syn, ssyn);

	for (synretrans = 0; synretrans < RUSP_SYN_RETR; synretrans++) {

		clock_gettime(CLOCK_MONOTONIC, &start);

		writeUSocket(asock, laddr, ssyn, strlen(ssyn));

		DBGFUNC(RUSP_DEBUG, printOutSegment(laddr, syn));

		setConnectionState(conn, RUSP_SYNSND);

		/* No reply within the sampling window: retransmit the SYN. */
		if (!selectSocket(asock, RUSP_SAMPLRTT))
			continue;

		readUSocket(asock, &aaddr, ssynack, RUSP_SGMS);

		clock_gettime(CLOCK_MONOTONIC, &end);

		sampleRTT = getElapsed(start, end);

		deserializeSegment(ssynack, &synack);

		DBGFUNC(RUSP_DEBUG, printInSegment(aaddr, synack));

		/* BUG FIX: use logical && (short-circuit) rather than bitwise &
		 * when validating the SYN+ACK. */
		if ((synack.hdr.ctrl == (RUSP_SYN | RUSP_SACK)) &&
			(synack.hdr.ackn == RUSP_NXTSEQN(syn.hdr.seqn, 1))) {

			setConnectionState(conn, RUSP_SYNRCV);

			acksynack = createSegment(RUSP_SACK, 0, RUSP_NXTSEQN(syn.hdr.seqn, 1), RUSP_NXTSEQN(synack.hdr.seqn, 1), NULL);

			serializeSegment(acksynack, sacksynack);

			writeUSocket(asock, aaddr, sacksynack, strlen(sacksynack));

			DBGFUNC(RUSP_DEBUG, printOutSegment(aaddr, acksynack));

			setupConnection(conn, asock, aaddr, acksynack.hdr.seqn, acksynack.hdr.ackn, sampleRTT);

			return conn->connid;
		}
	}

	/* All retransmissions exhausted: tear down and report failure. */
	closeSocket(asock);

	setConnectionState(conn, RUSP_CLOSED);

	return -1;
}
Пример #9
0
// Master (rank 0) side of an MPI matrix-multiplication benchmark.
// Broadcasts the input matrices, computes its own row slice, gathers the
// slaves' results, times parallel vs. sequential execution, and prints
// speed-up / efficiency statistics. `size` is the matrix dimension (N x N).
void master(int size)
{
     //Number of process, number of rows, start and stop rows, remainding row
     int numProcs, count, start, stop, remainder;
     
     //Averages
     double avgTotal = 0.0, avgCT = 0.0, seqTime = 0.0;
     
     //How long calculation will take
     double wallTime = 0.0;
    
     //The status of our receiver
     MPI_Status status;

     int testRuns = 10;
     struct timeval startTT, startCT;

     //Allocation of matrix
     int* matA = allocMat(size);
     int* matB = allocMat(size);
     int* matC = allocMat(size);
     
     MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
     
     //This test 10 times
    // for (int i = 0; i < testRuns; i++)
  //   {
          
          double totalTime = 0.0, calcTime = 0.0, remTime = 0.0;  
 
          initMatrix(matA, matB, size);

          //Start total time 
          gettimeofday(&startTT, NULL);

          //Broadcast matrix A
          MPI_Barrier(MPI_COMM_WORLD);
          MPI_Bcast(matA, size * size, MPI_INT, 0, MPI_COMM_WORLD);
   
          //Broadcast matrix B
          MPI_Barrier(MPI_COMM_WORLD);
          MPI_Bcast(matB, size * size, MPI_INT, 0, MPI_COMM_WORLD);
     
          start = 0;
          // rows/thread
          stop = size / numProcs;
          //area/thread
          count = size * (size / numProcs);
          
          //Start calculation timer 
          gettimeofday(&startCT, NULL);

          subMatrixCal(matA, matB, matC, size, start, stop);
     
          //Stop calculation timer
          calcTime = getElapsed(&startCT);

          // NOTE(review): send and receive buffers alias (both matC); the MPI
          // standard requires MPI_IN_PLACE at the root for this — confirm.
          MPI_Gather(matC, count, MPI_INT, matC, count, MPI_INT, 0, MPI_COMM_WORLD);
          
          //Wall time = the slowest computation time
          for (int idx = 1; idx < numProcs; idx++)
          {
               MPI_Recv(&wallTime, 1, MPI_DOUBLE, idx, 0, MPI_COMM_WORLD, &status);
               
               if (wallTime > calcTime)
               {
                    calcTime = wallTime;
               }
              
          }

          remainder = size % numProcs;
          
          // Rows that did not divide evenly are computed by the master alone.
          if (remainder > 0)
          {    
               //Start remainder timer
               gettimeofday(&startCT, NULL);
               
               remMatrixCal(matA, matB, matC, size, remainder);
               
               //Stop remainder timer
               remTime = getElapsed(&startCT);
          }

          //Add remainder time and calculation time
          calcTime += remTime;

          //Stop total time
          totalTime = getElapsed(&startTT);
     
          //Running total for average total and cal time
          avgTotal += totalTime;
          avgCT += calcTime;
         
   //  }
     
     // NOTE(review): the 10-iteration loop above is commented out, so only
     // one run is accumulated, yet the sums are still divided by testRuns
     // (10) here — the printed "averages" are 1/10 of the real times. Confirm
     // whether the loop was meant to be re-enabled.
     //Average total and cal time
     avgTotal /= testRuns;
     avgCT /= testRuns;
     
     //Start sequential timer
     gettimeofday(&startCT, NULL);

     calMatrix(matA, matB, size);
     
     //Stop sequential timer
     seqTime = getElapsed(&startCT);
    
     //Find speed up and efficiency
     double speedUp = seqTime / avgCT;
     double efficiency = speedUp / numProcs;
 
     //Print stats
     printf("Size: %d x %d\n", size, size);
     printf("Speed up: %f\n", speedUp);
     printf("Efficiency: %f\n", efficiency);
     printf("Average Total Time: %f s\n", avgTotal);
     printf("Average Sequential time: %f s\n", seqTime);
     printf("Average Parallel time: %f s\n", avgCT);
     printf("Average Communication time: %f s\n\n", avgTotal - avgCT);

     deleteMat(matA, size);
     deleteMat(matB, size);
     deleteMat(matC, size);    

}
 // Integrates the accumulated steering force into the creature's velocity,
 // scaled by the frame's elapsed time and a 60x tuning factor.
 // NOTE(review): assumes getElapsed() returns seconds since the last frame
 // — confirm; the commented-out lines suggest a disabled fear mechanic.
 void Creature::integrate()
 {
     velocity += accumulated * getElapsed() * 60.f;
     //velocity *= (1.0 + fear );
     //fear -= 0.2f * fear;
 }
Пример #11
0
// Registers a new timer shot firing `cb` on interval `length`, stamped
// with the current elapsed time as its starting point.
void Timer::setUpTimer(TimeType length, SimpleCallback cb)
{
  TimerShot shot(length, getElapsed(), cb);
  m_timerShots.push_back(shot);
}
Пример #12
0
// On destruction, folds this timer's total elapsed time into the shared
// statistics accumulator.
Timer::~Timer()
{
    const auto elapsed = getElapsed();
    stat.add(elapsed);
}
Пример #13
0
// MPI Mandelbrot renderer using dynamic master/slave row farming.
// Rank 0 hands out row indices one at a time; each slave computes the
// pixels of its row and sends them back tagged with the row number, until
// the master replies with the TERMINATE tag. Usage: <prog> height width.
// NOTE(review): argv[1]/argv[2] are read unchecked — crashes without args.
int main(int argc, char** argv)
{
     
     //Number of CPUs
     int numProcs;
     //Processor ID
     int rank;
     
     //The status of our receiver
     MPI_Status status;
     
     //Init MPI, Starts the parallelization sort of. 
     MPI_Init(&argc, &argv);     
     
     //Finds out how many CPUs are in our network
     MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

     //Determines the rank of a process
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     //Height and width of image will be passed in.
     int height = atoi(argv[1]);
     int width = atoi(argv[2]);
     
     Complex num;
     struct timeval start;
     double time = 0.0;
     
     //Mandelbrot Set will have lie in this plane. 
     //X range
     float realMax = 2.0;
     float realMin = -2.0;
     
     //Y range
     float imagMax = 2.0;
     float imagMin = -2.0;
     
     //Scale the image so that it can be seen at the give resolution.
     float scaleX = (realMax - realMin) / width;
     float scaleY = (imagMax - imagMin) / height;
     
     //Number of slaves
     //NOTE(review): numGroups is 0 when run with a single process, making
     //the modulo/division below divide by zero — confirm a minimum of 2
     //ranks is enforced elsewhere.
     int numGroups = numProcs - 1;
     
     //Number of remaining rows after even partitions for slave.
     int remainder = height % numGroups;
     
     //How height those partitions are.
     int grpHeight = (height - remainder) / numGroups;
     
     //The area of our partition
     int partArea = grpHeight * width;
     
     //Image array
     unsigned int* image 
          = (unsigned int *) malloc(sizeof(unsigned int) * height * width);

     //Row buffer (extra slack of 10 entries beyond one row's width).
     unsigned int* buffer 
          = (unsigned int *) malloc(sizeof(unsigned int) * (width + 10));
     
     //Message tags: DATA_TAG carries a row index; TERMINATE stops a slave.
     int DATA_TAG = 0;
     int TERMINATE = 1;
     
     MPI_Barrier(MPI_COMM_WORLD);

     if (rank == 0)
     {
          //count = rows currently outstanding at slaves; row = next row to assign.
          int count = 0;
          int row = 0;
                    
          //Starting the clock
          gettimeofday(&start, NULL); 
          
          //Prime every slave with one row each.
          for (int proc = 1; proc < numProcs; proc++)
          {
               MPI_Send(&row, 1, MPI_INT, proc, DATA_TAG, MPI_COMM_WORLD);    
               count++;
               row++;
          }     
          
          //Collect finished rows; hand out new ones until all rows are done.
          do 
          {
               MPI_Recv(buffer, width, MPI_UNSIGNED, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
               
              count--;
               
               if (row < height)
               {
                    MPI_Send(&row, 1, MPI_INT, status.MPI_SOURCE, DATA_TAG, MPI_COMM_WORLD);    
                    count++;
                    row++;
               }
               else
               {
                    //No rows left: release this slave.
                    MPI_Send(&row, 1, MPI_INT, status.MPI_SOURCE, TERMINATE, MPI_COMM_WORLD);
                    
               }
               
               //The slave tagged its reply with the row index it computed.
               for (int x = 0; x < width; x++)
               {
                    image[status.MPI_TAG * width + x] = buffer[x];
               }
               
                    
          } while (count > 0);
           
          //Stop the clock
          time = getElapsed(&start);

          //Output result
          printf("%d cores %dx%d: %fs\n", numProcs, height, width, time);
     
          //Calculate I/O time
          //gettimeofday(&start, NULL);
     
          //Display the set
          //writeImage("Static.ppm", image, height, width); 
     
          //Stop the clock
         // time = getElapsed(&start);
     
          //Output result
          //printf("Runtime for file I/O: %fs\n", time);
                   
     }
     else 
     {
          int row;
                
          MPI_Recv(&row, 1, MPI_INT, 0, DATA_TAG, MPI_COMM_WORLD, &status);
          //printf("Slave: %d Receive Init", rank);

          //Keep computing rows until the master sends TERMINATE.
          while (status.MPI_TAG != TERMINATE)
          {
              num.imag = imagMin + ((float) row * scaleY);

              for (int x = 0; x < width; x++)
              {
                    //Initialize Complex based on position.
                    num.real = realMin + ((float) x * scaleX);
                                
                    //Calculates the color of the current pixel.
                    buffer[x] = calPixel(num);
               }
               
               //Tag the reply with the row index so the master can place it.
               MPI_Send(buffer, width, MPI_UNSIGNED, 0, row, MPI_COMM_WORLD);
               //printf("Slave: %d Send row %d\n", rank, row);

               //Send only partition worked on
               MPI_Recv(&row, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
               //printf("Slave: %d Recv row %d\n", rank, row);
         }
         
     }
     free(buffer);
     free(image);
     MPI_Finalize();
     return 0;
}