Example #1
/**
 * A callback registered for the JVMTI_EVENT_OBJECT_FREE event; the JVM invokes it whenever a tagged object
 * has been freed.
 *
 * @param jvmti a pointer to the JVMTI struct to allow access to the error API.
 * @param tag   the tag of the object that has been freed.
 */
void JNICALL objectFreeCallBack(jvmtiEnv *jvmti, jlong tag) {

    // The tag holds a pointer to the class-name string stored when the object was tagged.
    char *className = (char*) tag;

    printCSV("DELETE", className);

    jvmtiError  error = (*jvmti)->Deallocate(jvmti, (unsigned char*) className);
    handleError(jvmti, error, "Unable to deallocate className.");
}
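A callback like this only runs if the agent requests the right capabilities and registers it. The following is a minimal sketch, not taken from the project, of how objectFreeCallBack might be wired up in Agent_OnLoad; the JVMTI calls and capability names are standard, everything else is an assumption.

#include <string.h>
#include <jvmti.h>

JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM *vm, char *options, void *reserved) {

    jvmtiEnv *jvmti = NULL;
    if ((*vm)->GetEnv(vm, (void **) &jvmti, JVMTI_VERSION_1_2) != JNI_OK)
        return JNI_ERR;

    /* Tagging objects and receiving ObjectFree events both require explicit capabilities. */
    jvmtiCapabilities caps;
    memset(&caps, 0, sizeof(caps));
    caps.can_tag_objects = 1;
    caps.can_generate_object_free_events = 1;
    (*jvmti)->AddCapabilities(jvmti, &caps);

    /* Register the callback shown above and enable the event for all threads. */
    jvmtiEventCallbacks callbacks;
    memset(&callbacks, 0, sizeof(callbacks));
    callbacks.ObjectFree = &objectFreeCallBack;
    (*jvmti)->SetEventCallbacks(jvmti, &callbacks, (jint) sizeof(callbacks));
    (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, JVMTI_EVENT_OBJECT_FREE, NULL);

    return JNI_OK;
}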
Example #2
/**
 * Log an object creation event.
 *
 * @param jvmti a pointer to the JVMTI struct to allow access to the error API.
 * @param object a reference to the object that has been allocated.
 * @param klass  a reference to the class of the allocated object.
 */
void logADDEvent(jvmtiEnv *jvmti, jobject object, jclass klass) {

    char *className = getClassName(jvmti, klass);

    if (className) {

        // Tag the object with the address of its class-name string; the free callback recovers and prints it.
        jvmtiError error = (*jvmti)->SetTag(jvmti, object, (jlong) className);
        handleError(jvmti, error, "Could not tag object.");

        printCSV("ADD", className);
    }
}
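The getClassName helper is not shown here. A plausible sketch, assuming it simply wraps GetClassSignature, so the returned string is JVMTI-allocated and must later be released with Deallocate, exactly as the free callback in Example #1 does:

static char* getClassName(jvmtiEnv *jvmti, jclass klass) {

    char *signature = NULL;

    /* GetClassSignature allocates the string through JVMTI; the caller owns it. */
    jvmtiError error = (*jvmti)->GetClassSignature(jvmti, klass, &signature, NULL);
    handleError(jvmti, error, "Could not read class signature.");

    return (error == JVMTI_ERROR_NONE) ? signature : NULL;
}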
Example #3
void runFifoOnce(vector<Process> set){
   cout << "****** FIFO DATA SET ******" << endl;
   ReturnFIFO r = runFifo(set);
   int totalWaitTimes = r.totalWaitTimes;
   vector<int> totalWait = r.totalWait;
   int totalCycles = r.totalCycles;
   int totalContextSwitches = r.totalContextSwitches;
 
   // Cast before dividing so the average is not truncated by integer division.
   float averageWaitTime = static_cast<float>(totalWaitTimes) / set.size();
   cout << "Average wait time was: " << averageWaitTime << " cycles" << endl;
   cout << "There were " << totalContextSwitches - 1 << " context switches totaling a penalty of " << (totalContextSwitches-1)*CONTEXTSWITCH << " cycles " << endl;
   cout << endl;
   printCSV(set,totalWait);

   printContextSwitchInfo((totalContextSwitches-1)*CONTEXTSWITCH,totalCycles);
}
Example #4
File: zombie.c Project: xhkz/zombie
int main(int argc, char **argv)
{

    int threadsLimit = 0;

    int c;
    while ((c = getopt (argc, argv, "n:t")) != -1)
    {
        switch (c)
        {
        case 'n':
            threadsLimit = atoi(optarg);
            break;
        case 't':
            benchmark = true;
            break;
        default:
            ;
        }
    }
    
    if (threadsLimit) {
        omp_set_dynamic(0);
        omp_set_num_threads(threadsLimit);
    }
    
    // Use wall-clock time; clock() would sum CPU time across all OpenMP threads.
    double start = omp_get_wtime();

    bool *locks = (bool *)malloc((SIZEX + 2) * sizeof(bool));
    for (int i = 0; i < SIZEX + 2; i++)
        locks[i] = false;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    initRandom(0, rank);
    Entity **matrix_a = createMatrix(SIZEX + 2, SIZEY + 2);
    Entity **matrix_b = createMatrix(SIZEX + 2, SIZEY + 2);
    initMatrix(matrix_a, SIZEX, SIZEY);

    MPI_Type_contiguous(sizeof(Entity), MPI_BYTE, &cell_t);
    MPI_Type_commit(&cell_t);
    MPI_Type_vector(SIZEX + 2, 1, 1, cell_t, &row_t);
    MPI_Type_commit(&row_t);
    MPI_Type_contiguous(sizeof(Counter), MPI_BYTE, &counter_t);
    MPI_Type_commit(&counter_t);

    Entity * northBuffer = (Entity *) malloc((SIZEX + 2) * sizeof(Entity));
    Entity * southBuffer = (Entity *) malloc((SIZEX + 2) * sizeof(Entity));

    if (!benchmark) {
        // update local counter and sync
        updateCounter(matrix_a);
        syncCounter();
        printHeader(rank);
        printCSV(0, rank);
    }

    for (int n = 0; n < STEPS; n++)
    {
        // set adjacent borders
        if (rank == NORTH)
        {
            MPI_Recv(northBuffer, 1, row_t, SOUTH, TAG, MPI_COMM_WORLD, &status);
            setBorder(northBuffer, matrix_a, SIZEY+1);

            setBuffer(matrix_a, northBuffer, SIZEY);
            MPI_Send(northBuffer, 1, row_t, SOUTH, TAG, MPI_COMM_WORLD);
        }

        if (rank == SOUTH)
        {
            setBuffer(matrix_a, southBuffer, 1);
            MPI_Send(southBuffer, 1, row_t, NORTH, TAG, MPI_COMM_WORLD);

            MPI_Recv(southBuffer, 1, row_t, NORTH, TAG, MPI_COMM_WORLD, &status);
            setBorder(southBuffer, matrix_a, 0);
        }

        #pragma omp parallel for default(none) shared(matrix_a, matrix_b, n, locks) schedule(static, SIZEX/omp_get_max_threads())
        for (int i = 1; i <= SIZEX; i++)
        {
            lock(i, locks);
            #pragma omp parallel for
            for (int j = 1; j <= SIZEY; j++)
                process(matrix_a, matrix_b, i, j);

            unlock(i, locks);
        }

        // merge adjacent border
        if (rank == NORTH)
        {
            MPI_Recv(northBuffer, 1, row_t, SOUTH, TAG, MPI_COMM_WORLD, &status);
            mergeGhost(northBuffer, matrix_b, SIZEY);

            setGhost(matrix_b, northBuffer, SIZEY+1);
            MPI_Send(northBuffer, 1, row_t, SOUTH, TAG, MPI_COMM_WORLD);
        }

        if (rank == SOUTH)
        {
            setGhost(matrix_b, southBuffer, 0);
            MPI_Send(southBuffer, 1, row_t, NORTH, TAG, MPI_COMM_WORLD);

            MPI_Recv(southBuffer, 1, row_t, NORTH, TAG, MPI_COMM_WORLD, &status);
            mergeGhost(southBuffer, matrix_b, 1);
        }

        // clear original adjacent border in matrix_a
        for (int i = 0; i < SIZEX + 2; i++)
            clearEntity(&matrix_a[i][rank == NORTH ? SIZEY + 1 : 0]);

        // sometimes an entity cannot move back, so it stays in the border
        transferInBorder(matrix_a, matrix_b);
        moveBackInBorder(matrix_b);

        // swap matrices
        Entity **matrix_t = matrix_a;
        matrix_a = matrix_b;
        matrix_b = matrix_t;

        if (!benchmark)
        {
            updateCounter(matrix_a);
            syncCounter();
            printCSV(n+1, rank);
        }
    }
    
    if (benchmark)
        printf("Thread: %d, Time: %f sec\n", omp_get_max_threads(), omp_get_wtime() - start);

    destroyMatrix(matrix_a);
    destroyMatrix(matrix_b);
    
    free(locks);
    free(northBuffer);
    free(southBuffer);

    MPI_Finalize();

    return 0;
}
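createMatrix and destroyMatrix are defined elsewhere in the project. A rough sketch, assuming a simple row-pointer layout with zero-initialized cells and the same SIZEX constant used above:

Entity **createMatrix(int rows, int cols)
{
    Entity **m = (Entity **) malloc(rows * sizeof(Entity *));
    for (int i = 0; i < rows; i++)
        m[i] = (Entity *) calloc(cols, sizeof(Entity));   /* zeroed cells */
    return m;
}

void destroyMatrix(Entity **m)
{
    for (int i = 0; i < SIZEX + 2; i++)
        free(m[i]);
    free(m);
}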
Example #5
/* TODO: if we really care, protect these variables with a mutex... */
static void*
transIDCleanup(void* ptr)
{
  struct timespec timer;
  struct timespec remaining;
  (void) ptr;
  unsigned long long a[5], b[5];
  /* char str[7]; */
  FILE* fp;

  timer.tv_sec  = 1;
  timer.tv_nsec = 0;

  for (;; )
  {
    fp = fopen("/proc/stat","r");
    if (fp)
    {
      fscanf(fp,"%*s %llu %llu %llu %llu %llu",&a[0],&a[1],&a[2],&a[3], &a[4]);
      fclose(fp);
    }
    nanosleep(&timer, &remaining);
    for (uint32_t i = 0; i < transIDSinUse; i++)
    {
      if (transIDs[i].cnt > 12)
      {
        /* Too old, remove it: shift the remaining entries down one slot. */
        pthread_mutex_lock(&mutexTransId);
        for (uint32_t j = i + 1; j < transIDSinUse; j++)
        {
          memcpy( &transIDs[j - 1], &transIDs[j], sizeof(struct transIDInfo) );
        }
        transIDSinUse--;
        pthread_mutex_unlock(&mutexTransId);
        i--; /* re-check this index, which now holds the next entry */
      }
      else
      {
        pthread_mutex_lock(&mutexTransId);
        transIDs[i].cnt++;
        pthread_mutex_unlock(&mutexTransId);
      }
    }
    if (stun_pkt_cnt > max_stun_pkt_cnt)
    {
      max_stun_pkt_cnt = stun_pkt_cnt;
    }

    if (byte_cnt > max_byte_cnt)
    {
      max_byte_cnt = byte_cnt;
    }

    fp = fopen("/proc/stat","r");
    if (fp)
    {
      fscanf(fp,"%*s %llu %llu %llu %llu %llu",&b[0],&b[1],&b[2],&b[3],&b[4]);
      fclose(fp);
      cpu_user   = b[0] - a[0];
      cpu_nice   = b[1] - a[1];
      cpu_system = b[2] - a[2];
      cpu_idle   = b[3] - a[3];
      cpu_iowait = b[4] - a[4];
    }
    if (csv_output)
    {
      printCSV();
    }
    else
    {
      printf(
        "\rActive Transactions: %i  (Max: %i)   (Trans/sec: %i (%i), kbps: %i (%i))           ",
        transIDSinUse,
        maxTransIDSinUse,
        stun_pkt_cnt,
        max_stun_pkt_cnt,
        byte_cnt * 8 / 1000,
        max_byte_cnt * 8 / 1000);
    }

    stun_pkt_cnt = 0;
    byte_cnt     = 0;
    fflush(stdout);
    if ( (max_stun_pkt_cnt > 0) && (transIDSinUse == 0) )
    {
      exit(0);
    }
  }
  return NULL;
}
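The transaction table and the counters this thread manipulates are globals defined elsewhere in the tool. A hedged guess at their shape; the field types and capacity are assumptions, apart from the fact that a STUN transaction ID is 96 bits:

struct transIDInfo
{
  uint8_t id[12];   /* assumed: the 96-bit STUN transaction ID */
  int     cnt;      /* seconds this entry has been tracked */
};

#define MAX_TRANS_IDS 4096                  /* assumed capacity */
static struct transIDInfo transIDs[MAX_TRANS_IDS];
static int                transIDSinUse    = 0;
static int                maxTransIDSinUse = 0;
static pthread_mutex_t    mutexTransId     = PTHREAD_MUTEX_INITIALIZER;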