int main(int argc, char** argv)
{
    Timer mainTimer(Timer::Mode::Single);
    mainTimer.Start();
    
    int population, generation, elite, numberOfThreads;
    char *imageFile;
    if(argc != 6)
    {
        printf("Wrong number of arguments, correct number is: 1 population, 2 generation, 3 elite, 4 image file\n");
        return 0;
    }

    population = atoi(argv[1]);
    generation = atoi(argv[2]);
    elite = atoi(argv[3]);
    imageFile = argv[4];
    numberOfThreads = atoi(argv[5]);

    GeneticAlgorithm *geneticAlgorithm = new GeneticAlgorithm(population, generation, elite, imageFile, numberOfThreads);
    geneticAlgorithm->Calculate();
    delete geneticAlgorithm;
    
    mainTimer.Stop();
    printf("%lu,", mainTimer.Get());

    return 0;
}
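Several of the examples here rely on a small Timer helper whose definition is not shown. From the calls used above (Start, Stop, Get, GetAvg, PushTime, and the Single/Median modes) a minimal sketch could look like the following; the steady_clock source, the microsecond unit, and the internal layout are assumptions made purely for illustration.

// Minimal sketch of the Timer interface the examples use; only the public
// calls come from the examples, everything else is assumed.
#include <algorithm>
#include <chrono>
#include <vector>

class Timer {
public:
    enum class Mode { Single, Median };

    explicit Timer(Mode mode) : mode(mode) {}

    void Start() { begin = std::chrono::steady_clock::now(); }
    void Stop()
    {
        auto end = std::chrono::steady_clock::now();
        elapsed = std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
    }

    // Single mode: the last measured interval; Median mode: the median of pushed times.
    unsigned long Get() const
    {
        if (mode == Mode::Single || samples.empty())
            return static_cast<unsigned long>(elapsed);
        std::vector<unsigned long> sorted(samples);
        std::sort(sorted.begin(), sorted.end());
        return sorted[sorted.size() / 2];
    }

    unsigned long GetAvg() const
    {
        if (samples.empty()) return 0;
        unsigned long sum = 0;
        for (unsigned long s : samples) sum += s;
        return sum / samples.size();
    }

    void PushTime(unsigned long t) { samples.push_back(t); }

private:
    Mode mode;
    std::chrono::steady_clock::time_point begin;
    long long elapsed = 0;
    std::vector<unsigned long> samples;
};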
Example #2
/* Triggered once per JR_Interval */
void ontimer(void){
    if(JR_next_time > JR_GetTicks()) return;
    JR_next_time += JR_Interval;

    // do something here
    extern void mainTimer(); // defined in main.c
    mainTimer();
}
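The globals and the tick source that ontimer() uses are not shown in the example. Below is a minimal, self-contained sketch of how the pattern could be wired up; the types, the clock()-based tick source, the interval value, and the polling loop are assumptions, only the names JR_next_time, JR_Interval, JR_GetTicks, ontimer, and mainTimer come from the code above.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t JR_next_time;
static const uint32_t JR_Interval = 10;   /* period in ticks, value assumed */
static int fired;

/* Assumed tick source: millisecond ticks derived from clock(), for illustration only. */
static uint32_t JR_GetTicks(void){
    return (uint32_t)(clock() / (CLOCKS_PER_SEC / 1000));
}

void mainTimer(void){
    printf("tick %d\n", ++fired);         /* periodic work goes here */
}

/* Same shape as the example above: fires at most once per JR_Interval. */
static void ontimer(void){
    if(JR_next_time > JR_GetTicks()) return;
    JR_next_time += JR_Interval;
    mainTimer();
}

/* The callback is cooperative: it is polled from the main loop rather than
 * driven by an interrupt, and does nothing until the tick counter catches up. */
int main(void){
    JR_next_time = JR_GetTicks() + JR_Interval;
    while (fired < 5)                     /* stop after a few ticks so the sketch terminates */
        ontimer();
    return 0;
}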
int main(int argc, char **argv) {
    int arraySize, numberOfArrays, me, numberOfProcesses;
    Timer mainTimer(Timer::Mode::Single);
    Timer sendTimer(Timer::Mode::Single);
    Timer recvTimer(Timer::Mode::Single);
    Timer medianTimer(Timer::Mode::Median);
    Timer mainTimerWithoutEnv(Timer::Mode::Single);
    mainTimer.Start();
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    mainTimerWithoutEnv.Start();
    MPI_Comm_size(MPI_COMM_WORLD, &numberOfProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    MPI_Status status;
    int *array;
    if (me == 0) {
        if (argc != 3) {
            printf("Wrong number of arguments, correct number is: 1- sizeOfArray 2- numberOfArrays\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        arraySize = atoi(argv[1]);
        numberOfArrays = atoi(argv[2]);

        int sendParameters[2];
        sendParameters[0] = arraySize;
        sendParameters[1] = numberOfArrays;

        for (int i = 1; i < numberOfProcesses; ++i) {
            MPI_Send(sendParameters, 2, MPI_INT, i, PARAMETERS, MPI_COMM_WORLD);
        }
        array = (int *) _mm_malloc(sizeof(int) * numberOfArrays * arraySize, ALLOC_ALIGN_TRANSFER);
        for (int arrayCounter = 0; arrayCounter < numberOfArrays; arrayCounter++) {
            GenerateArray(array + (arrayCounter * arraySize), arraySize);
        }
    }
    else {
        int receiveParameters[2];
        MPI_Recv(receiveParameters, 2, MPI_INT, 0, PARAMETERS, MPI_COMM_WORLD, &status);
        arraySize = receiveParameters[0];
        numberOfArrays = receiveParameters[1];
    }

    if (me == 0) {
        int startingForProcess[numberOfProcesses - 1];
        std::vector<std::queue<Timer>> reqTimers;
        auto reqMutexs = new std::mutex[numberOfProcesses - 1];
        reqTimers.resize(numberOfProcesses - 1);
        static bool finishWork;
#pragma omp parallel private(status)
        {
#pragma omp sections
            {
#pragma omp section
                {
                    MPI_Request request[numberOfProcesses - 1];
                    MPI_Status statuses[numberOfProcesses - 1];
                    int actualStartingPoint = 0;
                    int finishStatus;
                    int howManyRequests = 0;
                    while (actualStartingPoint + arraySize <= numberOfArrays * arraySize) {
                        int ready = 1;
                        sendTimer.Start();
                        MPI_Recv(&ready, 1, MPI_INT, MPI_ANY_SOURCE, STATUS, MPI_COMM_WORLD, &status);
                        if (ready == 1 && (actualStartingPoint < numberOfArrays * arraySize)) {
                            startingForProcess[status.MPI_SOURCE - 1] = actualStartingPoint;
                            howManyRequests = howManyRequests % (numberOfProcesses-1);
                            MPI_Isend(&(array[actualStartingPoint]), arraySize, MPI_INT, status.MPI_SOURCE, STARTING,
                                     MPI_COMM_WORLD, &request[howManyRequests++]);
                            reqMutexs[status.MPI_SOURCE-1].lock();
                            reqTimers[status.MPI_SOURCE-1].push(sendTimer);
                            reqMutexs[status.MPI_SOURCE-1].unlock();
                            actualStartingPoint = actualStartingPoint + arraySize;
                        }
                    }
                    MPI_Waitall(howManyRequests, request, statuses);
                    finishStatus = 0;
                    for (int i = 1; i < numberOfProcesses; ++i)
                        MPI_Isend(&finishStatus, 1, MPI_INT, i, FINISH, MPI_COMM_WORLD, &request[i-1]);
                    MPI_Waitall(numberOfProcesses-1, request, statuses);
                    finishWork = true;
                }
#pragma omp section
                {
                    MPI_Request request[numberOfProcesses - 1];
                    MPI_Status statuses[numberOfProcesses - 1];
                    int howManyRequests = 0;
                    while (1) {
                        howManyRequests = howManyRequests % (numberOfProcesses-1);
                        MPI_Probe(MPI_ANY_SOURCE, RESULTS_DATA, MPI_COMM_WORLD, &status);
                        MPI_Irecv(&(array[startingForProcess[status.MPI_SOURCE - 1]]), arraySize, MPI_INT,
                                 status.MPI_SOURCE, RESULTS_DATA, MPI_COMM_WORLD, &request[howManyRequests++]);
                        reqMutexs[status.MPI_SOURCE-1].lock();
                        recvTimer = reqTimers[status.MPI_SOURCE-1].front();
                        reqTimers[status.MPI_SOURCE-1].pop();
                        reqMutexs[status.MPI_SOURCE-1].unlock();
                        recvTimer.Stop();
                        medianTimer.PushTime(recvTimer.Get());
                        if(finishWork)
                        {
                            MPI_Waitall(howManyRequests, request, statuses);
                            break;
                        }
                    }
                }
            }
        }
        delete[] reqMutexs;
    }
    else {
        int *smallArray = (int *) _mm_malloc(sizeof(int) * arraySize, ALLOC_ALIGN_TRANSFER);
        int finishStatus = 1;
        MPI_Request sendRequest = MPI_REQUEST_NULL, receiveRequest, statusRequest = MPI_REQUEST_NULL;
        MPI_Status singleStatus;
        MPI_Send(&finishStatus, 1, MPI_INT, 0, STATUS, MPI_COMM_WORLD);
        finishStatus = 0;
        while (true) {
            // MPI_Wait on MPI_REQUEST_NULL returns immediately, so no first-iteration guard is needed
            MPI_Wait(&statusRequest, &singleStatus);
            MPI_Wait(&sendRequest, &singleStatus);
            MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &singleStatus);
            if (singleStatus.MPI_TAG == STARTING) {
                MPI_Irecv(smallArray, arraySize, MPI_INT, 0, STARTING, MPI_COMM_WORLD, &receiveRequest);
                MPI_Wait(&receiveRequest, &singleStatus);

                MergeSort(smallArray, arraySize);
                MPI_Isend(smallArray, arraySize, MPI_INT, 0, RESULTS_DATA, MPI_COMM_WORLD, &sendRequest);

                finishStatus = 1;
                MPI_Isend(&finishStatus, 1, MPI_INT, 0, STATUS, MPI_COMM_WORLD, &statusRequest);
            }
            else if (singleStatus.MPI_TAG == FINISH) {
                // Consume the FINISH message so nothing is left pending at MPI_Finalize
                MPI_Recv(&finishStatus, 1, MPI_INT, 0, FINISH, MPI_COMM_WORLD, &singleStatus);
                break;
            }
        }
        _mm_free(smallArray);
    }
    if(me == 0)
    {
        _mm_free(array);
        mainTimerWithoutEnv.Stop();
        printf("MPI,%d,%d,%lu,%lu,%lu,",numberOfProcesses, arraySize, medianTimer.Get(), medianTimer.GetAvg(), mainTimerWithoutEnv.Get());
    }
    MPI_Finalize();
    mainTimer.Stop();
    if(me == 0) {
        printf("%lu,", mainTimer.Get());
    }
    return 0;
}
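The merge-sort example above depends on a few project-specific pieces that are not shown: the message tags, the allocation alignment, and the GenerateArray/MergeSort helpers. The sketch below shows one plausible set of definitions; the tag values, the alignment constant, and the function bodies (std::sort standing in for a hand-written merge sort) are assumptions, only the names are taken from the code.

#include <algorithm>
#include <cstddef>
#include <cstdlib>

// Assumed message tags used between rank 0 and the workers; concrete values are arbitrary.
enum MessageTag { PARAMETERS = 0, STATUS, STARTING, RESULTS_DATA, FINISH };

// Assumed alignment for buffers passed to _mm_malloc.
constexpr std::size_t ALLOC_ALIGN_TRANSFER = 64;

// Fills an array with pseudo-random values (assumed behaviour).
void GenerateArray(int *array, int size)
{
    for (int i = 0; i < size; ++i)
        array[i] = rand();
}

// Sorts `array` in place; sketched with std::sort instead of an explicit merge sort.
void MergeSort(int *array, int size)
{
    std::sort(array, array + size);
}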
int main(int argc, char** argv)
{
    Timer mainTimer(Timer::Mode::Single), mainTimerWithoutEnv(Timer::Mode::Single);
    mainTimer.Start();
    MPI_Init(&argc, &argv);
    mainTimerWithoutEnv.Start();
    int width, height;
    int commSize, commRank;
    int processNameLength;
    int PACKAGE_AMOUNT;
    int singlePackageOffset = 1024;
    MPI_Status status;

    MPI_Comm_size(MPI_COMM_WORLD, &commSize);
    MPI_Comm_rank(MPI_COMM_WORLD, &commRank);
    MPI_Request request[commSize-1];
    MPI_Status statuses[commSize-1];
    if (commRank == 0)
    {
        if(argc != 2)
        {
            printf("Wrong number of arguments, correct number is: 1- width\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        width = atoi(argv[1]);
        height = width;

        singlePackageOffset = (width * height) / (commSize - 1); // alternative: std::min((width * height)/(2*(commSize-1)), 1024)
        PACKAGE_AMOUNT = (width * height) / singlePackageOffset;

        int sendParameters[3];
        sendParameters[0] = width;
        sendParameters[1] = height;
        sendParameters[2] = singlePackageOffset;


        for (int i = 1; i < commSize; i++) {
            MPI_Isend(sendParameters, 3, MPI_INT, i, PARAMETERS, MPI_COMM_WORLD, &request[i-1]);
        }
        MPI_Waitall(commSize-1, request, statuses);

    }
    else
    {
        int receiveParameters[3];
        MPI_Recv(receiveParameters, 3, MPI_INT, 0, PARAMETERS, MPI_COMM_WORLD, &status);
        width = receiveParameters[0];
        height = receiveParameters[1];
        singlePackageOffset = receiveParameters[2];
    }

    if (commRank == 0)
    {
        char *results = (char*)_mm_malloc(sizeof(char) * width * height, ALLOC_TRANSFER_ALIGN);

        MPI_Request request;
        MPI_Status status;
        int totalNumberOfIterations = PACKAGE_AMOUNT;
        auto indexProvider = [&](const int index) {
            return singlePackageOffset*index;
        };
        int outValue;

        CoreStatus* currentIndexes = (CoreStatus*)_mm_malloc((commSize-1)*sizeof(CoreStatus), ALLOC_ALIGN);
        for (int j = std::min(commSize-1, totalNumberOfIterations); j > 0 ; --j) {
            currentIndexes[j-1].indexes[0] = j-1;
            currentIndexes[j-1].currentlyCalculating = 0;
            MPI_Isend(currentIndexes[j-1].indexes, 1, MPI_INT, j, DATA, MPI_COMM_WORLD, &request);

        }
        for (int j = std::min(commSize-1, totalNumberOfIterations - (commSize-1)); j > 0 ; --j) {
            currentIndexes[j-1].indexes[1] = commSize-1+j-1;
            MPI_Isend(currentIndexes[j-1].indexes + 1, 1, MPI_INT, j, DATA, MPI_COMM_WORLD, &request);

        }
        double value;

        for (int i = PACKAGE_AMOUNT-1; i >= 2*(commSize-1); --i) {
            MPI_Probe(MPI_ANY_SOURCE, DATA, MPI_COMM_WORLD, &status);
            MPI_Recv(results + indexProvider(currentIndexes[status.MPI_SOURCE-1].indexes[currentIndexes[status.MPI_SOURCE-1].currentlyCalculating]), singlePackageOffset, MPI_CHAR, status.MPI_SOURCE, DATA, MPI_COMM_WORLD, &status);

            currentIndexes[status.MPI_SOURCE-1].indexes[currentIndexes[status.MPI_SOURCE-1].currentlyCalculating] = i;

            MPI_Isend(currentIndexes[status.MPI_SOURCE-1].indexes + currentIndexes[status.MPI_SOURCE-1].currentlyCalculating, 1, MPI_INT, status.MPI_SOURCE, DATA, MPI_COMM_WORLD, &request);

            currentIndexes[status.MPI_SOURCE-1].currentlyCalculating = (currentIndexes[status.MPI_SOURCE-1].currentlyCalculating +1)%2;
        }

        for (int i = std::min(2*(commSize-1), totalNumberOfIterations); i > 0; --i) {
            MPI_Probe(MPI_ANY_SOURCE, DATA, MPI_COMM_WORLD, &status);
            MPI_Recv(results + indexProvider(currentIndexes[status.MPI_SOURCE-1].indexes[currentIndexes[status.MPI_SOURCE-1].currentlyCalculating]), singlePackageOffset, MPI_CHAR, status.MPI_SOURCE, DATA, MPI_COMM_WORLD, &status);


            currentIndexes[status.MPI_SOURCE-1].currentlyCalculating = (currentIndexes[status.MPI_SOURCE-1].currentlyCalculating +1)%2;
        }

        for (int i = commSize-1; i > 0; --i) {
            MPI_Isend(nullptr, 0, MPI_INT, i, ENDGEN, MPI_COMM_WORLD,
                      &request);
        }
        _mm_free(currentIndexes);
        _mm_free(results);
    }
    else
    {
        char* results = (char*)_mm_malloc(2*singlePackageOffset, ALLOC_TRANSFER_ALIGN);
        int currentlyCalculating = 0;
        int inValue[2];
        MPI_Status status[2];
        MPI_Request recvRequest[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
        MPI_Request sendRequest[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
        MPI_Recv(inValue, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, status);
        while (status->MPI_TAG != ENDGEN) {
            // Prefetch the next package index while the current one is being computed
            MPI_Irecv(inValue + ((currentlyCalculating+1)%2), 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, recvRequest + ((currentlyCalculating+1)%2));

            // The previous send from this buffer slot must finish before it is overwritten
            MPI_Wait(sendRequest + currentlyCalculating, MPI_STATUS_IGNORE);
            Mandelbrot(results + currentlyCalculating*singlePackageOffset, singlePackageOffset*inValue[currentlyCalculating], width, height, singlePackageOffset);

            MPI_Isend(results + currentlyCalculating*singlePackageOffset, singlePackageOffset, MPI_CHAR, 0, DATA, MPI_COMM_WORLD, sendRequest + currentlyCalculating);
            currentlyCalculating = (currentlyCalculating+1)%2;

            MPI_Wait(recvRequest + currentlyCalculating, status);
        }
        MPI_Waitall(2, sendRequest, MPI_STATUSES_IGNORE);
        _mm_free(results);
    }
    if (commRank == 0) {
        mainTimerWithoutEnv.Stop();
#ifndef __MIC__
        printf("MPI,%d,%d,%lu,", commSize - 1, width, mainTimerWithoutEnv.Get());
#endif
    }
    MPI_Finalize();
    if (commRank == 0) {
        mainTimer.Stop();
#ifndef __MIC__
        printf("%lu,", mainTimer.Get());
#endif
    }

    return 0;
}
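As with the previous example, a few declarations are assumed by the code above: the message tags, the alignment constants, the CoreStatus bookkeeping struct used on rank 0, and the Mandelbrot kernel itself. One possible shape, inferred from the call sites (everything except the names is an assumption), is sketched below.

#include <cstddef>

// Assumed message tags; concrete values are arbitrary.
enum MessageTag { PARAMETERS = 0, DATA, ENDGEN };

// Assumed alignments for _mm_malloc.
constexpr std::size_t ALLOC_ALIGN = 64;
constexpr std::size_t ALLOC_TRANSFER_ALIGN = 64;

// Rank 0 keeps two package indexes in flight per worker and tracks which of
// the two that worker is currently computing.
struct CoreStatus {
    int indexes[2];
    int currentlyCalculating;
};

// Renders `count` pixels starting at linear pixel index `firstPixel` of a
// width x height image into `out` (signature inferred from the call site).
void Mandelbrot(char *out, int firstPixel, int width, int height, int count);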
Example #5
/* Triggered once per JR_Interval */
void ontimer(void){
    if(JR_next_time > JR_GetTicks()) return;

    JR_next_time += JR_Interval;
    mainTimer();
}
Example #6
GameView::GameView(QWidget *parent){
    // Antialiasing
    this -> setRenderHint(QPainter::Antialiasing);

    // Number of players
    count_players = 2;

    // Number of actions
    // X A W S D Tab Q E Space
    count_actions = 9;

    // Fill the per-player vectors of action keys
    if ( count_players > 0 ){
        QVector<int> temp = {
            Qt::Key_X, Qt::Key_A, Qt::Key_W, Qt::Key_S, Qt::Key_D,
            Qt::Key_Tab, Qt::Key_Q, Qt::Key_E, Qt::Key_Space
        };
        controls.push_back(temp);
    }

    if ( count_players > 1 ){
        QVector<int> temp = {
            Qt::Key_2, Qt::Key_4, Qt::Key_8, Qt::Key_5, Qt::Key_6,
            Qt::Key_Plus, Qt::Key_7, Qt::Key_9, Qt::Key_0
        };
        controls.push_back(temp);
    }


    // Initialize the fps variable
    fps = 120;

    // Create the graphics scene
    scene = new QGraphicsScene(this);
    scene -> setSceneRect(-1, -1, screen_size.x()+1, screen_size.y()+1);

    // Set the scene background (the QImage lives on the stack so it is not leaked)
    QImage img(":/images/bg_1_1.png");
    resizeImage(&img, QSize(1920.0*resize_factor, 1080.0*resize_factor));
    QBrush bg_brush(img);
    scene -> setBackgroundBrush(bg_brush);

    // Add the scene borders
    addBorder();

    // Set our scene
    setScene(scene);

    // Disable the scroll bars
    setHorizontalScrollBarPolicy(Qt::ScrollBarAlwaysOff);
    setVerticalScrollBarPolicy(Qt::ScrollBarAlwaysOff);

    // Set the window title
    setWindowTitle("Space Wars");

    // Add player 1
    player1 = new Ship(1, 1, 3, 1);
    placeShip(player1,
        scene -> width() / 2.0 - player1 -> boundingRect().width(),
        scene -> height() / 2.0 - player1 -> boundingRect().height() / 2.0
    );

    // Add player 2
    player2 = new Ship(1, 2, 4, 1);
    placeShip(player2,
        scene -> width() / 2.0,
        scene -> height() / 2.0 - player1 -> boundingRect().height() / 2.0
    );

    // Add mobs (NPCs)
    /*
    for (int j = 2; j <= 2; ++j)
        for (int i = 1; i <= 4; ++i){
            Ship * ship = new Ship(1, j, i, qrand()%4+1);
            npcs.insert(ship);
            placeShip(ship,
                qrand()%qRound(scene -> width()- ship -> boundingRect().width() ),
                qrand()%qRound(scene -> height()- ship -> boundingRect().height() )
            );
        }
    */

    // Main timer
    QTimer * timer = new QTimer(this);   // parented so Qt releases it with the view
    connect(timer, SIGNAL(timeout()), this, SLOT(mainTimer()));
    timer -> start(1000.0 / fps);
}
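The connect() call above assumes GameView declares mainTimer() as a slot. A plausible class declaration, assuming GameView derives from QGraphicsView (the member types, the helper signatures, and everything not named in the constructor are assumptions), might look like this:

#include <QGraphicsView>
#include <QImage>
#include <QPoint>
#include <QSet>
#include <QVector>

class Ship;  // game entity defined elsewhere in the project

class GameView : public QGraphicsView {
    Q_OBJECT
public:
    explicit GameView(QWidget *parent = nullptr);

private slots:
    // Called roughly fps times per second by the QTimer created in the constructor.
    void mainTimer();

private:
    void resizeImage(QImage *img, const QSize &size);   // assumed signature
    void placeShip(Ship *ship, qreal x, qreal y);       // assumed signature
    void addBorder();

    QGraphicsScene *scene;
    Ship *player1;
    Ship *player2;
    QSet<Ship*> npcs;
    QVector<QVector<int>> controls;   // per-player key bindings
    QPoint screen_size;               // assumed type
    double resize_factor;             // assumed type
    int count_players;
    int count_actions;
    int fps;
};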