void Algorithm::RunParallel(set<Algorithm*> algos, Graph& G, vector<string> parameters,
                                    float MaxApproximationDistance, float MinCorrectnessProbability)
        {
            set<Algorithm*> SelectedAlgorithms;
            for(set<Algorithm*>::iterator i = algos.begin(); i != algos.end(); i++)
                if((*i)->SuitableFor(G)
                && (*i)->CanGuaranteeApproximationDistance(G, MaxApproximationDistance)
                && (*i)->CanGuaranteeCorrectnessProbability(G, MinCorrectnessProbability))
                    SelectedAlgorithms.insert(*i);

            if(SelectedAlgorithms.size() == 0)
            {
                throw "No suitable algorithm found";
            }
            else if(SelectedAlgorithms.size() == 1) // we have 1 algorithm => no multithreading needed
            {
                Algorithm* algo = *SelectedAlgorithms.begin();
                algo->Run(G, parameters);
            }
            else
            {
                // we have more than 1 algorithm => run them in parallel

                // give each algorithm its own copy of G
                map<Thread*, Graph*> GraphCopies;
                for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++)
                    GraphCopies[*i] = new Graph(G);


                ConditionVariable synchronize;
                Thread* finishedAlgorithm = NULL;
                synchronize.Lock();

                cerr << "starting " << SelectedAlgorithms.size() << " of " << algos.size() << " algorithms\n";
                for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++)
                    (*i)->RunInThread(GraphCopies[*i], parameters, &synchronize, &finishedAlgorithm);


                while(finishedAlgorithm == NULL) // a spurious wakeup can make Wait() return early, so
                    synchronize.Wait(); // the wait has to be in a loop that checks whether an algorithm has actually finished
                G = *(GraphCopies[finishedAlgorithm]);


                cerr << "someone finished. sending termination requests\n";
                for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++)
                    (*i)->Terminate();
                synchronize.Unlock();

                cerr << "waiting for threads to join\n";
                for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++)
                {
                    (*i)->Join();
                    delete GraphCopies[*i];
                }
                GraphCopies.clear();
                cerr << "everyone joined\n";
            }
        }
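
The Wait() loop above only works if each worker, when its algorithm completes, publishes itself through the shared finishedAlgorithm pointer and signals the condition variable under its lock. RunInThread() is not part of this excerpt, so the following is only a rough sketch of that worker-side handshake; it assumes Algorithm derives from Thread (implied by indexing the map<Thread*, Graph*> with Algorithm pointers) and that ConditionVariable also exposes a Signal() method.

// Sketch only: the completion handshake the Wait() loop in RunParallel relies on.
// NotifyFinished is a hypothetical helper; the real RunInThread body is not shown above.
void Algorithm::NotifyFinished(ConditionVariable* synchronize, Thread** finishedAlgorithm)
{
    synchronize->Lock();               // same lock RunParallel holds while waiting
    if(*finishedAlgorithm == NULL)     // only the first algorithm to finish is recorded
        *finishedAlgorithm = this;     // assumes Algorithm is-a Thread
    synchronize->Signal();             // wake the Wait() loop in RunParallel
    synchronize->Unlock();
}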
Example #2
#ifndef PBRT_USE_GRAND_CENTRAL_DISPATCH
#ifdef WIN32
static DWORD WINAPI taskEntry(LPVOID arg) {
#else
static void *taskEntry(void *arg) {
#endif
    while (true) {
        workerSemaphore.Wait();
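        // A semaphore post with nothing left in the queue is the shutdown signal:
        // the worker then finds the queue empty below and breaks out of its loop.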
        // Try to get task from task queue
        Task *myTask = NULL;
        {   MutexLock lock(*taskQueueMutex);
            if (taskQueue.size() == 0)
                break;
            myTask = taskQueue.back();
            taskQueue.pop_back();
        }

        // Do work for _myTask_
        PBRT_STARTED_TASK(myTask);
        myTask->Run();
        PBRT_FINISHED_TASK(myTask);
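        // the thread that completes the last outstanding task wakes WaitForAllTasks()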
        tasksRunningCondition.Lock();
        int unfinished = --numUnfinishedTasks;
        if (unfinished == 0)
            tasksRunningCondition.Signal();
        tasksRunningCondition.Unlock();
    }
    // Cleanup from task thread and exit
#ifdef PBRT_HAS_PTHREADS
    pthread_exit(NULL);
#endif // PBRT_HAS_PTHREADS
    return 0;
}


#endif // !PBRT_USE_GRAND_CENTRAL_DISPATCH
void WaitForAllTasks() {
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
    dispatch_group_wait(gcdGroup, DISPATCH_TIME_FOREVER);
#else
    tasksRunningCondition.Lock();
    while (numUnfinishedTasks > 0)
        tasksRunningCondition.Wait();
    tasksRunningCondition.Unlock();
#endif
}
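
Taken together with EnqueueTasks() below, the usual calling pattern is: the caller builds a batch of Task objects, hands them to EnqueueTasks(), the worker threads drain the queue in taskEntry(), and WaitForAllTasks() blocks until numUnfinishedTasks reaches zero. A minimal caller sketch, assuming a hypothetical Task subclass MyTask (Task only requires a Run() method); the caller keeps ownership of the tasks.

// Caller sketch; MyTask is a hypothetical Task subclass, not part of the excerpts.
class MyTask : public Task {
public:
    MyTask(int chunk) : chunk(chunk) { }
    void Run() { /* process this chunk of the work */ }
private:
    int chunk;
};

void RunInParallel() {
    vector<Task *> tasks;
    for (int i = 0; i < 16; ++i)
        tasks.push_back(new MyTask(i));
    EnqueueTasks(tasks);      // wakes workers via workerSemaphore.Post()
    WaitForAllTasks();        // returns once numUnfinishedTasks drops to zero
    for (unsigned int i = 0; i < tasks.size(); ++i)
        delete tasks[i];      // the task system does not take ownership
}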
Example #3
void EnqueueTasks(const vector<Task *> &tasks) {
    if (PbrtOptions.nCores == 1) {
        for (unsigned int i = 0; i < tasks.size(); ++i)
            tasks[i]->Run();
        return;
    }
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
    for (uint32_t i = 0; i < tasks.size(); ++i)
        dispatch_group_async_f(gcdGroup, gcdQueue, tasks[i], lRunTask);
#else
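    // start the worker pool lazily the first time tasks are enqueued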
    if (!threads)
        TasksInit();

    {   MutexLock lock(*taskQueueMutex);
        for (unsigned int i = 0; i < tasks.size(); ++i)
            taskQueue.push_back(tasks[i]);
    }
    tasksRunningCondition.Lock();
    numUnfinishedTasks += tasks.size();
    tasksRunningCondition.Unlock();

    workerSemaphore.Post(tasks.size());
#endif
}
Example #4
void EnqueueTasks(const vector<Task *> &tasks) {
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
    static bool oneThread = (getenv("PBRT_NTHREADS") &&
                               atoi(getenv("PBRT_NTHREADS")) == 1);
    for (u_int i = 0; i < tasks.size(); ++i)
        if (oneThread)
            dispatch_sync_f(gcdQueue, tasks[i], lRunTask);
        else
            dispatch_group_async_f(gcdGroup, gcdQueue, tasks[i], lRunTask);
#else
    if (!threads)
        TasksInit();

    {   MutexLock lock(*taskQueueMutex);
        for (unsigned int i = 0; i < tasks.size(); ++i)
            taskQueue.push_back(tasks[i]);
    }
    tasksRunningCondition.Lock();
    numUnfinishedTasks += tasks.size();
    tasksRunningCondition.Unlock();

    workerSemaphore.Post(tasks.size());
#endif
}
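
Neither EnqueueTasks() variant shows how the worker pool is eventually torn down. Consistent with the break-on-empty-queue logic in taskEntry() above, a shutdown routine only needs to wake every worker while leaving the queue empty; the sketch below assumes this, and its name and joining details are not taken from the excerpts.

// Shutdown sketch: wake each worker with an empty queue so it hits the break in
// taskEntry(), then join the threads. ShutdownWorkers is an illustrative name only.
static void ShutdownWorkers(int nThreads) {
    workerSemaphore.Post(nThreads);   // one wakeup per worker, no tasks enqueued
    // ... join each worker thread and free per-thread state here ...
}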