Example 1
void TasksCleanup() {
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
    return;
#else // PBRT_USE_GRAND_CENTRAL_DISPATCH
    {   MutexLock lock(*taskQueueMutex);
        Assert(taskQueue.size() == 0);
    }

    static const int nThreads = NumSystemCores();
    // Wake every worker; each one sees the empty queue and exits its loop.
    workerSemaphore.Post(nThreads);

    if (threads != NULL) {
#ifdef PBRT_HAS_PTHREADS
        for (int i = 0; i < nThreads; ++i) {
            int err = pthread_join(threads[i], NULL);
            if (err != 0)
                Severe("Error from pthread_join: %s", strerror(err));
        }
#endif
#ifdef WIN32
        WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
        for (int i = 0; i < nThreads; ++i) {
            CloseHandle(threads[i]);
        }
#endif // WIN32
        delete[] threads;
        threads = NULL;
    }
#endif // PBRT_USE_GRAND_CENTRAL_DISPATCH
}
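For context, here is a minimal sketch of the worker loop that this cleanup unblocks. The names (taskEntry, taskQueue, taskQueueMutex, workerSemaphore) follow the pbrt code above, but the body is an assumption rather than pbrt's exact source:

static void *taskEntry(void *arg) {
    while (true) {
        workerSemaphore.Wait();          // Blocks until EnqueueTasks() or TasksCleanup() posts.
        Task *myTask = NULL;
        {   MutexLock lock(*taskQueueMutex);
            if (taskQueue.size() == 0)
                break;                   // Woken by TasksCleanup() with an empty queue: shut down.
            myTask = taskQueue.back();
            taskQueue.pop_back();
        }
        myTask->Run();
    }
    return NULL;
}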
Example 2
/**
 * Reads the SimStepData from a calculation thread and adds it to the internal SSD buffer and the SRDF buffer.
 * Parameter: pointer of type SimStepData; it points to a data struct in a calculation thread.
 */
bool setResultData(SimStepData* p_SimStepData_from_Calculation) {
  bool retValue = true;

  /*
   * This part is necessary for the producer/consumer problem with a bounded buffer.
   * Any entity that wants to use the array must pass this section.
   */
  // Try to enter the ghSemaphore_NumberFreeSlots gate.
  ghSemaphore_NumberFreeSlots.Wait();

  ssdMutex.Lock();
  /********************************************************************
   * The entity has passed the synchronization section and can work on the SSD buffer.
   * Restriction: if the simulation has been reset, the first time value must be VALID_TIME_AFTER_RESET;
   * otherwise the result won't be added to the system.
   */

  // Block used by a normally running simulation
  if(!simulationReset && !simulationChangetime){
    addDataToSSD(p_SimStepData_from_Calculation);
    //cout << "add time: " << p_SimStepData_from_Calculation->forTimeStep   << endl; fflush(stdout);
  }else{ // Block used once after the simulation has been reset, or more than once if the next time to add into the SSD is not VALID_TIME_AFTER_RESET
    if(simulationReset){
      if(p_SimStepData_from_Calculation->forTimeStep == VALID_TIME_AFTER_RESET || p_SimStepData_from_Calculation->forTimeStep == 0){
          addDataToSSD(p_SimStepData_from_Calculation);
          //cout << "add after reset time: " << p_SimStepData_from_Calculation->forTimeStep   << endl; fflush(stdout);
          simulationReset = false;
      }
      else{
        //cout << "no chance for reset ;) time: " << p_SimStepData_from_Calculation->forTimeStep << endl; fflush(stdout);
      }
    } else{
      if(simulationChangetime){

        if(compareDouble(p_SimStepData_from_Calculation->forTimeStep, VALID_TIME_AFTER_CHANGETIME)){
          //cout << "add after change time: " << p_SimStepData_from_Calculation->forTimeStep   << endl; fflush(stdout);
            addDataToSSD(p_SimStepData_from_Calculation);
            simulationChangetime = false;
        } else{
          //cout << "no chance for change ;) time: " << p_SimStepData_from_Calculation->forTimeStep << endl; fflush(stdout);
        }
      }
    }
  }

  //Work on SSD and SRDF buffer ended **********************************

  // Release the mutex
  if (!ssdMutex.Unlock()) {
    //printf("ReleaseMutex ssdMutex error: %d\n", GetLastError());
    return false;
  }
  //if(debugResultManager) { cout << "set released mutex" << endl; fflush(stdout); }
  // Release the semaphore ghSemaphore_NumberUsedSlots
  ghSemaphore_NumberUsedSlots.Post();

  return retValue;
}
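setResultData only shows the producer side of the bounded buffer. A hypothetical consumer, mirroring the semaphore protocol (Wait on used slots, Post on free slots), might look like the sketch below; getResultData and readDataFromSSD are illustrative names, not from the original source:

bool getResultData(SimStepData* p_SimStepData_to_Reader) {
  // Block until the producer has published at least one filled slot.
  ghSemaphore_NumberUsedSlots.Wait();

  ssdMutex.Lock();
  readDataFromSSD(p_SimStepData_to_Reader);  // Hypothetical counterpart of addDataToSSD
  if (!ssdMutex.Unlock())
    return false;

  // One slot is free again; unblock a waiting producer.
  ghSemaphore_NumberFreeSlots.Post();
  return true;
}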
Example 3
int main(int argc, char *argv[]) {
	Semaphore sem;
	Thread thread(_ThreadFun1, &sem);
	thread.Start();

	for (int i=0; i<2; i++) {
		sem.Post();
		sleep(1);
	}
}
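_ThreadFun1 is not shown in this example. Below is a plausible sketch that consumes the two posts from main(); the void* signature is an assumption inferred from the Thread(_ThreadFun1, &sem) constructor call:

static void _ThreadFun1(void *arg) {
	Semaphore *sem = static_cast<Semaphore *>(arg);
	for (int i = 0; i < 2; i++) {
		sem->Wait();	// Blocks until main() calls sem.Post()
		printf("worker woke up: %d\n", i);
	}
}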
Example 4
int main()
{

	Semaphore *sem = new Semaphore("/tmp.test.trigger");

	sem->Post();

	cout<<"trigger on."<<endl;

	delete sem;

	return 0;
}
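The Post() above only has an effect if another process waits on the same named semaphore. A minimal sketch of that receiving side, assuming the Semaphore wrapper exposes a blocking Wait() and that the name "/tmp.test.trigger" resolves to the same kernel object in both processes:

int main()
{
	Semaphore *sem = new Semaphore("/tmp.test.trigger");

	sem->Wait();	// Blocks here until the trigger program calls Post()

	cout<<"trigger received."<<endl;

	delete sem;

	return 0;
}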
Example 5
void EnqueueTasks(const vector<Task *> &tasks) {
    if (PbrtOptions.nCores == 1) {
        for (unsigned int i = 0; i < tasks.size(); ++i)
            tasks[i]->Run();
        return;
    }
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
    for (uint32_t i = 0; i < tasks.size(); ++i)
        dispatch_group_async_f(gcdGroup, gcdQueue, tasks[i], lRunTask);
#else
    if (!threads)
        TasksInit();

    {   MutexLock lock(*taskQueueMutex);
        for (unsigned int i = 0; i < tasks.size(); ++i)
            taskQueue.push_back(tasks[i]);
    }
    tasksRunningCondition.Lock();
    numUnfinishedTasks += tasks.size();
    tasksRunningCondition.Unlock();

    workerSemaphore.Post(tasks.size());
#endif
}
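EnqueueTasks increments numUnfinishedTasks but never waits on it; callers block in a separate function. A sketch of that counterpart in the same pbrt-v2 style (the actual source may differ in details):

void WaitForAllTasks() {
    if (PbrtOptions.nCores == 1)
        return;  // Tasks already ran synchronously in EnqueueTasks().
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
    dispatch_group_wait(gcdGroup, DISPATCH_TIME_FOREVER);
#else
    tasksRunningCondition.Lock();
    while (numUnfinishedTasks > 0)
        tasksRunningCondition.Wait();
    tasksRunningCondition.Unlock();
#endif
}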
Example 6
void EnqueueTasks(const vector<Task *> &tasks) {
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
    static bool oneThread = (getenv("PBRT_NTHREADS") &&
                               atoi(getenv("PBRT_NTHREADS")) == 1);
    for (u_int i = 0; i < tasks.size(); ++i)
        if (oneThread)
            dispatch_sync_f(gcdQueue, tasks[i], lRunTask);
        else
            dispatch_group_async_f(gcdGroup, gcdQueue, tasks[i], lRunTask);
#else
    if (!threads)
        TasksInit();

    {   MutexLock lock(*taskQueueMutex);
        for (unsigned int i = 0; i < tasks.size(); ++i)
            taskQueue.push_back(tasks[i]);
    }
    tasksRunningCondition.Lock();
    numUnfinishedTasks += tasks.size();
    tasksRunningCondition.Unlock();

    workerSemaphore.Post(tasks.size());
#endif
}
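Both GCD branches pass lRunTask as the dispatch_function_t and the Task pointer as its context, so lRunTask must be a plain function taking a single void *. A minimal sketch follows; the real version presumably also updates numUnfinishedTasks and signals tasksRunningCondition, so only the dispatch plumbing is shown here:

static void lRunTask(void *t) {
    Task *task = static_cast<Task *>(t);  // Context pointer handed to dispatch_*_f above
    task->Run();
}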