Example #1
 /*! creates a hardware thread running on a specific core */
 thread_t createThread(thread_func f, void* arg, size_t stack_size, ssize_t threadID)
 {
   HANDLE thread = CreateThread(NULL, stack_size, (LPTHREAD_START_ROUTINE)threadStartup, new ThreadStartupData(f,arg), 0, NULL);
   if (thread == NULL) THROW_RUNTIME_ERROR("cannot create thread");
   if (threadID >= 0) setAffinity(thread, threadID);
   return thread_t(thread);
 }
Example #2
 /*! creates a hardware thread running on a specific core */
 thread_t createThread(thread_func f, void* arg, size_t stack_size, ssize_t threadID)
 {
   HANDLE thread = CreateThread(nullptr, stack_size, (LPTHREAD_START_ROUTINE)threadStartup, new ThreadStartupData(f,arg), 0, nullptr);
   if (thread == nullptr) FATAL("CreateThread failed");
   if (threadID >= 0) setAffinity(thread, threadID);
   return thread_t(thread);
 }
Example #3
void ui(int argc, char* argv[])
{
        if (getenv("RTPRIO")){
		sched_fifo_priority = atoi(getenv("RTPRIO"));
        }
	if (getenv("VERBOSE")){
		verbose = atoi(getenv("VERBOSE"));
	}
	if (getenv("DEVNUM")){
		devnum = atoi(getenv("DEVNUM"));
	}
        if (getenv("AFFINITY")){
                setAffinity(strtol(getenv("AFFINITY"), 0, 0));
        }

	/* own PA, e.g. from a GPU */
	if (getenv("PA_BUF")){
		xllc_def.pa = strtoul(getenv("PA_BUF"), 0, 0);
	}
	if (argc > 1){
		nsamples = atoi(argv[1]);
	}
	if (argc > 2){
		samples_buffer = atoi(argv[2]);
	}
	G_action = null_action;
	if (getenv("ACTION")){
		if (strcmp(getenv("ACTION"), "check_tlatch") == 0){
			G_action = check_tlatch_action;
		}
	}
}
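
The single-argument setAffinity() called by ui() is not shown in this example. Below is a minimal sketch of one plausible Linux implementation, assuming the argument is a zero-based CPU index and the affinity applies to the calling thread; it is illustrative, not the project's actual helper.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* hypothetical sketch: pin the calling thread to a single CPU */
void setAffinity(long cpu)
{
	cpu_set_t set;
	CPU_ZERO(&set);
	CPU_SET((int)cpu, &set);
	if (sched_setaffinity(0 /* 0 == calling thread */, sizeof(set), &set) != 0)
		perror("sched_setaffinity");
}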
Example #4
  static void* threadStartup(ThreadStartupData* parg)
  {
    _mm_setcsr(_mm_getcsr() | /*FTZ:*/ (1<<15) | /*DAZ:*/ (1<<6));

#if !defined(__LINUX__) || defined(__MIC__)
    if (parg->affinity >= 0)
      setAffinity(parg->affinity);
#endif

    parg->f(parg->arg);
    delete parg;
    return NULL;
  }
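
Examples #1, #2 and #4 pass a heap-allocated ThreadStartupData through the native thread entry point, but the struct itself is not shown. The sketch below shows what it plausibly contains, assuming thread_func is the user entry type and that affinity defaults to -1 (no pinning); the field layout is an assumption, not the library's actual definition.

  /* hypothetical sketch of the startup payload used above */
  struct ThreadStartupData
  {
    ThreadStartupData(thread_func f, void* arg, ssize_t affinity = -1)
      : f(f), arg(arg), affinity(affinity) {}
    thread_func f;      //!< user thread function
    void* arg;          //!< argument passed to f
    ssize_t affinity;   //!< core to pin the thread to, or -1 for none
  };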
Example #5
/*
 * Initialise an Insense component and start a POSIX thread running the behaviour function from the component.
 */
void *component_create(behaviour_ft behaviour, int struct_size, int stack_size, int argc, void *argv[], int core) {
	// Define thread
	struct IComponent_data *this_ptr;
	// Allocate space for the struct
#if HEAPS == HEAP_PRIVATE // Private heaps
	struct shMapType *heapElement = new_PrivateHeap(); // Create a new private heap (to be added to the heap map)
	this_ptr = DAL_alloc_in_specific_heap(struct_size, true, heapElement); // Allocate space for this_ptr in the newly created private heap
	if (this_ptr == NULL ) {
		return NULL ;
	} else {
		memset(this_ptr, 0, struct_size);
	}
#else // Shared heap
	if ((this_ptr = ((struct IComponent_data *) DAL_alloc(struct_size, true))) == NULL ) {
		return NULL;
	} else {
		memset(this_ptr, 0, struct_size);
	}
#endif
	// Initialize this->comp_create_sem
	my_sem_init(&(this_ptr->component_create_sem), 0);
	// Setup the stopped condition
	if (struct_size) {
		struct IComponent_data *t = (struct IComponent_data*) this_ptr;
		t->stopped = 0;
	}

	// Define a new structure holding the arguments for the wrapper function
	// (when dealing with garbage collection, pointers are first defined as NULL)
	struct argStructType * argStruct = malloc(sizeof(struct argStructType));
	argStruct->behaviour = behaviour;
	argStruct->argc = argc;
	argStruct->argv = argv;
	argStruct->this_ptr = this_ptr;

	// Create thread
#if HEAPS == HEAP_PRIVATE // Private heaps
	pthread_mutex_lock(&thread_lock); // Lock mutex so the component thread waits until its heap has been inserted into the heap map
#endif

	pthread_create(&this_ptr->behav_thread, NULL, startRoutine, argStruct); // Create a POSIX thread; the startRoutine wrapper unpacks argStruct and calls the behaviour function inside the new thread

	//Set affinity
#if AFFINITY_ALGO != AFFINITY_DYNAMIC
	if (core != -1) { // Manually passed Core ID
		setAffinityToCore(this_ptr->behav_thread, core);// Use passed ID of a core
	} else { // Core ID was not passed to the component_create function
		setAffinity(this_ptr->behav_thread);// Use an algorithm defined in GlobalVars.h
	}
	getAffinityThread(this_ptr->behav_thread); // Check whether setting the affinity worked; output is produced only if PRINTFMC is defined.
#endif

	// If small heaps are used, add a new entry to the map with a pointer to a newly created pthread.
#if HEAPS == HEAP_PRIVATE // Private heaps
	heapElement->thread_id = this_ptr->behav_thread; // Attach the thread id to the element before it is added to the map
	PRINTFMC("Component created. Thread ID: %x\n", heapElement->thread_id);
	listAdd(SHList, heapElement);
	pthread_mutex_unlock(&thread_lock); // Unlock mutex so the component can continue now that its heap has been inserted into the heap map
#endif
	// Insert thread into the list of threads
	listAdd(threadList, this_ptr->behav_thread);
	my_sem_wait(&this_ptr->component_create_sem); // Wait for creation of the component
	return this_ptr;
}
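
setAffinityToCore() and setAffinity(pthread_t) used above are defined elsewhere in the runtime. A minimal sketch of setAffinityToCore(), assuming Linux with glibc's pthread_setaffinity_np(); the error handling is illustrative only.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

/* hypothetical sketch: pin an existing pthread to a single core */
void setAffinityToCore(pthread_t thread, int core)
{
	cpu_set_t set;
	CPU_ZERO(&set);
	CPU_SET(core, &set);
	int err = pthread_setaffinity_np(thread, sizeof(set), &set);
	if (err != 0)
		fprintf(stderr, "pthread_setaffinity_np failed for core %d: %d\n", core, err);
}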
Example #6
 void on_scheduler_entry( bool ) {
   setAffinity(TaskScheduler::threadIndex()); 
 }
Example #7
 /*! set affinity of the calling thread */
 void setAffinity(ssize_t affinity) {
   setAffinity(GetCurrentThread(), affinity);
 }
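
The two-argument overload called here (and by Examples #1 and #2) is not shown. A minimal Windows sketch, assuming the project's ssize_t typedef, a zero-based logical-processor index, and fewer than 64 logical processors (a complete implementation would use processor groups beyond that):

 #include <windows.h>
 #include <stdio.h>

 /*! hypothetical sketch: pin a thread identified by HANDLE to one core */
 void setAffinity(HANDLE thread, ssize_t affinity)
 {
   if (affinity < 0) return;                   /* negative means "do not pin" */
   DWORD_PTR mask = (DWORD_PTR)1 << affinity;  /* single-core mask */
   if (SetThreadAffinityMask(thread, mask) == 0)
     fprintf(stderr, "SetThreadAffinityMask failed\n");
 }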
Example #8
void CAdvThread::threadMainFunction()
{
    getCurrentHandle();
    setAffinity();
    while (isThreadWork)
    {
        std::unique_lock<std::mutex> locker(threadMutex);
        // wait for a notification and guard against spurious wakeups:
        // the thread must wake up if the task list is not empty, the pool is stopping, or the core affinity changed
        conditionVariable.wait(locker, [&](){ return !taskArray.empty() || !isThreadWork || affinityData.coreChanged;});

        while(!taskArray.empty())
        {
            if(!isThreadWork) // the pool thread is being stopped
            {
                while(!taskArray.empty())//stop all tasks
                {
                    runnable_closure runClosure = taskArray.front();//get task from array
                    taskArray.pop();
                    locker.unlock();//unlock before task call
                    runClosure(int(eRUN_MODES::STOP), 0);
                    locker.lock();
                }
                return;
            }

            currentRunnableClosure = taskArray.front();//get task from array
            taskArray.pop();

            locker.unlock();//unlock before task call
            try
            {
                if(threadType == eThreadType::THREAD_SHARED && getRunType() == eRunnableType::SHORT_TASK)
                {
                    QString stringTimerResult = currentRunnableClosure(int(eRUN_MODES::IS_TIMER_OVER), 0);
                    if(stringTimerResult == QString("1"))
                    {
                        lastTimeOfTaskLaunch = std::chrono::high_resolution_clock::now();
                        isReadyForSend_WarningAboutShortTaskFreeze = true;
                        currentRunnableClosure(int(eRUN_MODES::RUN), 0);
                        isReadyForSend_WarningAboutShortTaskFreeze = false;
                    }
                    else // task held over
                        appendRunnableTask(currentRunnableClosure, eRunnableType::SHORT_TASK);
                }
                else
                {
                    lastTimeOfTaskLaunch = std::chrono::high_resolution_clock::now();
                    isReadyForSend_WarningAboutShortTaskFreeze = true;
                    currentRunnableClosure(int(eRUN_MODES::RUN), 0);
                    isReadyForSend_WarningAboutShortTaskFreeze = false;
                }

            }
            catch(holdOverTask_Exception& action)
            {
                if(threadType == eThreadType::THREAD_SHARED && getRunType() == eRunnableType::SHORT_TASK)
                {
                    int counterOfExecution = currentRunnableClosure(int(eRUN_MODES::GET_COUNTER), 0).toInt();
                    if(counterOfExecution>0)
                    {
                        CAdvThreadPool::getInstance().getEmitter()->addWarningToShell(QString("Task was hold over - counter = %1 (%2)").arg(counterOfExecution)
                                                                                      .arg(QString::fromStdString(action.what())),
                                                                                      eLogWarning::message);
                        currentRunnableClosure(int(eRUN_MODES::DECREASE_COUNTER), 0);
                        currentRunnableClosure(int(eRUN_MODES::START_TIMER_FOR_INTERVAL), action.getInterval());
                        appendRunnableTask(currentRunnableClosure, eRunnableType::SHORT_TASK);
                    }
                    else
                    {
                        CAdvThreadPool::getInstance().getEmitter()->addWarningToShell(QString("Task will not processing - counter over (%2)").arg(counterOfExecution)
                                                                                      .arg(QString::fromStdString(action.what())),
                                                                                      eLogWarning::warning);
                    }
                }

            }
            catch(std::exception& e)
            {
                QString temp = QString("%1 - CAdvThread::threadMainFunction - ").arg(e.what());
                temp += getWho();
                std::cout<<temp.toStdString()<<std::endl;
                CAdvThreadPool::getInstance().getEmitter()->addWarningToShell(temp, eLogWarning::warning);
            }
            catch(...)
            {               
                QString temp = QString("Undefined exception - CAdvThread::threadMainFunction - ");
                temp += getWho();
                std::cout<<temp.toStdString()<<std::endl;
                CAdvThreadPool::getInstance().getEmitter()->addWarningToShell(temp, eLogWarning::warning);
            }

            locker.lock(); // re-lock before checking taskArray.empty()
            currentRunnableClosure = nullptr;

            if(threadType == eThreadType::THREAD_NOT_SHARED)
                CAdvThreadPool::getInstance().getEmitter()->sendSignal_DeleteLongTask(getThreadNumber());
            else if(threadType == eThreadType::THREAD_NOT_SHARED_EXTRA)
                CAdvThreadPool::getInstance().getEmitter()->sendSignal_DeleteExtraLongTask(getThreadNumber());
        }
    }
}
Example #9
bool CAdvThread::setCoreMask(int mask)
{
    affinityData.coreMask = mask;
    affinityData.coreChanged = 1;
    setAffinity();
    return true;
}
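
CAdvThread::setAffinity() itself is not part of this snippet; setCoreMask() only records an integer bit mask and flags the change. One plausible way to apply such a mask to the running thread on Linux is sketched below; the helper name is hypothetical and the real class may use a different OS API.

#define _GNU_SOURCE
#include <pthread.h>

/* hypothetical sketch: expand an int bit mask (bit i == CPU i) into a cpu_set_t
 * and apply it to the calling thread */
void applyCoreMask(int mask)
{
    cpu_set_t set;
    CPU_ZERO(&set);
    for (int cpu = 0; cpu < 32; ++cpu)
        if ((unsigned)mask & (1u << cpu))
            CPU_SET(cpu, &set);
    pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}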
Example #10
PlatformLinux::PlatformLinux(int nLrt, int shMemSize, lrtFct* fcts, int nLrtFcts){
	int pipeSpidertoLRT[2*nLrt];
	int pipeLRTtoSpider[2*nLrt];
	int pipeTrace[2];
	sem_t* semFifo;
	sem_t* semTrace;

	if(platform_)
		throw "Try to create 2 platforms";

	platform_ = this;

	cpIds_ = CREATE_MUL(ARCHI_STACK, nLrt, int);

	cpIds_[0] = getpid();

	sem_unlink("spider_fifo");
	sem_unlink("spider_trace");

	semFifo = sem_open("spider_fifo", O_CREAT | O_EXCL, ACCESSPERMS, 1);
	semTrace = sem_open("spider_trace", O_CREAT | O_EXCL, ACCESSPERMS, 1);

	if(semFifo == SEM_FAILED || semTrace == SEM_FAILED){
		printf("Error creating semaphores\n");
		throw "Error creating semaphores\n";
	}

	if (pipe2(pipeTrace, O_NONBLOCK) == -1) {
		perror("pipe");
		exit(EXIT_FAILURE);
	}

	printf("Pipe Trace: %d <= %d\n", pipeTrace[0], pipeTrace[1]);

	for(int i=0; i<nLrt; i++){
		/** Open Pipes */
		if (pipe2(pipeSpidertoLRT+2*i, O_NONBLOCK) == -1
				|| pipe2(pipeLRTtoSpider+2*i, O_NONBLOCK) == -1) {
			perror("pipe");
			exit(EXIT_FAILURE);
		}
		fcntl(pipeSpidertoLRT[2*i  ], F_SETPIPE_SZ, 1024*1024);
		fcntl(pipeSpidertoLRT[2*i+1], F_SETPIPE_SZ, 1024*1024);
		fcntl(pipeLRTtoSpider[2*i  ], F_SETPIPE_SZ, 1024*1024);
		fcntl(pipeLRTtoSpider[2*i+1], F_SETPIPE_SZ, 1024*1024);
		printf("Pipe Spider=>LRT %d: %d <= %d\n", i, pipeSpidertoLRT[2*i], pipeSpidertoLRT[2*i+1]);
		printf("Pipe LRT=>Spider %d: %d <= %d\n", i, pipeLRTtoSpider[2*i], pipeLRTtoSpider[2*i+1]);
	}

	for(int i=1; i<nLrt; i++){
		pid_t cpid = fork();
		if (cpid == -1) {
			perror("fork");
			exit(EXIT_FAILURE);
		}

		if (cpid == 0) { /* Child */
			/** Close unused pipe */

			/** Initialize shared memory */
			initShMem(shMemSize);

			/** Register Signals */
			signal(SIG_IDLE, sig_handler);
			signal(SIG_WAKE, sig_handler);

			/** Create LRT */
			lrtCom_ = (LrtCommunicator*) CREATE(ARCHI_STACK, LinuxLrtCommunicator)(
					MAX_MSG_SIZE,
					pipeSpidertoLRT[2*i],
					pipeLRTtoSpider[2*i+1],
					pipeTrace[1],
					semFifo,
					semTrace,
					shMem,
					dataMem,
					NFIFOS);
			lrt_ = CREATE(ARCHI_STACK, LRT)(i);
			setAffinity(i);
			lrt_->setFctTbl(fcts, nLrtFcts);

			/** launch LRT */
			lrt_->runInfinitly();

			exit(EXIT_SUCCESS);
		} else { /* Parent */
			cpIds_[i] = cpid;
		}
	}

	/** Close unused pipe */

	/** Initialize shared memory */
	initShMem(shMemSize);
	memset(shMem,0,shMemSize);

	/** Register Signals */
	signal(SIG_IDLE, sig_handler);
	signal(SIG_WAKE, sig_handler);

	/** Initialize LRT and Communicators */
	spiderCom_ = CREATE(ARCHI_STACK, LinuxSpiderCommunicator)(
			MAX_MSG_SIZE,
			nLrt,
			semTrace,
			pipeTrace[1],
			pipeTrace[0]);

	for(int i=0; i<nLrt; i++)
		((LinuxSpiderCommunicator*)spiderCom_)->setLrtCom(i, pipeLRTtoSpider[2*i], pipeSpidertoLRT[2*i+1]);

	lrtCom_ = CREATE(ARCHI_STACK, LinuxLrtCommunicator)(
			MAX_MSG_SIZE,
			pipeSpidertoLRT[0],
			pipeLRTtoSpider[1],
			pipeTrace[1],
			semFifo,
			semTrace,
			shMem,
			dataMem,
			NFIFOS);
	lrt_ = CREATE(ARCHI_STACK, LRT)(0);
	setAffinity(0);
	lrt_->setFctTbl(fcts, nLrtFcts);


	/** Create Archi */
	archi_ = CREATE(ARCHI_STACK, SharedMemArchi)(
				/* Nb PE */		nLrt,
				/* Nb PE Type*/ 1,
				/* Spider Pe */ 0,
				/*MappingTime*/ this->mappingTime);

	archi_->setPETypeRecvSpeed(0, 1, 10);
	archi_->setPETypeSendSpeed(0, 1, 10);
	archi_->setPEType(0, 0);
	archi_->activatePE(0);

	char name[40];
	sprintf(name, "PID %d (Spider)", cpIds_[0]);
	archi_->setName(0, name);
	for(int i=1; i<nLrt; i++){
		sprintf(name, "PID %d (LRT %d)", cpIds_[i], i);
		archi_->setPEType(i, 0);
		archi_->setName(i, name);
		archi_->activatePE(i);
	}

	Spider::setArchi(archi_);

	this->rstTime();
}
Example #11
 virtual void init()
 {
     cds::threading::Manager::attachThread();
     m_Bag.initThread( m_threadId );
     setAffinity(m_threadId, 2+m_threadId);
 }
Example #12
  void TaskSchedulerTBB::thread_loop(size_t threadIndex) try 
  {
#if defined(__MIC__)
    setAffinity(threadIndex);
#endif

    /* allocate thread structure */
    Thread thread(threadIndex,this);
    threadLocal[threadIndex] = &thread;
    thread_local_thread = &thread;

    /* main thread loop */
    while (!terminate)
    {
      auto predicate = [&] () { return anyTasksRunning || terminate; };

      /* all threads are either spinning ... */
      if (spinning) 
      {
        while (!predicate())
          __pause_cpu(32);
      }
      
      /* ... or waiting inside some condition variable */
      else
      {
        //std::unique_lock<std::mutex> lock(mutex);
        Lock<MutexSys> lock(mutex);
        condition.wait(mutex, predicate);
      }
      if (terminate) break;

      /* special static load balancing for top level task sets */
#if TASKSCHEDULER_STATIC_LOAD_BALANCING
      if (executeTaskSet(thread))
        continue;
#endif

      /* work on available task */
      steal_loop(thread,
                 [&] () { return anyTasksRunning > 0; },
                 [&] () { 
                   atomic_add(&anyTasksRunning,+1);
                   while (thread.tasks.execute_local(thread,nullptr));
                   atomic_add(&anyTasksRunning,-1);
                 });
    }

    /* decrement threadCount again */
    atomic_add(&threadCounter,-1);

    /* wait for all threads to terminate */
    while (threadCounter > 0)
      yield();

    threadLocal[threadIndex] = nullptr;
  }
  catch (const std::exception& e) 
  {
    std::cout << "Error: " << e.what() << std::endl; // FIXME: propagate to main thread
    threadLocal[threadIndex] = nullptr;
    exit(1);
  }