Example #1
File: map.hpp  Project: Lazin/RaftLib
   void exe()
   {
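      /** merge each sub-map's kernel set into this map's all_kernels **/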
      for( auto * const submap : sub_maps )
      {
         auto &container( all_kernels.acquire() );
         auto &subcontainer( submap->all_kernels.acquire() );  
         container.insert( subcontainer.begin(),
                           subcontainer.end()   );
         all_kernels.release();
         submap->all_kernels.release();
      }
      /** check types, ensure all are linked **/
      checkEdges( source_kernels );
      /** adds in split/join kernels **/
      //enableDuplication( source_kernels, all_kernels );
      volatile bool exit_alloc( false );
      allocator alloc( (*this), exit_alloc );
      /** launch allocator in a thread **/
      std::thread mem_thread( [&](){
         alloc.run();
      });
     
      alloc.waitTillReady();

      scheduler sched( (*this) );
      sched.init();
      
      /** launch scheduler in thread **/
      std::thread sched_thread( [&](){
         sched.start();
      });

      volatile bool exit_para( false );
      /** launch parallelism monitor **/
      parallelism_monitor pm( (*this)     /** ref to this    **/, 
                              alloc       /** allocator      **/,
                              sched       /** scheduler      **/,
                              exit_para   /** exit parameter **/);
      std::thread parallel_mon( [&](){
         pm.start();
      });
      /** join scheduler first **/
      sched_thread.join();

      /** scheduler done, cleanup alloc **/
      exit_alloc = true;
      mem_thread.join();
      /** no more need to duplicate kernels **/
      exit_para = true;
      parallel_mon.join();

      /** all fifo's deallocated when alloc goes out of scope **/
      return; 
   }
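
Note: exe() here is the blocking entry point of a raft::map: it folds each
sub-map's kernels into the top-level map, validates the graph edges, then
launches the allocator, scheduler, and parallelism monitor on their own
threads, joining them in that order before the FIFOs are torn down. A
minimal caller, following RaftLib's hello-world pattern (the hi kernel and
the port name "0" below are illustrative, not taken from the snippet above):

#include <raft>
#include <raftio>
#include <cstdlib>
#include <string>

/** toy kernel that emits a single string, then stops **/
class hi : public raft::kernel
{
public:
   hi() : raft::kernel()
   {
      output.addPort< std::string >( "0" );
   }

   virtual raft::kstatus run()
   {
      output[ "0" ].push( std::string( "Hello World\n" ) );
      return( raft::stop );
   }
};

int main()
{
   hi hello;                        /** source kernel **/
   raft::print< std::string > p;    /** stock print kernel from raftio **/
   raft::map m;
   m += hello >> p;                 /** link the kernels, add them to the map **/
   m.exe();                         /** blocks until the scheduler drains the graph **/
   return( EXIT_SUCCESS );
}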
Example #2
File: nano_thread.c  Project: vocho/openqnx
/**
//
// Handle thread creation when the address space is available
// and we can recover from faults (from bad user pointers...)
//*/
void rdecl
thread_specret(THREAD *thp) {
	struct _thread_local_storage	*tsp;
	const struct _thread_attr		*attr;
	void							*init_cc;
	uintptr_t						 stack_top;
	uintptr_t						 new_sp;
	int								 verify;

	thp->status = (void *)EFAULT;
	if((attr = thp->args.wa.attr)) {
		//RD_VERIFY_PTR(act, attr, sizeof(*attr));
		//RD_PROBE_INT(act, attr, sizeof(*attr) / sizeof(int));

		// Check for attributes which we do not support.
		// If there is a stack addr there must be a stack size.
		// If there is a stack size it must be at least PTHREAD_STACK_MIN.
		// If EXPLICIT sched, make sure policy and priority are valid.
		// TODO: add validation of the sporadic server attributes
		if(attr->__flags & PTHREAD_SCOPE_PROCESS) {
			verify = ENOTSUP;
		} else if((attr->__stackaddr || attr->__stacksize) && attr->__stacksize < PTHREAD_STACK_MIN) {
			verify = EINVAL;
		} else if(attr->__flags & PTHREAD_EXPLICIT_SCHED) {
			verify = kerschedok(thp, attr->__policy, (struct sched_param *)&attr->__param);
		} else {
			verify = EOK;
		}

		if(verify != EOK) {
			lock_kernel();
			thp->status = (void *)verify;
			thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
			return;
			// RUSH3: this comes out in loader_exit() but EINTR overridden
		}
	}

	// Check if we need to allocate a stack
	if(!(thp->flags & _NTO_TF_ALLOCED_STACK)) {
		uintptr_t					guardsize = 0;
		unsigned					lazystate = 0;
		unsigned					prealloc  = 0;

		if(attr) {
			// Get the user requested values.
			thp->un.lcl.stackaddr = attr->__stackaddr;
			thp->un.lcl.stacksize = attr->__stacksize;
			if(attr->__stackaddr != NULL &&
			  !WR_PROBE_PTR(thp, thp->un.lcl.stackaddr, thp->un.lcl.stacksize)) {
				lock_kernel();
				thp->status = (void *)EINVAL;
				thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
				return;
			}
			guardsize = attr->__guardsize;
			prealloc = attr->__prealloc;
			lazystate = attr->__flags & PTHREAD_NOTLAZYSTACK_MASK;
		}
		if(thp->un.lcl.stacksize == 0) {
			if(__cpu_flags & CPU_FLAG_MMU) {
				thp->un.lcl.stacksize = DEF_VIRTUAL_THREAD_STACKSIZE;
			} else {
				thp->un.lcl.stacksize = DEF_PHYSICAL_THREAD_STACKSIZE;
			}
		}
		if(!thp->un.lcl.stackaddr) {
			lock_kernel();

			if(thp->process->pid != PROCMGR_PID && procmgr.process_stack_code) {
				unspecret_kernel();

				if(thp->state != STATE_STACK) {
					// Must do modification of user address spaces at process time
					struct sigevent		event;

					CRASHCHECK(thp != actives[KERNCPU]);

					event.sigev_notify = SIGEV_PULSE;
					event.sigev_coid = PROCMGR_COID;
					event.sigev_value.sival_int = SYNC_OWNER(thp);
					event.sigev_priority = thp->priority;
					event.sigev_code = procmgr.process_stack_code;

					if(sigevent_proc(&event)) {
						// Pulse failed...
						thp->status = (void *)EAGAIN;
						thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
						return;
					}

					// we may not be running after sigevent_proc()
					unready(thp, STATE_STACK);
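					// Stash guardsize/lazystate/prealloc in otherwise-unused
					// thread fields while the thread waits in STATE_STACK for
					// procmgr to build the stack.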
					thp->prev.thread = (void *)guardsize;
					thp->next.thread = (void *)lazystate;
					thp->status = (void *)prealloc;
				}
				return;
			}

			guardsize = 0;
			if(procmgr_stack_alloc(thp) != EOK) {
				thp->status = (void *)EAGAIN;
				thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
				return;
			}
			thp->flags |= _NTO_TF_ALLOCED_STACK;
			unlock_kernel();
			SPECRET_PREEMPT(thp);
		}
	}

	// Inherit or assign a scheduling policy and params.
	if(attr) {
		if(attr->__flags & PTHREAD_MULTISIG_DISALLOW) {
			thp->flags |= _NTO_TF_NOMULTISIG;
		}
		thp->args.wa.exitfunc = attr->__exitfunc;
	}

	// Clear detach state if there is a parent

	// Get the *real* attribute structure pointer - we may have
	// NULL'd out thp->args.wa.attr and then been preempted
	attr = thp->args.wa.real_attr;
	if(thp->join && (!attr || !(attr->__flags & PTHREAD_CREATE_DETACHED))) {
		thp->flags &= ~_NTO_TF_DETACHED;
	}

	// Make thread lookups valid
	lock_kernel();
	vector_flag(&thp->process->threads, thp->tid, 0);
	thp->args.wa.attr = 0;

	if(actives[KERNCPU] != thp) {
		return;
	}

	// Load the necessary registers for the thread to start execution.
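	// STACK_INIT yields the top of the user stack; the TLS block and the
	// initial calling-convention area are carved off that top, and new_sp,
	// now below both, becomes the thread's starting stack pointer.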
	stack_top = STACK_INIT((uintptr_t)thp->un.lcl.stackaddr, thp->un.lcl.stacksize);
	STACK_ALLOC(thp->un.lcl.tls, new_sp, stack_top, sizeof *thp->un.lcl.tls);
	STACK_ALLOC(init_cc, new_sp, new_sp, STACK_INITIAL_CALL_CONVENTION_USAGE);
	SETKSP(thp, new_sp);

	// Could fault again while setting tls in stack...
	unlock_kernel();
	SPECRET_PREEMPT(thp);

	SET_XFER_HANDLER(&threadstack_fault_handlers);

	tsp = thp->un.lcl.tls;
	memset(tsp, 0, sizeof(*tsp));
	// Set the initial calling convention usage section to zero - will
	// help any stack traceback code to determine when it has hit the
	// top of the stack.
	memset(init_cc, 0, STACK_INITIAL_CALL_CONVENTION_USAGE);

	if(attr) {
		tsp->__flags = attr->__flags & (PTHREAD_CSTATE_MASK|PTHREAD_CTYPE_MASK);
	}
	tsp->__arg = thp->args.wa.arg;
	tsp->__exitfunc = thp->args.wa.exitfunc;
	if(tsp->__exitfunc == NULL && thp->process->valid_thp != NULL) {
	/*
		We don't have a thread-termination routine (exitfunc) for this
		thread; it was likely created with SIGEV_THREAD. Use the same one
		as the valid_thp's. This mostly works since all threads created
		via pthread_create() have the same exit function.
	*/
		tsp->__exitfunc = thp->process->valid_thp->un.lcl.tls->__exitfunc;
	}

	tsp->__errptr = &tsp->__errval;
	if(thp->process->pid == PROCMGR_PID) {
		tsp->__stackaddr = (uint8_t *)thp->un.lcl.stackaddr;
	} else {
		tsp->__stackaddr = (uint8_t *)thp->un.lcl.stackaddr + ((attr == NULL) ? 0 : attr->__guardsize);
	}
	tsp->__pid = thp->process->pid;
	tsp->__tid = thp->tid + 1;
	tsp->__owner = SYNC_OWNER(thp);

	// Touch additional stack if requested in attr
	// @@@ NYI
	// if(attr->guaranteedstacksize) ...

	SET_XFER_HANDLER(NULL);

	cpu_thread_waaa(thp);

	// Let the parent continue. The tid was stuffed during thread_create().
	if(thp->join && thp->join->state == STATE_WAITTHREAD) {
		lock_kernel();
		ready(thp->join);
		thp->join = NULL;
	}

	//
	// Don't change priority until parent thread freed to run again
	// - we might get a priority inversion otherwise.
	//
	if((attr != NULL) && (attr->__flags & PTHREAD_EXPLICIT_SCHED)) {
		lock_kernel();

		if(sched_thread(thp, attr->__policy, (struct sched_param *)&attr->__param) != EOK) {
			/* We should have some error handling if sched_thread() fails ...
			thp->status = (void *)EAGAIN;
			thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
			return;
			*/
		}
	}

	/* Only done once for the first thread running */
	if(thp->process->process_priority == 0) {
		thp->process->process_priority = thp->priority;
	}

	/* a thread is born unto a STOPPED process - make sure it stops too! */
	if ( thp->process->flags & (_NTO_PF_DEBUG_STOPPED|_NTO_PF_STOPPED) ) {
		thp->flags |= _NTO_TF_TO_BE_STOPPED;
	}
	thp->flags &= ~_NTO_TF_WAAA;
}
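
Note: the attribute validation at the top of thread_specret() is the kernel
side of the POSIX contract, so the same bounds are observable from user
space. A minimal sketch using only the standard pthread API (behavior per
POSIX; an undersized stack request is rejected with EINVAL before a thread
is ever created):

#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

static void *worker( void *arg )
{
	(void)arg;
	return NULL;
}

int main( void )
{
	pthread_attr_t	attr;
	pthread_t		tid;
	int				rc;

	pthread_attr_init( &attr );

	/* Below PTHREAD_STACK_MIN: rejected with EINVAL, mirroring the
	   stack-size check in thread_specret(). */
	rc = pthread_attr_setstacksize( &attr, PTHREAD_STACK_MIN / 2 );
	printf( "undersized stack -> %s\n", rc == EINVAL ? "EINVAL" : "accepted" );

	/* A conforming request: explicit scheduling plus a valid stack size. */
	pthread_attr_setstacksize( &attr, PTHREAD_STACK_MIN );
	pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );

	rc = pthread_create( &tid, &attr, worker, NULL );
	if( rc == 0 )
	{
		pthread_join( tid, NULL );
	}
	pthread_attr_destroy( &attr );
	return 0;
}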