Example #1
0
/*
 * Block the calling fiber until it is woken.
 *
 * event   - wakeup token the fiber waits on; NULL selects the shared
 *           never-signalled event, so the fiber can then only be woken
 *           by an error being posted or by the timer.
 * timeout - TV_INFINITE to wait without a time limit; any other value
 *           arms a one-shot timer that fires _fiber_timeout on this
 *           fiber.
 *
 * Returns the fiber's thr_retcode as set by whoever woke it.
 */
int
_fiber_sleep (void *event, TVAL timeout)
{
  thread_t *self = _current_fiber;
  void *wait_token;

  assert (self->thr_status == RUNNING);

  self->thr_err = 0;
  self->thr_retcode = 0;

  /* Arm a one-shot timer unless the caller asked to wait forever.
     No timer may already be attached to this fiber. */
  assert (self->thr_timer == NULL);
  if (timeout != TV_INFINITE)
    self->thr_timer = timer_queue_new_timer (_timerq, timeout, 0,
	_fiber_timeout, self);

  wait_token = (event != NULL) ? event : &_ev_never;
  self->thr_event = wait_token;

  /* Yield until either the event is consumed (thr_event changes) or an
     error is posted on the fiber.  The fiber always yields at least
     once. */
  for (;;)
    {
      _fiber_status (self, WAITEVENT);
      _fiber_schedule_next ();
      if (self->thr_event != wait_token || self->thr_err != 0)
	break;
    }

  self->thr_event = NULL;

  /* Tear down the timer if one was armed above. */
  if (timeout != TV_INFINITE)
    {
      timer_deactivate (self->thr_timer);
      timer_unref (self->thr_timer);
      self->thr_timer = NULL;
    }

  return self->thr_retcode;
}
Example #2
0
File: nano_thread.c  Project: vocho/openqnx
/*
 * thread_destroy() - tear down a THREAD and release all its resources.
 *
 * By design this function will only be called by a thread in the same
 * process as the one being destroyed. It might very well be the same
 * thread destroying itself. The code does not currently require this
 * but it is good to remember this for the future.
 *
 * thp - the thread to destroy.  If it is currently running on another
 *       CPU, or still owns an FPU context on another CPU, the work is
 *       deferred via an IPI and this function returns early; it will be
 *       re-entered later to finish the job.
 *
 * On the path where this is the process's last thread, the process is
 * also flagged for termination and the process manager is notified.
 */
void rdecl
thread_destroy(THREAD *thp) {
	PROCESS 	*prp = thp->process;
	PULSE 		*pup;
	int 		i;
	SYNC 		*syp;

// NOTE(review): presumably verifies the kernel lock is held here -- confirm.
chk_lock();

	_TRACE_TH_EMIT_DESTROY(thp);
	if(thp->state != STATE_DEAD) {
		if(thp->state == STATE_RUNNING  &&  thp != actives[KERNCPU]) {
			// The thread is running on another processor in an SMP system.
			// Ask that CPU to make the thread kill itself, then retry later.
			thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
			SENDIPI(thp->runcpu, IPI_RESCHED);
			return;
		}
		// Remove thread from any queues.
		if(!force_ready(thp, _FORCE_KILL_SELF)) {
			// Couldn't kill this thread right now, do it later
			return;
		}

		// Remove any fpu buffer before the thread becomes dead
		if(thp->fpudata) {
			FPU_REGISTERS	*fpudata = FPUDATA_PTR(thp->fpudata);

			if(FPUDATA_INUSE(thp->fpudata)) {
				if(FPUDATA_CPU(thp->fpudata) != KERNCPU) {
					// Context still in use on another CPU; need to flush it out
					thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
					SENDIPI(FPUDATA_CPU(thp->fpudata), IPI_CONTEXT_SAVE);
					return;
				}
			}

			if(actives_fpu[KERNCPU] == thp) {
				actives_fpu[KERNCPU] = NULL;
			}
			// NOTE(review): atomic_order() presumably makes the NULL store
			// visible before the buffer is recycled below -- confirm.
			atomic_order(thp->fpudata = NULL);
			object_free(NULL, &fpu_souls, fpudata);
		}

		// Block all further signal delivery to this thread.
		SIGMASK_ONES(&thp->sig_blocked);

		// If a partner thread is parked mid message operation that
		// references us, cancel its restart so it doesn't retry against
		// a dead thread.
		// NOTE(review): assumes blocked_on points at a THREAD when
		// _NTO_TF_SHORT_MSG is set -- confirm.
		if(thp->flags & _NTO_TF_RCVINFO) {
			thp->args.ri.thp->restart = 0;
		}
		if(thp->flags & _NTO_TF_SHORT_MSG) {
			((THREAD *)thp->blocked_on)->restart = 0;
		}

		// Cancel and free any kernel timeout attached to the thread.
		thp->timeout_flags = 0;
		if(thp->timeout) {
			timer_free(prp, thp->timeout);
			thp->timeout = NULL;
		}


		// Now remove thread from the ready queue.
		// Call unready() instead of block() the thread may not be active.
		unready(thp, STATE_DEAD);


        // The thread is definitely no longer in actives[].

		/* Give the vendor extension a chance to take first dibs */
		if ( kerop_thread_destroy_hook != NULL ) {
			kerop_thread_destroy_hook(thp);
		}

		// purge mutex hold list: for each mutex we still hold, clear the
		// recorded owner on the first waiter so ownership can be re-won.
		while((syp = thp->mutex_holdlist)) {
			THREAD *waiting = pril_first(&syp->waiting);
			CRASHCHECK(waiting->args.mu.owner == 0);
			mutex_holdlist_rem(syp);
			waiting->args.mu.owner = 0;
		}

		// clear client field
		if(thp->client != 0) {
			/* need to clear client's server field */
			thp->client->args.ms.server = 0;
			thp->client = 0;
		}

		// Release the stack if it was dynamically allocated
		if(thp->flags & _NTO_TF_ALLOCED_STACK) {
			if(prp->pid != PROCMGR_PID && procmgr.process_stack_code) {
				if(thp->state != STATE_STACK) {
					struct sigevent		event;

					// Must do modification of user address spaces at process time
					thp->state = STATE_STACK;

					_TRACE_TH_EMIT_STATE(thp, STACK);
					thp->flags |= (_NTO_TF_KILLSELF | _NTO_TF_ONLYME);
					thp->flags &= ~(_NTO_TF_TO_BE_STOPPED | _NTO_TF_WAAA);

					// Pulse the process manager so it frees the user stack;
					// destruction resumes when we are re-entered afterwards.
					event.sigev_notify = SIGEV_PULSE;
					event.sigev_coid = PROCMGR_COID;
					event.sigev_value.sival_int = SYNC_OWNER(thp);
					event.sigev_priority = thp->priority;
					event.sigev_code = procmgr.process_stack_code;

					if(sigevent_proc(&event)) {
						// Very bad; we are out of pulses... This means
						// there's absolutely no memory left in the system.
						CRASHCHECK(1);
					}
				}
				return;
			}
			procmgr_stack_free(thp);
			thp->flags &= ~_NTO_TF_ALLOCED_STACK;
		}

		// Make thread lookups invalid
		vector_flag(&prp->threads, thp->tid, 1);

		// If there is still active threads retarget valid_thp
		if(--prp->num_active_threads  &&  prp->valid_thp == thp) {
			THREAD				*thp2, *thp3 = NULL;

			for(i = 0; i < prp->threads.nentries; i++) {

				// VECP() will not match our thread.
				if(VECP(thp2, &prp->threads, i)) {
					prp->valid_thp = thp2;
					break;
				} else if((thp2 = VECP2(thp2, &prp->threads, i)) && thp2->state != STATE_DEAD) {
					// Remember a not-yet-dead candidate as a fallback.
					thp3 = thp2;
				}
			}

			// Could happen if we only have threads starting up left
			if(prp->valid_thp == thp) {
				prp->valid_thp = thp3;
			}
		} else if(!prp->num_active_threads) {
			prp->valid_thp = NULL;
		}

		// If killing thread that last processed a signal, update
		// signal tid cache.
		if(thp->tid == prp->sigtid_cache) {
			THREAD	*vthp = prp->valid_thp;

			prp->sigtid_cache = (vthp != NULL) ? vthp->tid : 0;
		}

		// Deactivate any timers targeting this thread
		for(i = 0; i < prp->timers.nentries; ++i) {
			TIMER	 *tip;

			if(VECP(tip, &prp->timers, i) && (tip->flags & _NTO_TI_ACTIVE) &&
					(prp->num_active_threads == 0 || tip->thread == thp)) {
				if((tip->flags & _NTO_TI_TARGET_PROCESS) && (prp->valid_thp != NULL)) {
					// have to retarget to a new thread
					tip->thread = prp->valid_thp;
				} else {
					timer_deactivate(tip);
				}
			}
		}
		if(prp->alarm != NULL) {
			TIMER *tip = prp->alarm;

			// Alarm timers are always process based.
			if(tip->thread == thp) {
				tip->thread = prp->valid_thp;
			}
		}

		// Clean up after a SPORADIC thread
		if(thp->policy == SCHED_SPORADIC) {
			sched_ss_cleanup(thp);
		}

		// Release any attached interrupts
		if(prp->flags & _NTO_PF_CHECK_INTR) {
			INTERRUPT	*itp;

			for(i = 0; i < interrupt_vector.nentries; ++i) {
				if(VECP(itp, &interrupt_vector, i) && itp->thread->process == prp) {
					// Only detach interrupt if bound to thread, or all threads are inactive
					if(prp->num_active_threads == 0) {
						interrupt_detach_entry(prp, i);
					} else if(itp->thread != thp) {
						/* nothing to do */
					} else if(itp->flags & _NTO_INTR_FLAGS_PROCESS) {
						// Process-bound handler: retarget to surviving thread.
						itp->thread = prp->valid_thp;
					} else {
						interrupt_detach_entry(prp, i);
					}
				}
			}
		}

		// timers/interrupts have been detached, so flush the interrupt
		// queues to get rid of anything pending that's pointing at
		// this thread
		intrevent_flush();

		// Purge any enqueued signals
		for( ;; ) {
			pup = pril_first(&thp->sig_pending);
			if(pup == NULL) break;
			pril_rem(&thp->sig_pending, pup);
			object_free(prp, &pulse_souls, pup);
		}

		// If requested send a death pulse to a channel.
		// NOTE(review): thp->tid+1 -- external thread ids appear to be
		// 1-based while internal tids are 0-based; confirm.
		if(prp->death_chp && (prp->death_chp->flags & _NTO_CHF_THREAD_DEATH)) {
			pulse_deliver(prp->death_chp, thp->real_priority, _PULSE_CODE_THREADDEATH, thp->tid+1, -1, 0);
		}
	}

	// A zombie if nobody waiting, not detached and not last thread.
	// Leave the THREAD object around until someone joins it.
	if(thp->join == NULL  &&  (thp->flags & _NTO_TF_DETACHED) == 0  &&
	   (prp->threads.nentries-1 != prp->threads.nfree)) {
		return;
	}


	// Wakeup any thread waiting to join on me.
	if(thp->join) {
		THREAD *jthp = thp->join;

		if(jthp->state == STATE_WAITTHREAD) {
			// This thread was being created....
			// Report the creation failure status back to the creator.
			kererr(jthp, (intptr_t)thp->status);
		} else {
			// This is a normal thread death...
			jthp->args.jo.status = thp->status;
			jthp->flags |= _NTO_TF_JOIN;
			SETKSTATUS(jthp, EOK);
		}
		ready(jthp);
	}

	// FPU buffer should be freed already
	if(thp->fpudata) crash();

	// Remove thread from process thread vector.
	thp = vector_rem(&prp->threads, thp->tid);
	if(thp == NULL) {
		crash();
	}

	// Remove any CPU-specific save buffers
	cpu_thread_destroy(thp);
#ifdef _mt_LTT_TRACES_	/* PDB */
	//mt_TRACE_DEBUG("2 !");
	mt_trace_task_delete(thp->process->pid, thp->tid, 0);
#endif

	// Remember the priority for the pulses below
	// (note: reuses loop variable 'i'; thp is freed before it is needed).
	i = thp->real_priority;

	thp->schedinfo.rr_ticks = 0; /* sanity check before we return it to the pool */

	// Free the name if it was allocated
	// Long names come from the general allocator, short ones from the
	// threadname object pool.
	if(thp->name != NULL) {
		int name_len = strlen(thp->name) + 1;
		if(name_len >= THREAD_NAME_FIXED_SIZE) {
   		     _sfree(thp->name, name_len);
   		 } else {
   		     object_free(thp->process, &threadname_souls, thp->name);
		}
		thp->name = NULL;
	}

	// Release thread object back to the free queue.
	object_free(prp, &thread_souls, thp);

	// Last thread of the process just went away: start process teardown.
	if(prp->threads.nentries == prp->threads.nfree) {
		// Purge any enqueued signals pending on the process.
		for( ;; ) {
			pup = pril_first(&prp->sig_pending);
			if(pup == NULL) break;
			pril_rem(&prp->sig_pending, pup);
			object_free(prp, &pulse_souls, pup);
		}

		// setup process so it can run as a terminator thread
		prp->flags |= _NTO_PF_TERMING;
		prp->boundry_addr = VM_KERN_SPACE_BOUNDRY;
		SIGMASK_ONES(&prp->sig_ignore);

		// If debugging, don't tell debugger, not process manager
		// ('i' still holds the dead thread's real_priority from above).
		if(prp->debugger  &&  (*debug_process_exit)(prp, i)) {
			return;
		}

		// If last thread gone, tell the process manager.
		if(procmgr.process_threads_destroyed) {
			struct sigevent		ev;
			/* PDB process_delete */
			_TRACE_DESTROY_EH(prp);
			(*procmgr.process_threads_destroyed)(prp, &ev);
			sigevent_proc(&ev);
		}
	}
}