Example #1
boolean_t
swtch_pri(
__unused	struct swtch_pri_args *args)
{
	register processor_t	myprocessor;
	boolean_t				result;

	disable_preemption();
	myprocessor = current_processor();
	if (SCHED(processor_queue_empty)(myprocessor) && rt_runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(thread_depress_time);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	return (result);
}
Example #2
int
pthread_join(pthread_t tid, void **status)
{
	pthread_thread_t	*joinee, *joiner;

	if ((joinee = tidtothread(tid)) == NULL_THREADPTR)
		return EINVAL;
	
	joiner = CURPTHREAD();

	assert_preemption_enabled();
	disable_preemption();

	pthread_lock(&(joinee->lock));
	if (joinee->flags & THREAD_DETACHED) {
		pthread_unlock(&(joinee->lock));
		enable_preemption();
		return EINVAL;
	}
	pthread_unlock(&(joinee->lock));
	enable_preemption();

	/*
	 * Use a mutex here. This avoids specialized handling in the cancel
	 * and signal code. It works becase the "dead" flag is independent,
	 * protected by a spinning mutex in the reaper code. 
	 */
	pthread_mutex_lock(&joinee->mutex);
	while (!joinee->dead) {
		/*
		 * join must be called with cancelation DEFERRED!
		 */
		pthread_testcancel();

		pthread_cond_wait(&joinee->cond, &joinee->mutex);
	}

	/*
	 * Not allowed to detach the target thread if this thread is canceled.
	 */
	pthread_testcancel();

	disable_preemption();
	if (status)
		*status = (void *) joinee->exitval;

	pthread_mutex_unlock(&joinee->mutex);
	pthread_destroy_internal(joinee);
	enable_preemption();

	return 0;
}
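
A minimal caller sketch (hypothetical, not taken from this pthread implementation): since the join loop above only checks for cancellation via pthread_testcancel(), the joining thread is expected to run with deferred, not asynchronous, cancellation.

#include <pthread.h>

static void *worker(void *arg)
{
	return arg;				/* exit value picked up by the joiner */
}

void join_example(void)
{
	pthread_t tid;
	void *status;
	int old_type;

	/* join must be called with cancellation DEFERRED, as the loop above notes */
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old_type);

	pthread_create(&tid, NULL, worker, (void *)42);
	pthread_join(tid, &status);		/* blocks until worker() is marked dead */
}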
Example #3
/*
 * The idle thread exists to spin looking for threads to run, and to
 * provide a place for pthread_delay() to be called so that, in user
 * mode, CPU time is not wildly consumed.
 */
void *
pthread_idle_function(void *arg)
{
	DPRINTF("(%d): starting\n", THISCPU);

	/*
	 * The idle thread is run preemption disabled.
	 */
	disable_preemption();

	while (1) {
		assert_interrupts_enabled();
		assert_preemption_disabled();

		pthread_reap_threads();

		/*
		 * If nothing to schedule call the delay function
		 */
		if (!pthread_sched_reschedule(RESCHED_INTERNAL, 0))
			pthread_delay();
	}

	return 0;
}
Example #4
bool kmutex_trylock(kmutex *m)
{
   bool success = false;

   disable_preemption();
   DEBUG_ONLY(check_not_in_irq_handler());

   if (!m->owner_task) {

      /* Nobody owns this mutex, just make this task own it */
      m->owner_task = get_curr_task();
      success = true;

      if (m->flags & KMUTEX_FL_RECURSIVE)
         m->lock_count++;

   } else {

      /*
       * There IS an owner task, but we can still acquire the mutex if:
       *    - the mutex is recursive
       *    - the task holding it is actually the current task
       */

      if (m->flags & KMUTEX_FL_RECURSIVE) {
         if (kmutex_is_curr_task_holding_lock(m)) {
            m->lock_count++;
            success = true;
         }
      }
   }

   enable_preemption();
   return success;
}
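
A hypothetical caller sketch (only the kmutex_* calls come from the snippet above; the other names are assumed): the usual trylock pattern skips the protected work when the mutex is contended, instead of sleeping the way kmutex_lock() does.

static kmutex stats_lock;          /* assumed to be initialized elsewhere */
static unsigned long stats_hits;

void stats_try_bump(void)
{
   if (!kmutex_trylock(&stats_lock))
      return;                      /* contended: drop this update, don't sleep */

   stats_hits++;                   /* protected by stats_lock */
   kmutex_unlock(&stats_lock);
}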
Example #5
File: debug.c Project: UIKit0/xnu
static spl_t
panic_prologue(const char *str)
{
	spl_t	s;

	if (kdebug_enable) {
		ml_set_interrupts_enabled(TRUE);
		kdbg_dump_trace_to_file("/var/tmp/panic.trace");
	}

	s = splhigh();
	disable_preemption();

#if	defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif

	panic_safe();

#ifndef __arm__ 	/* xxx show all panic output for now. */
	if( logPanicDataToScreen )
#endif		
		disable_debug_output = FALSE;
	debug_mode = TRUE;

restart:
	PANIC_LOCK();

	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until message has been printed to identify correct
			 * cpu that made the first panic.
			 */
			while (panicwait)
				continue;
			goto restart;
	    } else {
			nestedpanic +=1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic:  We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;

	PANIC_UNLOCK();
	return(s);
}
Example #6
void kmutex_lock(kmutex *m)
{
   disable_preemption();
   DEBUG_ONLY(check_not_in_irq_handler());

   if (!m->owner_task) {

      /* Nobody owns this mutex, just make this task own it */
      m->owner_task = get_curr_task();

      if (m->flags & KMUTEX_FL_RECURSIVE) {
         ASSERT(m->lock_count == 0);
         m->lock_count++;
      }

      enable_preemption();
      return;
   }

   if (m->flags & KMUTEX_FL_RECURSIVE) {

      ASSERT(m->lock_count > 0);

      if (kmutex_is_curr_task_holding_lock(m)) {
         m->lock_count++;
         enable_preemption();
         return;
      }

   } else {
      ASSERT(!kmutex_is_curr_task_holding_lock(m));
   }

#if KMUTEX_STATS_ENABLED
   m->num_waiters++;
   m->max_num_waiters = MAX(m->num_waiters, m->max_num_waiters);
#endif

   task_set_wait_obj(get_curr_task(), WOBJ_KMUTEX, m, &m->wait_list);
   enable_preemption();
   kernel_yield(); // Go to sleep until someone else is holding the lock.

   /* ------------------- We've been woken up ------------------- */

#if KMUTEX_STATS_ENABLED
   m->num_waiters--;
#endif

   /* Now for sure this task should hold the mutex */
   ASSERT(kmutex_is_curr_task_holding_lock(m));

   /*
    * DEBUG check: if we went to sleep holding a recursive mutex, then its
    * lock_count must be exactly 1 now.
    */
   if (m->flags & KMUTEX_FL_RECURSIVE) {
      ASSERT(m->lock_count == 1);
   }
}
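
A minimal blocking critical-section sketch built on the same API (hypothetical names, not from the sources above): kmutex_lock() may sleep via kernel_yield(), so it can only be used from task context, never from an IRQ handler, as the check_not_in_irq_handler() debug check enforces.

static kmutex counters_lock;       /* assumed to be initialized elsewhere */
static int active_tasks;

void task_went_active(void)
{
   kmutex_lock(&counters_lock);    /* may sleep until the current owner unlocks */
   active_tasks++;
   kmutex_unlock(&counters_lock);
}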
Example #7
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t old_level;

	disable_preemption();
	// force interrupts on for a cross CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
					cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_CHUD,
				     CHUD_CPUSIG_SEND) | DBG_FUNC_NONE,
			otherCPU, request_code, 0, 0, 0);

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			       &request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
					otherCPU, request_code);
			}
			cpu_pause();
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}
Example #8
static void
swtch_continue(void)
{
	register processor_t	myprocessor;
	boolean_t				result;

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}
Example #9
File: debug.c Project: Prajna/xnu
void
log(__unused int level, char *fmt, ...)
{
	va_list	listp;

#ifdef lint
	level++;
#endif /* lint */
#ifdef	MACH_BSD
	disable_preemption();
	va_start(listp, fmt);
	_doprnt(fmt, &listp, conslog_putc, 0);
	va_end(listp);
	enable_preemption();
#endif
}
Example #10
/*
 *	Start up the first thread on a CPU.
 *	First thread is specified for the master CPU.
 */
void
cpu_launch_first_thread(
	register thread_t	th)
{
	register int	mycpu;

	mycpu = cpu_number();

#if	MACH_ASSERT
	if (watchacts & WA_BOOT)
		printf("cpu_launch_first_thread(%x) cpu=%d\n", th, mycpu);
#endif	/* MACH_ASSERT */

	cpu_up(mycpu);

	start_timer(&kernel_timer[mycpu]);

	/*
	 * Block all interrupts for choose_thread.
	 */
	(void) splhigh();

	if (th == THREAD_NULL) {
	    th = cpu_to_processor(mycpu)->idle_thread;
		if (th == THREAD_NULL || !rem_runq(th))
		    panic("cpu_launch_first_thread");
	}

	rtclock_reset();		/* start realtime clock ticking */
	PMAP_ACTIVATE_KERNEL(mycpu);

	thread_machine_set_current(th);
	thread_lock(th);
	th->state &= ~TH_UNINT;
	thread_unlock(th);
	timer_switch(&th->system_timer);

	PMAP_ACTIVATE_USER(th->top_act, mycpu);

	assert(mycpu == cpu_number());

        /* The following is necessary to keep things balanced */
        disable_preemption();

	load_context(th);
	/*NOTREACHED*/
}
Example #11
void kmutex_unlock(kmutex *m)
{
   disable_preemption();

   DEBUG_ONLY(check_not_in_irq_handler());
   ASSERT(kmutex_is_curr_task_holding_lock(m));

   if (m->flags & KMUTEX_FL_RECURSIVE) {

      ASSERT(m->lock_count > 0);

      if (--m->lock_count > 0) {
         enable_preemption();
         return;
      }

      // m->lock_count == 0: we have to really unlock the mutex
   }

   m->owner_task = NULL;

   /* Wake up one task waiting to acquire the mutex 'm' (if any) and hand it ownership */
   if (!list_is_empty(&m->wait_list)) {

      wait_obj *task_wo =
         list_first_obj(&m->wait_list, wait_obj, wait_list_node);

      task_info *ti = CONTAINER_OF(task_wo, task_info, wobj);

      m->owner_task = ti;

      if (m->flags & KMUTEX_FL_RECURSIVE)
         m->lock_count++;

      ASSERT(ti->state == TASK_STATE_SLEEPING);
      task_reset_wait_obj(ti);

   } // if (!list_is_empty(&m->wait_list))

   enable_preemption();
}
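
A hypothetical sketch of the recursive case the three kmutex functions handle (KMUTEX_FL_RECURSIVE appears in the snippets; the rest is assumed): a task that already owns a recursive kmutex may nest lock/unlock pairs, and only the outermost kmutex_unlock() actually releases ownership and wakes a waiter.

static kmutex tree_lock;           /* assumed created with KMUTEX_FL_RECURSIVE */

static void rebalance_subtree(void)
{
   kmutex_lock(&tree_lock);        /* nested: lock_count goes 1 -> 2 */
   /* ... touch the shared structure ... */
   kmutex_unlock(&tree_lock);      /* lock_count back to 1, still owned */
}

void rebalance_tree(void)
{
   kmutex_lock(&tree_lock);        /* outermost acquisition: lock_count 0 -> 1 */
   rebalance_subtree();
   kmutex_unlock(&tree_lock);      /* lock_count 1 -> 0: ownership released */
}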
Example #12
void soft_interrupt_entry(regs *r)
{
   const int int_num = regs_intnum(r);
   ASSERT(!are_interrupts_enabled());

   if (int_num == SYSCALL_SOFT_INTERRUPT)
      DEBUG_check_preemption_enabled_for_usermode();

   push_nested_interrupt(int_num);
   disable_preemption();

   if (int_num == SYSCALL_SOFT_INTERRUPT) {

      enable_interrupts_forced();
      {
         handle_syscall(r);
      }
      disable_interrupts_forced(); /* restore IF = 0 */

   } else {

      /*
       * General rule: fault handlers get control with interrupts disabled but
       * they are supposed to call enable_interrupts_forced() ASAP.
       */
      handle_fault(r);

      /* Faults are expected to return with interrupts disabled. */
      ASSERT(!are_interrupts_enabled());
   }

   enable_preemption();
   pop_nested_interrupt();

   if (int_num == SYSCALL_SOFT_INTERRUPT)
      DEBUG_check_preemption_enabled_for_usermode();
}
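
A hypothetical fault-handler skeleton following the "general rule" in the comment above (only the regs type and enable_interrupts_forced()/disable_interrupts_forced() come from the snippet; everything else is assumed):

void example_fault_handler(regs *r)
{
   (void)r;                        /* unused in this sketch */

   /* Entered with IF = 0, as soft_interrupt_entry() asserts. */
   enable_interrupts_forced();     /* re-enable interrupts ASAP */

   /* ... potentially slow fault handling runs with interrupts enabled ... */

   disable_interrupts_forced();    /* return with IF = 0, as the caller expects */
}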
Example #13
File: debug.c Project: Prajna/xnu
void
panic(const char *str, ...)
{
	va_list	listp;
	spl_t	s;
	thread_t thread;
	wait_queue_t wq;

#if	defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif
	if (kdebug_enable)
		kdbg_dump_trace_to_file("/var/tmp/panic.trace");

	s = splhigh();
	disable_preemption();

	panic_safe();

	thread = current_thread();		/* Get failing thread */
	wq = thread->wait_queue;		/* Save the old value */
	thread->wait_queue = NULL;		/* Clear the wait so we do not get double panics when we try locks */

	if( logPanicDataToScreen )
		disable_debug_output = FALSE;
		
	debug_mode = TRUE;

	/* panic_caller is initialized to 0.  If set, don't change it */
	if ( ! panic_caller )
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);
	
restart:
	PANIC_LOCK();
	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until message has been printed to identify correct
			 * cpu that made the first panic.
			 */
			while (panicwait)
				continue;
			goto restart;
	    } else {
			nestedpanic +=1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic:  We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;

	PANIC_UNLOCK();
	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	/*
	 * Release panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	Debugger("panic");
	/*
	 * Release panicstr so that we can handle normally other panics.
	 */
	PANIC_LOCK();
	panicstr = (char *)0;
	PANIC_UNLOCK();
	thread->wait_queue = wq; 	/* Restore the wait queue */

	if (return_on_panic) {
		panic_normal();
		enable_preemption();
		splx(s);
		return;
	}

	kdb_printf("panic: We are hanging here...\n");
	panic_stop();
	/* NOTREACHED */
}
Example #14
__private_extern__ void
chudxnu_disable_preemption(void)
{
	disable_preemption();
}
Example #15
boolean_t
kdp_i386_trap(
    unsigned int		trapno,
    x86_saved_state64_t	*saved_state,
    kern_return_t	result,
    vm_offset_t		va
)
{
    unsigned int exception, subcode = 0, code;

    if (trapno != T_INT3 && trapno != T_DEBUG) {
    	kprintf("Debugger: Unexpected kernel trap number: "
		"0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
		trapno, saved_state->isf.rip, saved_state->cr2);
	if (!kdp.is_conn)
	    return FALSE;
    }	

    mp_kdp_enter();
    kdp_callouts(KDP_EVENT_ENTER);

    if (saved_state->isf.rflags & EFL_TF) {
	    enable_preemption_no_check();
    }

    switch (trapno) {
    
    case T_DIVIDE_ERROR:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_DIVERR;
	break;
    
    case T_OVERFLOW:
	exception = EXC_SOFTWARE;
	code = EXC_I386_INTOFLT;
	break;
    
    case T_OUT_OF_BOUNDS:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_BOUNDFLT;
	break;
    
    case T_INVALID_OPCODE:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_INVOPFLT;
	break;
    
    case T_SEGMENT_NOT_PRESENT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_SEGNPFLT;
	subcode	= (unsigned int)saved_state->isf.err;
	break;
    
    case T_STACK_FAULT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_STKFLT;
	subcode	= (unsigned int)saved_state->isf.err;
	break;
    
    case T_GENERAL_PROTECTION:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_GPFLT;
	subcode	= (unsigned int)saved_state->isf.err;
	break;
	
    case T_PAGE_FAULT:
    	exception = EXC_BAD_ACCESS;
	code = result;
	subcode = (unsigned int)va;
	break;
    
    case T_WATCHPOINT:
	exception = EXC_SOFTWARE;
	code = EXC_I386_ALIGNFLT;
	break;
	
    case T_DEBUG:
    case T_INT3:
	exception = EXC_BREAKPOINT;
	code = EXC_I386_BPTFLT;
	break;

    default:
    	exception = EXC_BAD_INSTRUCTION;
	code = trapno;
	break;
    }

    if (current_cpu_datap()->cpu_fatal_trap_state) {
	    current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
	    saved_state = current_cpu_datap()->cpu_fatal_trap_state;
    }

    kdp_raise_exception(exception, code, subcode, saved_state);
    /* If the instruction single step bit is set, disable kernel preemption. */
    if (saved_state->isf.rflags & EFL_TF) {
	    disable_preemption();
    }

    kdp_callouts(KDP_EVENT_EXIT);
    mp_kdp_exit();

    return TRUE;
}
Example #16
struct savearea * interrupt(
        int type,
        struct savearea *ssp,
	unsigned int dsisr,
	unsigned int dar)
{
	int	current_cpu;
	struct per_proc_info	*proc_info;
	uint64_t		now;
	thread_t		thread;

	disable_preemption();

	perfCallback fn = perfIntHook;
	if(fn) {							/* Is there a hook? */
		if(fn(type, ssp, dsisr, dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}
	
#if CONFIG_DTRACE
	if(tempDTraceIntHook) {							/* Is there a hook? */
		if(tempDTraceIntHook(type, ssp, dsisr, dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}
#endif

#if 0
	{
		extern void fctx_test(void);
		fctx_test();
	}
#endif


	current_cpu = cpu_number();
	proc_info = getPerProc();

	switch (type) {

		case T_DECREMENTER:
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
				  isync_mfdec(), (unsigned int)ssp->save_srr0, 0, 0, 0);
	
			now = mach_absolute_time();				/* Find out what time it is */
			
			if(now >= proc_info->pms.pmsPop) {		/* Is it time for power management state change? */
				pmsStep(1);							/* Yes, advance step */
				now = mach_absolute_time();			/* Get the time again since we ran a bit */
			}

			thread = current_thread();					/* Find ourselves */
			if(thread->machine.qactTimer != 0) {	/* Is the timer set? */
				if (thread->machine.qactTimer <= now) {	/* It is set, has it popped? */
					thread->machine.qactTimer = 0;		/* Clear single shot timer */
					if((unsigned int)thread->machine.vmmControl & 0xFFFFFFFE) {	/* Are there any virtual machines? */
						vmm_timer_pop(thread);			/* Yes, check out them out... */
					}
				}
			}

			etimer_intr(USER_MODE(ssp->save_srr1), ssp->save_srr0);	/* Handle event timer */
			break;
	
		case T_INTERRUPT:
			/* Call the platform interrupt routine */
			counter(c_incoming_interrupts++);
	
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
			   current_cpu, (unsigned int)ssp->save_srr0, 0, 0, 0);
	
#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_INT5(interrupt__start, void *, proc_info->interrupt_nub, int, proc_info->interrupt_source, 
						void *, proc_info->interrupt_target, IOInterruptHandler, proc_info->interrupt_handler,
						void *, proc_info->interrupt_refCon);
#endif

			proc_info->interrupt_handler(
				proc_info->interrupt_target, 
				proc_info->interrupt_refCon,
				proc_info->interrupt_nub, 
				proc_info->interrupt_source);
	
#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_INT5(interrupt__complete, void *, proc_info->interrupt_nub, int, proc_info->interrupt_source, 
						void *, proc_info->interrupt_target, IOInterruptHandler, proc_info->interrupt_handler,
						void *, proc_info->interrupt_refCon);
#endif

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
			   0, 0, 0, 0, 0);
	
			break;
	
		case T_SIGP:
			/* Did the other processor signal us? */ 
			cpu_signal_handler();
			break;
	
		case T_SHUTDOWN:
			cpu_doshutdown();
			panic("returning from cpu_doshutdown()\n");
			break;
	
				
		default:
			if (!Call_Debugger(type, ssp))
				unresolved_kernel_trap(type, ssp, dsisr, dar, NULL);
			break;
	}

	enable_preemption();
	return ssp;
}