Example 1
/*
 * thread_fast_set_cthread_self: Sets the machine kernel thread ID of the
 * current thread to the given thread ID; fast version for 32-bit processes
 *
 * Parameters:    self                    Thread ID to set
 *                
 * Returns:        USER_CTHREAD           Selector for the user cthread
 *                                        descriptor (loaded into %gs)
 */
kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
	thread_t thread = current_thread();
	pcb_t pcb = thread->machine.pcb;
	struct real_descriptor desc = {
		.limit_low = 1,
		.limit_high = 0,
		.base_low = self & 0xffff,
		.base_med = (self >> 16) & 0xff,
		.base_high = (self >> 24) & 0xff,
		.access = ACC_P|ACC_PL_U|ACC_DATA_W,
		.granularity = SZ_32|SZ_G,
	};

	current_thread()->machine.pcb->cthread_self = (uint64_t) self;	/* preserve old func too */

	/* assign descriptor */
	mp_disable_preemption();
	pcb->cthread_desc = desc;
	*ldt_desc_p(USER_CTHREAD) = desc;
	saved_state32(pcb->iss)->gs = USER_CTHREAD;
	mp_enable_preemption();

	return (USER_CTHREAD);
}

/*
 * thread_fast_set_cthread_self64: Sets the machine kernel thread ID of the
 * current thread to the given thread ID; fast version for 64-bit processes 
 *
 * Parameters:    self                    Thread ID
 *                
 * Returns:        USER_CTHREAD           Selector value (for 64-bit tasks the
 *                                        user GS base is set via MSR instead)
 */
kern_return_t
thread_fast_set_cthread_self64(uint64_t self)
{
	pcb_t pcb = current_thread()->machine.pcb;
	cpu_data_t              *cdp;

	/* check for canonical address, set 0 otherwise  */
	if (!IS_USERADDR64_CANONICAL(self))
		self = 0ULL;

	pcb->cthread_self = self;
	mp_disable_preemption();
	cdp = current_cpu_datap();
#if defined(__x86_64__)
	if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) ||
	    (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE)))
		wrmsr64(MSR_IA32_KERNEL_GS_BASE, self);
#endif
	cdp->cpu_uber.cu_user_gs_base = self;
	mp_enable_preemption();
	return (USER_CTHREAD);
}
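
The two variants above are presumably selected by the calling code according to the task's word size. A minimal dispatch sketch, using only the two routines shown; the wrapper name set_cthread_self and the is_64bit flag are hypothetical:

/* Hypothetical dispatch helper -- illustration only. */
static kern_return_t
set_cthread_self(uint64_t self, boolean_t is_64bit)
{
	if (is_64bit)
		return thread_fast_set_cthread_self64(self);
	/* 32-bit tasks use only the low 32 bits of the thread ID */
	return thread_fast_set_cthread_self((uint32_t) self);
}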
Example 2
/*
 * Adjust the Universal (Posix) time gradually.
 */
kern_return_t
host_adjust_time(
	host_t		host,
	time_value_t	newadj,
	time_value_t	*oldadj)	/* OUT */
{
	time_value_t	oadj;
	integer_t	ndelta;
	spl_t		s;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	ndelta = (newadj.seconds * 1000000) + newadj.microseconds;

#if	NCPUS > 1
	thread_bind(current_thread(), master_processor);
	mp_disable_preemption();
	if (current_processor() != master_processor) {
		mp_enable_preemption();
		thread_block((void (*)(void)) 0);
	} else {
		mp_enable_preemption();
	}
#endif	/* NCPUS > 1 */

	s = splclock();
	oadj.seconds = timedelta / 1000000;
	oadj.microseconds = timedelta % 1000000;
	if (timedelta == 0) {
		if (ndelta > bigadj)
			tickdelta = 10 * tickadj;
		else
			tickdelta = tickadj;
	}
	if (ndelta % tickdelta)
		ndelta = ndelta / tickdelta * tickdelta;
	timedelta = ndelta;
	splx(s);

#if	NCPUS > 1
	thread_bind(current_thread(), PROCESSOR_NULL);
#endif	/* NCPUS > 1 */

	*oldadj = oadj;

	return (KERN_SUCCESS);
}
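
A hedged usage sketch: request a gradual +0.5 s adjustment and report whatever adjustment was still pending. The use of the global realhost structure and the logging are illustrative assumptions:

/* Hypothetical caller -- slew the clock forward by 500 ms. */
static void
adjust_clock_example(void)
{
	time_value_t	newadj, oldadj;

	newadj.seconds = 0;
	newadj.microseconds = 500000;	/* applied gradually, tickdelta at a time */

	if (host_adjust_time(&realhost, newadj, &oldadj) == KERN_SUCCESS)
		printf("previous adjustment: %d s %d us\n",
		       oldadj.seconds, oldadj.microseconds);
}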
Example 3
boolean_t
swtch_pri(
__unused	struct swtch_pri_args *args)
{
	register processor_t	myprocessor;
	boolean_t				result;

	disable_preemption();
	myprocessor = current_processor();
	if (SCHED(processor_queue_empty)(myprocessor) && rt_runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(thread_depress_time);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	return (result);
}
Example 4
/*
 *	Acquire a usimple_lock.
 *
 *	MACH_RT:  Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
	int i;
	unsigned int	timeouttb;				/* Used to convert time to timebase ticks */
	pc_t		pc;
#if	ETAP_LOCK_TRACE
	etap_time_t	start_wait_time;
	int		no_miss_info = 0;
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
	int		count = 0;
#endif 	/* USLOCK_DEBUG */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
#if	ETAP_LOCK_TRACE
	ETAP_TIME_CLEAR(start_wait_time);
#endif	/* ETAP_LOCK_TRACE */

	while (!hw_lock_try(&l->interlock)) {
		ETAPCALL(if (no_miss_info++ == 0)
			start_wait_time = etap_simplelock_miss(l));
		while (hw_lock_held(&l->interlock)) {
			/*
			 *	Spin watching the lock value in cache,
			 *	without consuming external bus cycles.
			 *	On most SMP architectures, the atomic
			 *	instruction(s) used by hw_lock_try
			 *	cost much, much more than an ordinary
			 *	memory read.
			 */
#if	USLOCK_DEBUG
			if (count++ > max_lock_loops
#if	MACH_KDB && NCPUS > 1
			    && l != &kdb_lock
#endif	/* MACH_KDB && NCPUS > 1 */
			    ) {
				if (l == &printf_lock) {
					return;
				}
				mp_disable_preemption();
#if MACH_KDB
				db_printf("cpu %d looping on simple_lock(%x)"
					  "(=%x)",
					  cpu_number(), l,
					  *hw_lock_addr(l->interlock));
				db_printf(" called by %x\n", pc);
#endif
				Debugger("simple lock deadlock detection");
				count = 0;
				mp_enable_preemption();
			}
#endif 	/* USLOCK_DEBUG */
		}
	}
	ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
	USLDBG(usld_lock_post(l, pc));
}
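
The header comment notes that the lock is returned with preemption disabled, so the critical section must stay short and must not block. A minimal usage sketch, assuming the usual usimple_lock_data_t declaration and the matching usimple_unlock() release routine; the lock and counter themselves are hypothetical:

/* Hypothetical per-module state -- illustration only. */
static usimple_lock_data_t	stats_lock;	/* assumed set up with usimple_lock_init() */
static unsigned int		stats_count;

static void
stats_bump(void)
{
	usimple_lock(&stats_lock);	/* spins if needed; returns with preemption off */
	stats_count++;			/* short, non-blocking critical section */
	usimple_unlock(&stats_lock);	/* drops the lock and re-enables preemption */
}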
Example 5
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
        mapwindow_t *map;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) | 
					  INTEL_PTE_REF | INTEL_PTE_MOD));

        switch (size) {
        case 1:
	    *(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned char)data;
            break;
        case 2:
	    *(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned short)data;
            break;
        case 4:
        default:
	    *(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = data;
            break;
        }
        pmap_put_mapwindow(map);

	mp_enable_preemption();
}
Example 6
/* ARGSUSED */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	caller[] = "usimple_lock";


	if (!usld_lock_common_checks(l, caller))
		return;

/*
 *	Note that we have a weird case where we are taking a lock while we are
 *	in the process of putting the system to sleep.  We are running with no
 *	current threads, so we can't tell whether we are trying to retake a lock
 *	we already hold or whether another processor holds it.  Therefore we just
 *	ignore this test if the locking thread is 0.
 */

	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s:  lock %p already locked (at %p) by",
		      caller, l, l->debug.lock_pc);
		printf(" current thread %p (new attempt at pc %p)\n",
		       l->debug.lock_thread, pc);
		panic("%s", caller);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
Example 7
static unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size )
{
        mapwindow_t *map;
	unsigned int result;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));

        switch (size) {
            unsigned char s1;
            unsigned short s2;
        case 1:
            s1 = *(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
            result = s1;
            break;
        case 2:
            s2 = *(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
            result = s2;
            break;
        case 4:
        default:
            result = *(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
            break;
        }
        pmap_put_mapwindow(map);

	mp_enable_preemption();

        return result;
}
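
Combined with ml_phys_write_data() above, a read-modify-write of a physical word can be sketched as follows. Both helpers are static, so the sketch assumes it lives in the same source file; set_bits_phys() and its mask parameter are hypothetical, and the sequence is not atomic with respect to other CPUs or DMA:

/* Hypothetical helper: OR a mask into the 32-bit word at physical address paddr. */
static void
set_bits_phys(pmap_paddr_t paddr, unsigned int mask)
{
	unsigned int value;

	value = ml_phys_read_data(paddr, 4);	/* 4-byte read through a mapwindow */
	value |= mask;				/* not atomic across CPUs/DMA */
	ml_phys_write_data(paddr, value, 4);	/* 4-byte write back */
}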
Example 8
void
bcopy_phys(
	   addr64_t src64,
	   addr64_t dst64,
	   vm_size_t bytes)
{
        mapwindow_t *src_map, *dst_map;

	/* ensure we stay within a page */
	if ( ((((uint32_t)src64 & (NBPG-1)) + bytes) > NBPG) || ((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) ) {
	        panic("bcopy_phys alignment");
	}
	mp_disable_preemption();

	src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
	dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) |
						  INTEL_PTE_REF | INTEL_PTE_MOD));

	bcopy((void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)),
	      (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK)), bytes);

	pmap_put_mapwindow(src_map);
	pmap_put_mapwindow(dst_map);

	mp_enable_preemption();
}
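
Because bcopy_phys() panics if either range crosses a page boundary, a caller copying an arbitrary physical range has to chunk the work per page. A hedged sketch; copy_phys_range() is hypothetical:

/* Hypothetical chunked copy -- never lets one bcopy_phys() call cross a page. */
static void
copy_phys_range(addr64_t src64, addr64_t dst64, vm_size_t bytes)
{
	while (bytes > 0) {
		vm_size_t src_room = NBPG - ((uint32_t)src64 & (NBPG - 1));
		vm_size_t dst_room = NBPG - ((uint32_t)dst64 & (NBPG - 1));
		vm_size_t chunk = bytes;

		if (chunk > src_room)
			chunk = src_room;
		if (chunk > dst_room)
			chunk = dst_room;

		bcopy_phys(src64, dst64, chunk);
		src64 += chunk;
		dst64 += chunk;
		bytes -= chunk;
	}
}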
Example 9
/*
 *	start_timer starts the given timer for this cpu. It is called
 *	exactly once for each cpu during the boot sequence.
 */
void
start_timer(
	register timer_t	timer)
{
	timer->tstamp = get_timestamp();
	mp_disable_preemption();
	current_timer[cpu_number()] = timer;
	mp_enable_preemption();
}
Example 10
/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 *
 *	Preemption isn't guaranteed to be disabled.
 */
void
usld_lock_try_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	caller[] = "usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
Example 11
/*
 * Set the Universal (Posix) time. Privileged call.
 */
kern_return_t
host_set_time(
	host_t		host,
	time_value_t	new_time)
{
	spl_t	s;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

#if	NCPUS > 1
	thread_bind(current_thread(), master_processor);
	mp_disable_preemption();
	if (current_processor() != master_processor) {
		mp_enable_preemption();
		thread_block((void (*)(void)) 0);
	} else {
		mp_enable_preemption();
	}
#endif	/* NCPUS > 1 */

	s = splhigh();
	time = new_time;
	update_mapped_time(&time);
#if	PTIME_MACH_RT
	rtc_gettime_interrupts_disabled((tvalspec_t *)&last_utime_tick);
#endif	/* PTIME_MACH_RT */
#if 0
	(void)bbc_settime((time_value_t *)&time);
#endif
	splx(s);

#if	NCPUS > 1
	thread_bind(current_thread(), PROCESSOR_NULL);
#endif	/* NCPUS > 1 */

	return (KERN_SUCCESS);
}
Example 12
static void
swtch_pri_continue(void)
{
	register processor_t	myprocessor;
	boolean_t				result;

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	mp_enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}
Example 13
static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
        mapwindow_t *map;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) | 
					      INTEL_PTE_REF | INTEL_PTE_MOD));

	*(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = data;

        pmap_put_mapwindow(map);

	mp_enable_preemption();
}
Example 14
void
bzero_phys(
	   addr64_t src64,
	   uint32_t bytes)
{
        mapwindow_t *map;

        mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));

	bzero((void *)((uintptr_t)map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)), bytes);

	pmap_put_mapwindow(map);

	mp_enable_preemption();
}
Example 15
/*
 *	time_int_exit does interrupt exit timing.  Caller must lock out
 *	interrupts and take a timestamp.  ts is a timestamp taken after
 *	interrupts were locked out.  old_timer is the timer value pushed
 *	onto the stack or otherwise saved after time_int_entry returned
 *	it.
 */
void
time_int_exit(
	unsigned	ts,
	timer_t		old_timer)
{
	int	elapsed;
	int	mycpu;
	timer_t	mytimer;

	mp_disable_preemption();

	/*
	 *	Calculate elapsed time.
	 */
	mycpu = cpu_number();
	mytimer = current_timer[mycpu];
	elapsed = ts - mytimer->tstamp;
#ifdef	TIMER_MAX
	if (elapsed < 0) elapsed += TIMER_MAX;
#endif	/* TIMER_MAX */

	/*
	 *	Update current timer.
	 */
	mytimer->low_bits += elapsed;
	mytimer->tstamp = 0;

	/*
	 *	If normalization requested, do it.
	 */
	if (mytimer->low_bits & TIMER_LOW_FULL) {
		timer_normalize(mytimer);
	}
	if (old_timer->low_bits & TIMER_LOW_FULL) {
		timer_normalize(old_timer);
	}

	/*
	 *	Start timer that was running before interrupt.
	 */
	old_timer->tstamp = ts;
	current_timer[mycpu] = old_timer;

	mp_enable_preemption();
}
Example 16
static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr )
{
        mapwindow_t *map;
	unsigned long long result;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));

	result = *(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));

        pmap_put_mapwindow(map);

	mp_enable_preemption();

        return result;
}
Example 17
/*
 *	Determine whether any usimple_locks are currently held.
 *
 *	MACH_RT:  Caller's preemption state is uncertain.  If
 *	preemption has been disabled, this check is accurate.
 *	Otherwise, this check is just a guess.  We do the best
 *	we can by disabling scheduler interrupts, so at least
 *	the check is accurate w.r.t. whatever cpu we're running
 *	on while in this routine.
 */
void
usld_lock_none_held()
{
	register int	mycpu;
	spl_t		s;
	unsigned int	locks_held;
	char		*caller = "usimple_lock_none_held";

	DISABLE_INTERRUPTS(s);
	mp_disable_preemption();
	mycpu = cpu_number();
	locks_held = uslock_stack_index[mycpu];
	mp_enable_preemption();
	ENABLE_INTERRUPTS(s);
	if (locks_held > 0)
		panic("%s:  no locks should be held (0x%x locks held)",
		      caller, (integer_t) locks_held);
}
Example 18
/*
 *	timer_switch switches to a new timer.  The machine
 *	dependent routine/macro get_timestamp must return a timestamp.
 *	Caller must lock out interrupts.
 */
void
timer_switch(
	timer_t	new_timer)
{
	int		elapsed;
	int		mycpu;
	timer_t		mytimer;
	unsigned	ts;

	mp_disable_preemption();

	/*
	 *	Calculate elapsed time.
	 */
	mycpu = cpu_number();
	mytimer = current_timer[mycpu];
	ts = get_timestamp();
	elapsed = ts - mytimer->tstamp;
#ifdef	TIMER_MAX
	if (elapsed < 0) elapsed += TIMER_MAX;
#endif	/* TIMER_MAX */

	/*
	 *	Update current timer.
	 */
	mytimer->low_bits += elapsed;
	mytimer->tstamp = 0;

	/*
	 *	Normalization check
	 */
	if (mytimer->low_bits & TIMER_LOW_FULL) {
		timer_normalize(mytimer);
	}

	/*
	 *	Record new timer.
	 */
	current_timer[mycpu] = new_timer;
	new_timer->tstamp = ts;

	mp_enable_preemption();
}
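
As the comment says, the caller is responsible for locking out interrupts around the switch. A minimal sketch of charging subsequent time to another thread's system timer; the system_timer field and the helper itself are assumptions (the per-thread user_timer field does appear in time_trap_uexit() later in this listing):

/* Hypothetical helper: start charging time to new_thread's system timer. */
static void
charge_system_time(thread_t new_thread)
{
	spl_t	s;

	s = splsched();				/* lock out interrupts, per the comment */
	timer_switch(&new_thread->system_timer);
	splx(s);
}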
Example 19
/*
 *	init_timers initializes all non-thread timers and puts the
 *	service routine on the callout queue.  All timers must be
 *	serviced by the callout routine once an hour.
 */
void
init_timers(void)
{
	register int	i;
	register timer_t	this_timer;

	/*
	 *	Initialize all the kernel timers and start the one
	 *	for this cpu (the master); slaves start theirs later.
	 */
	this_timer = &kernel_timer[0];
	for ( i=0 ; i<NCPUS ; i++, this_timer++) {
		timer_init(this_timer);
		current_timer[i] = (timer_t) 0;
	}

	mp_disable_preemption();
	start_timer(&kernel_timer[cpu_number()]);
	mp_enable_preemption();
}
Example 20
void fillPage(ppnum_t pa, unsigned int fill)
{
        mapwindow_t *map;
	pmap_paddr_t src;
	int i;
	int cnt = PAGE_SIZE/sizeof(unsigned int);
	unsigned int *addr;

	mp_disable_preemption();

	src = i386_ptob(pa);
	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) | 
					      INTEL_PTE_REF | INTEL_PTE_MOD));

	for (i = 0, addr = (unsigned int *)map->prv_CADDR; i < cnt ; i++ )
	        *addr++ = fill;

	pmap_put_mapwindow(map);

	mp_enable_preemption();
}
Example 21
void
delayed_clock(void)
{
    int	i;
    int	my_cpu;

    mp_disable_preemption();
    my_cpu = cpu_number();

    if (missed_clock[my_cpu] > 1 && detect_lost_tick)
        printf("hardclock: missed %d clock interrupt(s) at %x\n",
               missed_clock[my_cpu]-1, masked_pc[my_cpu]);
    if (my_cpu == master_cpu) {
        i = rtclock_intr();
        assert(i == 0);
    }
    hertz_tick(0, masked_pc[my_cpu]);
    missed_clock[my_cpu] = 0;

    mp_enable_preemption();
}
Example 22
/*
 *	time_trap_uexit does trap exit timing.  Caller must lock out
 *	interrupts and take a timestamp.  ts is a timestamp taken after
 *	interrupts were locked out.  Must only be called if returning to
 *	user mode.
 */
void
time_trap_uexit(
	unsigned	ts)
{
	int	elapsed;
	int	mycpu;
	timer_t	mytimer;

	mp_disable_preemption();

	/*
	 *	Calculate elapsed time.
	 */
	mycpu = cpu_number();
	mytimer = current_timer[mycpu];
	elapsed = ts - mytimer->tstamp;
#ifdef	TIMER_MAX
	if (elapsed < 0) elapsed += TIMER_MAX;
#endif	/* TIMER_MAX */

	/*
	 *	Update current timer.
	 */
	mytimer->low_bits += elapsed;
	mytimer->tstamp = 0;

	if (mytimer->low_bits & TIMER_LOW_FULL) {
		timer_normalize(mytimer);	/* SYSTEMMODE */
	}

	mytimer = &(current_thread()->user_timer);

	/*
	 *	Record new timer.
	 */
	current_timer[mycpu] = mytimer;
	mytimer->tstamp = ts;

	mp_enable_preemption();
}
Example 23
void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE)
		return;

	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
Example 24
void
xpr(
	const char	*msg,
	long		arg1,
	long		arg2,
	long		arg3,
	long		arg4,
	long		arg5)
{
	spl_t s;
	struct xprbuf *x;

	/* If we aren't initialized, ignore trace request */
	if (!xprenable || (xprptr == 0))
		return;
	/* Guard against all interrupts and allocate next buffer. */

	s = splhigh();
	simple_lock(&xprlock);
	x = xprptr++;
	if (xprptr >= xprlast) {
		/* wrap around */
		xprptr = xprbase;
	}
	/* Save xprptr in allocated memory. */
	*(struct xprbuf **)xprlast = xprptr;
	simple_unlock(&xprlock);
	x->timestamp = XPR_TIMESTAMP;
	splx(s);
	x->msg = msg;
	x->arg1 = arg1;
	x->arg2 = arg2;
	x->arg3 = arg3;
	x->arg4 = arg4;
	x->arg5 = arg5;
	mp_disable_preemption();
	x->cpuinfo = cpu_number();
	mp_enable_preemption();
}
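
All five long arguments must be supplied on every call; unused slots are conventionally padded with zeros. A hedged usage sketch; the event name and wrapper are made up:

/* Hypothetical trace point -- shows the fixed five-argument convention. */
static void
trace_wakeup(thread_t thread, long event)
{
	xpr("wakeup: thread=%x event=%x\n",
	    (long) thread, event, 0, 0, 0);	/* pad unused slots with zeros */
}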
Example 25
/*
 * thread_set_user_ldt routine is the interface for the user level
 * settable ldt entry feature.  allowing a user to create arbitrary
 * ldt entries seems to be too large of a security hole, so instead
 * this mechanism is in place to allow user level processes to have
 * an ldt entry that can be used in conjunction with the FS register.
 *
 * Swapping occurs inside the pcb.c file along with initialization
 * when a thread is created. The basic functioning theory is that the
 * pcb->uldt_selector variable will contain either 0 meaning the
 * process has not set up any entry, or the selector to be used in
 * the FS register. pcb->uldt_desc contains the actual descriptor the
 * user has set up stored in machine usable ldt format.
 *
 * Currently one entry is shared by all threads (USER_SETTABLE), but
 * this could be changed in the future by changing how this routine
 * allocates the selector. There seems to be no real reason at this
 * time to have this added feature, but in the future it might be
 * needed.
 *
 * address is the linear address of the start of the data area; size
 * is the size in bytes of the area; flags should always be set to 0
 * for now. In the future it could be used to set R/W permissions or
 * other attributes. Currently the segment is created as a data segment
 * up to 1 megabyte in size with full read/write permissions only.
 *
 * this call returns the segment selector or -1 if any error occurs
 */
kern_return_t
thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
{
	pcb_t pcb;
	struct fake_descriptor temp;
	int mycpu;

	if (flags != 0)
		return -1;		// flags not supported
	if (size > 0xFFFFF)
		return -1;		// size too big, 1 meg is the limit

	mp_disable_preemption();
	mycpu = cpu_number();

	// create a "fake" descriptor so we can use fix_desc()
	// to build a real one...
	//   32 bit default operation size
	//   standard read/write perms for a data segment
	pcb = (pcb_t)current_thread()->machine.pcb;
	temp.offset = address;
	temp.lim_or_seg = size;
	temp.size_or_wdct = SZ_32;
	temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;

	// turn this into a real descriptor
	fix_desc(&temp,1);

	// set up our data in the pcb
	pcb->uldt_desc = *(struct real_descriptor*)&temp;
	pcb->uldt_selector = USER_SETTABLE;		// set the selector value

	// now set it up in the current table...
	*ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor*)&temp;

	mp_enable_preemption();

	return USER_SETTABLE;
}
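
Per the comment above, a caller passes the base address and byte size of the data area plus a zero flags word, then uses the returned selector with the FS register. A hedged sketch of the call side only; publish_tls_segment() and its parameters are hypothetical:

/* Hypothetical: publish a thread-local data area through the settable LDT slot. */
static kern_return_t
publish_tls_segment(uint32_t tls_base, uint32_t tls_size)
{
	kern_return_t	sel;

	sel = thread_set_user_ldt(tls_base, tls_size, 0);	/* flags must be 0 */
	if (sel == (kern_return_t) -1)
		return KERN_FAILURE;	/* size > 1 MB or unsupported flags */

	/* sel is USER_SETTABLE; user code would load it into %fs. */
	return KERN_SUCCESS;
}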
Example 26
/* ARGSUSED */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char		*caller = "usimple_lock";

	if (!usld_lock_common_checks(l, caller))
		return;

	if ((l->debug.state & USLOCK_TAKEN) &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s:  lock 0x%x already locked (at 0x%x) by",
		      caller, (integer_t) l, l->debug.lock_pc);
		printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
		       l->debug.lock_thread, pc);
		panic("%s", caller);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
Example 27
/*
 *	time_int_entry does interrupt entry timing.  Caller must lock out
 *	interrupts and take a timestamp. ts is a timestamp taken after
 *	interrupts were locked out.  new_timer is the new timer to
 *	switch to.  This routine returns the currently running timer,
 *	which MUST be pushed onto the stack by the caller, or otherwise
 *	saved for time_int_exit.
 */
timer_t
time_int_entry(
	unsigned	ts,
	timer_t		new_timer)
{
	int	elapsed;
	int	mycpu;
	timer_t	mytimer;

	mp_disable_preemption();

	/*
	 *	Calculate elapsed time.
	 */
	mycpu = cpu_number();
	mytimer = current_timer[mycpu];

	elapsed = ts - mytimer->tstamp;
#ifdef	TIMER_MAX
	if (elapsed < 0) elapsed += TIMER_MAX;
#endif	/* TIMER_MAX */

	/*
	 *	Update current timer.
	 */
	mytimer->low_bits += elapsed;
	mytimer->tstamp = 0;

	/*
	 *	Switch to new timer, and save old one on stack.
	 */
	new_timer->tstamp = ts;
	current_timer[mycpu] = new_timer;

	mp_enable_preemption();

	return(mytimer);
}
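
time_int_entry() and time_int_exit() (Example 15 above) are meant to bracket an interrupt handler: the timer returned by entry must be saved and handed back to exit. A hedged sketch; timed_interrupt(), its pre-initialized int_timer argument, and handle_device_interrupt() are illustrative assumptions:

extern void handle_device_interrupt(void);	/* hypothetical handler body */

/* Hypothetical wrapper showing the entry/exit pairing. */
static void
timed_interrupt(timer_t int_timer)
{
	unsigned	ts;
	timer_t		old;

	ts = get_timestamp();			/* interrupts already locked out here */
	old = time_int_entry(ts, int_timer);	/* switch to the interrupt timer */

	handle_device_interrupt();

	time_int_exit(get_timestamp(), old);	/* resume the preempted timer */
}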
Example 28
void
ast_check(void)
{
	register int		mycpu;
	register processor_t	myprocessor;
	register thread_t	thread = current_thread();
	spl_t			s = splsched();

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Check processor state for ast conditions.
	 */
	myprocessor = cpu_to_processor(mycpu);
	switch(myprocessor->state) {
	    case PROCESSOR_OFF_LINE:
	    case PROCESSOR_IDLE:
	    case PROCESSOR_DISPATCHING:
		/*
		 *	No ast.
		 */
	    	break;

#if	NCPUS > 1
	    case PROCESSOR_ASSIGN:
	    case PROCESSOR_SHUTDOWN:
	        /*
		 * 	Need ast to force action thread onto processor.
		 *
		 * XXX  Should check if action thread is already there.
		 */
		ast_on(mycpu, AST_BLOCK);
		break;
#endif	/* NCPUS > 1 */

	    case PROCESSOR_RUNNING:
	    case PROCESSOR_VIDLE:

		/*
		 *	Propagate thread ast to processor.  If we already
		 *	need an ast, don't look for more reasons.
		 */
		ast_propagate(current_act(), mycpu);
		if (ast_needed(mycpu))
			break;

		/*
		 *	Context switch check.
		 */
		if (csw_needed(thread, myprocessor)) {
			ast_on(mycpu, (myprocessor->first_quantum ?
			       AST_BLOCK : AST_QUANTUM));
		}
		break;

	    default:
	        panic("ast_check: Bad processor state");
	}
	mp_enable_preemption();
	splx(s);
}
Example 29
__private_extern__ int ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
	void *src, *dst;
	int err = 0;

	mp_disable_preemption();
#if NCOPY_WINDOWS > 0
	mapwindow_t *src_map, *dst_map;
	/* We rely on MTRRs here */
	src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
	dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));
	src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK));
	dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK));
#elif defined(__x86_64__)
	addr64_t debug_pa = 0;

	/* If either destination or source are outside the
	 * physical map, establish a physical window onto the target frame.
	 */
	assert(physmap_enclosed(src64) || physmap_enclosed(dst64));

	if (physmap_enclosed(src64) == FALSE) {
		src = (void *)(debugger_window_kva | (src64 & INTEL_OFFMASK));
		dst = PHYSMAP_PTOV(dst64);
		debug_pa = src64 & PG_FRAME;
	} else if (physmap_enclosed(dst64) == FALSE) {
		src = PHYSMAP_PTOV(src64);
		dst = (void *)(debugger_window_kva | (dst64 & INTEL_OFFMASK));
		debug_pa = dst64 & PG_FRAME;
	} else {
		src = PHYSMAP_PTOV(src64);
		dst = PHYSMAP_PTOV(dst64);
	}
	/* DRK: debugger only routine, we don't bother checking for an
	 * identical mapping.
	 */
	if (debug_pa) {
		if (debugger_window_kva == 0)
			panic("%s: invoked in non-debug mode", __FUNCTION__);
		/* Establish a cache-inhibited physical window; some platforms
		 * may not cover arbitrary ranges with MTRRs
		 */
		pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF| INTEL_PTE_MOD | INTEL_PTE_VALID);
		flush_tlb_raw();
#if	DEBUG
		kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa);
#endif
	}
#endif
	/* ensure we stay within a page */
	if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) ) {
	        panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
	}

	/*
	 * For device register access from the debugger,
	 * 2-byte/16-bit, 4-byte/32-bit and 8-byte/64-bit copies are handled
	 * by assembly routines ensuring the required access widths.
	 * 1-byte and other copies are handled by the regular _bcopy.
	 */
	switch (bytes) {
	case 2:
		err = _bcopy2(src, dst);
		break;
	case 4:
		err = _bcopy4(src, dst);
		break;
	case 8:
		err = _bcopy8(src, dst);
		break;
	case 1:
	default:
		err = _bcopy(src, dst, bytes);
		break;
	}

#if NCOPY_WINDOWS > 0
	pmap_put_mapwindow(src_map);
	pmap_put_mapwindow(dst_map);
#endif
	mp_enable_preemption();

	return err;
}
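
Since 2-, 4- and 8-byte copies are routed through width-preserving assembly helpers, a debugger-side access to a 32-bit device register can be expressed as a 4-byte ml_copy_phys(); the wrapper below is hypothetical and, like the underlying routine, must not let the copy span a page:

/* Hypothetical: copy one 32-bit device register between physical addresses. */
static int
copy_device_reg32(addr64_t src_pa, addr64_t dst_pa)
{
	return ml_copy_phys(src_pa, dst_pa, 4);	/* 4 bytes -> the _bcopy4 path */
}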
Example 30
void
ast_taken(
	boolean_t		preemption,
	ast_t			mask,
	spl_t			old_spl
#if	FAST_IDLE
        ,int			thread_type
#endif	/* FAST_IDLE */
)
{
	register thread_t	self = current_thread();
	register processor_t	mypr;
	register ast_t		reasons;
	register int		mycpu;
	thread_act_t		act = self->top_act;

	/*
	 *	Interrupts are still disabled.
	 *	We must clear need_ast and then enable interrupts.
	 */

extern void	log_thread_action(thread_t, char *);

#if 0
	log_thread_action (current_thread(), "ast_taken");
#endif

	mp_disable_preemption();
	mycpu = cpu_number();
	reasons = need_ast[mycpu] & mask;
	need_ast[mycpu] &= ~reasons;
	mp_enable_preemption();

	splx(old_spl);

	/*
	 *	These actions must not block.
	 */

#if	MCMSG
	if (reasons & AST_MCMSG)
		mcmsg_ast();
#endif	/* MCMSG */

	if (reasons & AST_NETWORK)
		net_ast();

#if	MCMSG_ENG
	if (reasons & AST_RPCREQ)
		rpc_engine_request_intr();

	if (reasons & AST_RPCREPLY)
		rpc_engine_reply_intr();

	if (reasons & AST_RPCDEPART)
		rpc_engine_depart_intr();

	if (reasons & AST_RDMASEND)
		rdma_engine_send_intr();

	if (reasons & AST_RDMARECV)
		rdma_engine_recv_intr();

	if (reasons & AST_RDMATXF)
		rdma_engine_send_fault_intr();

	if (reasons & AST_RDMARXF)
		rdma_engine_recv_fault_intr();
#endif	/* MCMSG_ENG */

#if	PARAGON860 && MCMSG_ENG
	if (reasons & AST_SCAN_INPUT)
		scan_input_ast();
#endif	/* PARAGON860 */

#if	DIPC
	if (reasons & AST_DIPC)
		dipc_ast();
#endif	/* DIPC */

	/*
	 *	Make darn sure that we don't call thread_halt_self
	 *	or thread_block from the idle thread.
	 */

	/* XXX - this isn't currently right for the HALT case... */

	mp_disable_preemption();
	mypr = current_processor();
	if (self == mypr->idle_thread) {
#if	NCPUS == 1
	    if (reasons & AST_URGENT) {
		if (!preemption)
		    panic("ast_taken: AST_URGENT for idle_thr w/o preemption");
	    }
#endif
	    mp_enable_preemption();
	    return;
	}
	mp_enable_preemption();

#if	FAST_IDLE
	if (thread_type != NO_IDLE_THREAD)
		return;
#endif	/* FAST_IDLE */

#if	TASK_SWAPPER
	/* must be before AST_APC */
	if (reasons & AST_SWAPOUT) {
		spl_t s;
		swapout_ast();
		s = splsched();
		mp_disable_preemption();
		mycpu = cpu_number();
		if (need_ast[mycpu] & AST_APC) {
			/* generated in swapout_ast() to get suspended */
			reasons |= AST_APC;		/* process now ... */
			need_ast[mycpu] &= ~AST_APC;	/* ... and not later */
		}
		mp_enable_preemption();
		splx(s);
	}
#endif	/* TASK_SWAPPER */

	/* migration APC hook */
	if (reasons & AST_APC) {
		act_execute_returnhandlers();
		return;	/* auto-retry will catch anything new */
	}

	/* 
	 *	thread_block needs to know if the thread's quantum 
	 *	expired so the thread can be put on the tail of
	 *	run queue. One of the previous actions might well
	 *	have woken a high-priority thread, so we also use
	 *	csw_needed check.
	 */
	{   void (*safept)(void) = (void (*)(void))SAFE_EXCEPTION_RETURN;

	    if (reasons &= AST_PREEMPT) {
		    if (preemption)
			    safept = (void (*)(void)) 0;
	    } else {
		    mp_disable_preemption();
		    mypr = current_processor();
		    if (csw_needed(self, mypr)) {
			    reasons = (mypr->first_quantum
				       ? AST_BLOCK
				       : AST_QUANTUM);
		    }
		    mp_enable_preemption();
	    }
	    if (reasons) {
		    counter(c_ast_taken_block++);
		    thread_block_reason(safept, reasons);
	    }
	}
}