Example #1
File: chario.c Project: 0xffea/gnumach
/*
 * Retry wait for CARR_ON for open.
 * No locks may be held.
 * May run on any CPU.
 */
boolean_t char_open_done(
	io_req_t	ior)
{
	register struct tty *tp = (struct tty *)ior->io_dev_ptr;
	spl_t s = spltty();

	simple_lock(&tp->t_lock);
	if ((tp->t_state & TS_ISOPEN) == 0) {
	    queue_delayed_reply(&tp->t_delayed_open, ior, char_open_done);
	    simple_unlock(&tp->t_lock);
	    splx(s);
	    return FALSE;
	}

	tp->t_state |= TS_ISOPEN;
	tp->t_state &= ~TS_WOPEN;

	if (tp->t_mctl)
		(*tp->t_mctl)(tp, TM_RTS, DMBIS);

	simple_unlock(&tp->t_lock);
	splx(s);

	ior->io_error = D_SUCCESS;
	(void) ds_open_done(ior);
	return TRUE;
}
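
The shape of char_open_done() recurs in Examples #3 and #6 below: test the condition under the lock and, if the request cannot complete yet, park it with queue_delayed_reply() for a later retry instead of blocking. A minimal user-space sketch of the same pattern, assuming a pthread mutex in place of simple_lock() and hypothetical names (request, pending, try_complete):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct request {
	struct request *next;
	/* reply port, buffers, ... */
};

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct request *pending;		/* analogue of tp->t_delayed_open */
static bool dev_ready;			/* analogue of TS_ISOPEN */

/* Returns true if the request completed, false if it was parked to be
 * retried later, just as char_open_done() returns FALSE after
 * queue_delayed_reply(). */
bool try_complete(struct request *req)
{
	pthread_mutex_lock(&dev_lock);
	if (!dev_ready) {
		req->next = pending;	/* park for a later retry */
		pending = req;
		pthread_mutex_unlock(&dev_lock);
		return false;
	}
	pthread_mutex_unlock(&dev_lock);
	/* Deliver the reply outside the lock, as ds_open_done() is
	 * called only after simple_unlock(). */
	return true;
}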
Example #2
void
mk_timer_port_destroy(
	ipc_port_t			port)
{
	mk_timer_t			timer = NULL;

	ip_lock(port);
	if (ip_kotype(port) == IKOT_TIMER) {
		timer = (mk_timer_t)port->ip_kobject;
		assert(timer != NULL);
		ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE);
		simple_lock(&timer->lock);
		assert(timer->port == port);
	}
	ip_unlock(port);

	if (timer != NULL) {
		if (thread_call_cancel(&timer->call_entry))
			timer->active--;
		timer->is_armed = FALSE;

		timer->is_dead = TRUE;
		if (timer->active == 0) {
			simple_unlock(&timer->lock);
			zfree(mk_timer_zone, timer);

			ipc_port_release_send(port);
			return;
		}

		simple_unlock(&timer->lock);
	}
}
Example #3
File: chario.c Project: 0xffea/gnumach
/*
 * Retry wait for output queue emptied, for write.
 * No locks may be held.
 * May run on any CPU.
 */
boolean_t char_write_done(
	register io_req_t	ior)
{
	register struct tty *tp = (struct tty *)ior->io_dev_ptr;
	register spl_t s = spltty();

	simple_lock(&tp->t_lock);
	if (tp->t_outq.c_cc > TTHIWAT(tp) ||
	    (tp->t_state & TS_CARR_ON) == 0) {

	    queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
	    simple_unlock(&tp->t_lock);
	    splx(s);
	    return FALSE;
	}
	simple_unlock(&tp->t_lock);
	splx(s);

	if (IP_VALID(ior->io_reply_port)) {
	  (void) (*((ior->io_op & IO_INBAND) ?
		    ds_device_write_reply_inband :
		    ds_device_write_reply))(ior->io_reply_port,
					    ior->io_reply_port_type,
					    ior->io_error,
					    (int) (ior->io_total -
						   ior->io_residual));
	}
	mach_device_deallocate(ior->io_device);
	return TRUE;
}
Example #4
File: cpu.c Project: Bitesher/xnu
void
cpu_exit_wait(
	int cpu)
{
	cpu_data_t	*cdp = cpu_datap(cpu);
	boolean_t	intrs_enabled;
	uint64_t	tsc_timeout;

	/*
	 * Wait until the CPU indicates that it has stopped.
	 * Disable interrupts while the topo lock is held -- arguably
	 * this should always be done but in this instance it can lead to
	 * a timeout if a long-running interrupt were to occur here.
	 */
	intrs_enabled = ml_set_interrupts_enabled(FALSE);
	simple_lock(&x86_topo_lock);
	/* Set a generous timeout of several seconds (in TSC ticks) */
	tsc_timeout = rdtsc64() + (10ULL * 1000 * 1000 * 1000);
	while ((cdp->lcpu.state != LCPU_HALT)
	       && (cdp->lcpu.state != LCPU_OFF)
	       && !cdp->lcpu.stopped) {
	    simple_unlock(&x86_topo_lock);
	    ml_set_interrupts_enabled(intrs_enabled);
	    cpu_pause();
	    if (rdtsc64() > tsc_timeout)
		panic("cpu_exit_wait(%d) timeout", cpu);
	    ml_set_interrupts_enabled(FALSE);
	    simple_lock(&x86_topo_lock);
	}
	simple_unlock(&x86_topo_lock);
	ml_set_interrupts_enabled(intrs_enabled);
}
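
cpu_exit_wait() shows the polite way to poll under a spinlock: drop the lock (and restore the interrupt state) on every iteration so the departing CPU can make progress, and bound the wait with a generous timeout that panics rather than hanging silently. A hedged user-space analogue, substituting CLOCK_MONOTONIC for TSC ticks and a hypothetical 'stopped' flag for the lcpu state:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static pthread_mutex_t topo_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool stopped;		/* set by the exiting thread */

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

void wait_for_stop(void)
{
	uint64_t deadline = now_ns() + 10ull * 1000000000ull;	/* ~10s */

	pthread_mutex_lock(&topo_lock);
	while (!stopped) {
		/* Drop the lock before pausing, exactly as
		 * cpu_exit_wait() releases x86_topo_lock around
		 * cpu_pause(). */
		pthread_mutex_unlock(&topo_lock);
		sched_yield();
		if (now_ns() > deadline) {
			fprintf(stderr, "wait_for_stop: timeout\n");
			abort();
		}
		pthread_mutex_lock(&topo_lock);
	}
	pthread_mutex_unlock(&topo_lock);
}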
Example #5
/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */

	simple_lock(&ds->ds_slock);
	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);
	if (ds->ds_flags & DMIO_STATE_DEAD) {
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			simple_unlock(&ds->ds_slock);
			seldestroy(&ds->ds_selq);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selnotify(&ds->ds_selq, POLLOUT | POLLWRNORM, 0);
		}
	}
	simple_unlock(&ds->ds_slock);
}
Example #6
File: chario.c Project: 0xffea/gnumach
/*
 * Retry wait for characters, for read.
 * No locks may be held.
 * May run on any CPU - does not talk to device driver.
 */
boolean_t char_read_done(
	register io_req_t	ior)
{
	register struct tty *tp = (struct tty *)ior->io_dev_ptr;
	register spl_t s = spltty();

	simple_lock(&tp->t_lock);

	if (tp->t_inq.c_cc <= 0 ||
	    (tp->t_state & TS_CARR_ON) == 0) {

	    queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
	    simple_unlock(&tp->t_lock);
	    splx(s);
	    return FALSE;
	}

	ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
						  ior->io_data,
						  (int)ior->io_count);
	if (tp->t_state & TS_RTS_DOWN) {
	    (*tp->t_mctl)(tp, TM_RTS, DMBIS);
	    tp->t_state &= ~TS_RTS_DOWN;
	}

	simple_unlock(&tp->t_lock);
	splx(s);

	(void) ds_read_done(ior);
	return TRUE;
}
Example #7
File: lock.c Project: 0xffea/gnumach
/*
 *	Routine:	lock_try_read_to_write
 *	Function:
 *		Improves a read-only lock to one with
 *		write permission.  If another reader has
 *		already requested an upgrade to a write lock,
 *		the read lock is still held upon return.
 *
 *		Returns FALSE if the upgrade *failed*.
 */
boolean_t lock_try_read_to_write(
	register lock_t	l)
{
	check_simple_locks();
	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock
		 */
		l->read_count--;
		l->recursion_depth++;
		simple_unlock(&l->interlock);
		return TRUE;
	}

	if (l->want_upgrade) {
		simple_unlock(&l->interlock);
		return FALSE;
	}
	l->want_upgrade = TRUE;
	l->read_count--;

	while (l->read_count != 0) {
		l->waiting = TRUE;
		thread_sleep(l,
			simple_lock_addr(l->interlock), FALSE);
		simple_lock(&l->interlock);
	}

	simple_unlock(&l->interlock);
	return TRUE;
}
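
Because lock_try_read_to_write() keeps the read lock when the upgrade is refused, the caller remains responsible for releasing it. A hedged caller sketch, assuming the usual Mach companions lock_read(), lock_write() and lock_done() from this same file:

void update_entry(lock_t l)
{
	lock_read(l);			/* shared lookup phase */
	/* ... find the entry ... */

	if (lock_try_read_to_write(l)) {
		/* ... modify under the write lock ... */
		lock_done(l);		/* drops the write lock */
	} else {
		/* Upgrade refused: the read lock is STILL held, so
		 * release it before falling back to a full write
		 * acquisition. */
		lock_done(l);
		lock_write(l);
		/* ... revalidate the lookup, then modify ... */
		lock_done(l);
	}
}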
Example #8
File: lock.c Project: 0xffea/gnumach
boolean_t lock_try_write(
	register lock_t	l)
{
	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock
		 */
		l->recursion_depth++;
		simple_unlock(&l->interlock);
		return TRUE;
	}

	if (l->want_write || l->want_upgrade || l->read_count) {
		/*
		 *	Can't get lock.
		 */
		simple_unlock(&l->interlock);
		return FALSE;
	}

	/*
	 *	Have lock.
	 */

	l->want_write = TRUE;
	simple_unlock(&l->interlock);
	return TRUE;
}
Example #9
void
action_thread(void)
{
	register processor_t	processor;
	spl_t			s;

	thread_swappable(current_act(), FALSE);

	while (TRUE) {
		s = splsched();
		simple_lock(&action_lock);
		while ( !queue_empty(&action_queue)) {
			processor = (processor_t) queue_first(&action_queue);
			queue_remove(&action_queue, processor, processor_t,
				     processor_queue);
			simple_unlock(&action_lock);
			splx(s);

			processor_doaction(processor);

			s = splsched();
			simple_lock(&action_lock);
		}

		assert_wait((event_t) &action_queue, FALSE);
		simple_unlock(&action_lock);
		splx(s);
		counter(c_action_thread_block++);
		thread_block((void (*)(void)) 0);
	}
}
Example #10
File: lock.c Project: rohsaini/mkunity
boolean_t
lock_try_read(
	register lock_t	* l)
{
	start_data_node_t  entry = {0};
	unsigned short     trace = 0;
	pc_t		   pc;

        ETAP_STAMP(lock_event_table(l), trace, trace);
        ETAP_CREATE_ENTRY(entry, trace);

	simple_lock(&l->interlock);

	if (l->want_write || l->want_upgrade) {
		simple_unlock(&l->interlock);
                ETAP_DESTROY_ENTRY(entry);
		return(FALSE);
	}

	l->read_count++;

        ETAP_LINK_ENTRY(l, entry, trace);

	simple_unlock(&l->interlock);

        MON_ASSIGN_PC(entry->start_pc, pc, trace);
        ETAP_DURATION_TIMESTAMP(entry, trace);

	return(TRUE);
}
Example #11
File: thread.c Project: CptFrazz/xnu
/*
 *	thread_stack_daemon:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);

		stack_alloc(thread);
		
		(void)splsched();
		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}
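
thread_stack_daemon() follows the same discipline as action_thread() in Example #9 and thread_terminate_daemon() in Example #17: dequeue under the lock, drop the lock for the slow work, and register the wait (assert_wait) before unlocking so a concurrent enqueue-plus-wakeup cannot be lost. A condition-variable analogue in user space, with hypothetical names (work_q, do_work):

#include <pthread.h>
#include <stddef.h>

struct work { struct work *next; };

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  work_cv   = PTHREAD_COND_INITIALIZER;
static struct work *work_q;

static void do_work(struct work *w) { (void)w; /* ... */ }

void *daemon_loop(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&work_lock);
	for (;;) {
		struct work *w;
		while ((w = work_q) != NULL) {
			work_q = w->next;
			/* Drop the lock for the slow part, as the
			 * daemons above do around stack_alloc() and
			 * thread_deallocate(). */
			pthread_mutex_unlock(&work_lock);
			do_work(w);
			pthread_mutex_lock(&work_lock);
		}
		/* Equivalent of assert_wait() + thread_block(): the
		 * wait is registered while the lock is still held, so
		 * no wakeup can slip in between. */
		pthread_cond_wait(&work_cv, &work_lock);
	}
	return NULL;		/* not reached */
}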
Example #12
/*
 *	Routine:	cpu_machine_init
 *	Function:
 */
void
cpu_machine_init(
	void)
{
	struct per_proc_info			*proc_info;
	volatile struct per_proc_info	*mproc_info;


	proc_info = getPerProc();
	mproc_info = PerProcTable[master_cpu].ppe_vaddr;

	if (proc_info != mproc_info) {
		simple_lock(&rht_lock);
		if (rht_state & RHT_WAIT)
			thread_wakeup(&rht_state);
		rht_state &= ~(RHT_BUSY|RHT_WAIT);
		simple_unlock(&rht_lock);
	}

	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

	if (proc_info->hibernate) {
		uint32_t	tbu, tbl;

		do {
			tbu = mftbu();
			tbl = mftb();
		} while (mftbu() != tbu);

		proc_info->hibernate = 0;
		hibernate_machine_init();

		// hibernate_machine_init() could take minutes and we don't want timeouts
		// to fire as soon as scheduling starts. Reset timebase so it appears
		// no time has elapsed, as it would for regular sleep.
		mttb(0);
		mttbu(tbu);
		mttb(tbl);
	}

	if (proc_info != mproc_info) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;
		cpu_sync_timebase();
	}

	ml_init_interrupt();
	if (proc_info != mproc_info)
		simple_lock(&SignalReadyLock);
	proc_info->cpu_flags |= BootDone|SignalReady;
	if (proc_info != mproc_info) {
		if (proc_info->ppXFlags & SignalReadyWait) {
			(void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
			thread_wakeup(&proc_info->cpu_flags);
		}
		simple_unlock(&SignalReadyLock);
		pmsPark();						/* Timers should be cool now, park the power management stepper */
	}
}
Example #13
File: host.c Project: DJHartley/xnu
kern_return_t
get_sched_statistics( 
		struct _processor_statistics_np *out, 
		uint32_t *count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	simple_lock(&processor_list_lock);
	
	if (*count < (processor_count + 2) * sizeof(struct _processor_statistics_np)) { /* One for RT, one for FS */
		simple_unlock(&processor_list_lock);
		return KERN_FAILURE;
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics *stats = &processor->processor_data.sched_stats;

		out->ps_cpuid 			= processor->cpu_id;
		out->ps_csw_count 		= stats->csw_count;
		out->ps_preempt_count 		= stats->preempt_count;
		out->ps_preempted_rt_count 	= stats->preempted_rt_count;
		out->ps_preempted_by_rt_count 	= stats->preempted_by_rt_count;
		out->ps_rt_sched_count		= stats->rt_sched_count;
		out->ps_interrupt_count 	= stats->interrupt_count;
		out->ps_ipi_count 		= stats->ipi_count;
		out->ps_timer_pop_count 	= stats->timer_pop_count;
		out->ps_runq_count_sum 		= SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions	= stats->idle_transitions;
		out->ps_quantum_timer_expirations	= stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t) (processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* And include RT Queue information */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
	out++;
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	/* And include Fair Share Queue information at the end */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-2);
	out->ps_runq_count_sum = SCHED(fairshare_runq_stats_count_sum)();
	*count += (uint32_t)sizeof(struct _processor_statistics_np);
	
	return KERN_SUCCESS;
}
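
The size check at the top implies the caller's contract: the buffer must have room for one entry per processor plus two pseudo-entries, the RT queue (ps_cpuid -1) and the fair-share queue (ps_cpuid -2). A hedged in-kernel caller sketch; the allocation idiom here is illustrative, not the exact xnu call site:

	uint32_t bytes = (uint32_t)((processor_count + 2) *
	    sizeof(struct _processor_statistics_np));
	struct _processor_statistics_np *buf = kalloc(bytes);	/* illustrative */
	kern_return_t kr = get_sched_statistics(buf, &bytes);
	/* On KERN_SUCCESS, bytes has been rewritten to the number of
	 * bytes actually filled in. */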
Example #14
File: lock.c Project: 0xffea/gnumach
void lock_write(
	register lock_t	l)
{
	register int	i;

	check_simple_locks();
	simple_lock(&l->interlock);

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock.
		 */
		l->recursion_depth++;
		simple_unlock(&l->interlock);
		return;
	}

	/*
	 *	Try to acquire the want_write bit.
	 */
	while (l->want_write) {
		if ((i = lock_wait_time) > 0) {
			simple_unlock(&l->interlock);
			while (--i > 0 && l->want_write)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->want_write) {
			l->waiting = TRUE;
			thread_sleep(l,
				simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}
	l->want_write = TRUE;

	/* Wait for readers (and upgrades) to finish */

	while ((l->read_count != 0) || l->want_upgrade) {
		if ((i = lock_wait_time) > 0) {
			simple_unlock(&l->interlock);
			while (--i > 0 && (l->read_count != 0 ||
					l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
			l->waiting = TRUE;
			thread_sleep(l,
				simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}
	simple_unlock(&l->interlock);
}
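
The lock_wait_time dance in lock_write() is a classic bounded spin-then-block: spin briefly with the interlock dropped, betting that the holder is mid-critical-section on another CPU, and only sleep if the lock stays contended. A hedged user-space analogue of the want_write half, including a release side that plays the role of thread_wakeup():

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t interlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  lock_cv   = PTHREAD_COND_INITIALIZER;
static volatile bool want_write;
static int lock_wait_time = 100;	/* spin budget, tunable */

void my_lock_write(void)
{
	int i;

	pthread_mutex_lock(&interlock);
	while (want_write) {
		if ((i = lock_wait_time) > 0) {
			/* Spin with the interlock dropped; cheap if
			 * the holder releases on another CPU soon. */
			pthread_mutex_unlock(&interlock);
			while (--i > 0 && want_write)
				continue;
			pthread_mutex_lock(&interlock);
		}
		if (want_write)		/* still held: block instead */
			pthread_cond_wait(&lock_cv, &interlock);
	}
	want_write = true;
	pthread_mutex_unlock(&interlock);
}

void my_lock_done(void)
{
	pthread_mutex_lock(&interlock);
	want_write = false;
	pthread_cond_broadcast(&lock_cv);	/* thread_wakeup(l) analogue */
	pthread_mutex_unlock(&interlock);
}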
Example #15
File: lock.c Project: 0xffea/gnumach
/*
 *	Routine:	lock_read_to_write
 *	Function:
 *		Improves a read-only lock to one with
 *		write permission.  If another reader has
 *		already requested an upgrade to a write lock,
 *		no lock is held upon return.
 *
 *		Returns TRUE if the upgrade *failed*.
 */
boolean_t lock_read_to_write(
	register lock_t	l)
{
	register int	i;

	check_simple_locks();
	simple_lock(&l->interlock);

	l->read_count--;

	if (l->thread == current_thread()) {
		/*
		 *	Recursive lock.
		 */
		l->recursion_depth++;
		simple_unlock(&l->interlock);
		return(FALSE);
	}

	if (l->want_upgrade) {
		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released a read lock, wake
		 *	him up.
		 */
		if (l->waiting && (l->read_count == 0)) {
			l->waiting = FALSE;
			thread_wakeup(l);
		}

		simple_unlock(&l->interlock);
		return TRUE;
	}

	l->want_upgrade = TRUE;

	while (l->read_count != 0) {
		if ((i = lock_wait_time) > 0) {
			simple_unlock(&l->interlock);
			while (--i > 0 && l->read_count != 0)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->read_count != 0) {
			l->waiting = TRUE;
			thread_sleep(l,
				simple_lock_addr(l->interlock), FALSE);
			simple_lock(&l->interlock);
		}
	}

	simple_unlock(&l->interlock);
	return FALSE;
}
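
Note the inverted convention relative to Example #7: here TRUE means the upgrade failed, and the read lock has already been dropped on that path, so the caller must reacquire from scratch and revalidate whatever it found while reading. A hedged sketch:

void update_entry_blocking(lock_t l)
{
	lock_read(l);
	/* ... lookup ... */

	if (lock_read_to_write(l)) {
		/* Failed: no lock is held at this point.  Reacquire
		 * as a writer and redo the lookup, since the state
		 * may have changed while the lock was dropped. */
		lock_write(l);
		/* ... revalidate ... */
	}
	/* Write lock held on both paths. */
	/* ... modify ... */
	lock_done(l);
}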
Example #16
/*
 * dmio_ioctl:
 *
 *	Ioctl file op.
 */
static int
dmio_ioctl(struct file *fp, u_long cmd, void *data)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	int error, s;

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);

	case DMIO_SETFUNC:
	    {
		struct dmio_setfunc *dsf = data;
		struct dmover_session *dses;

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (ds->ds_session != NULL ||
		    (ds->ds_flags & DMIO_STATE_LARVAL) != 0) {
			simple_unlock(&ds->ds_slock);
			splx(s);
			return (EBUSY);
		}

		ds->ds_flags |= DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);

		dsf->dsf_name[DMIO_MAX_FUNCNAME - 1] = '\0';
		error = dmover_session_create(dsf->dsf_name, &dses);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		if (error == 0) {
			dses->dses_cookie = ds;
			ds->ds_session = dses;
		}
		ds->ds_flags &= ~DMIO_STATE_LARVAL;

		simple_unlock(&ds->ds_slock);
		splx(s);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return (error);
}
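
DMIO_STATE_LARVAL is doing the real work in that ioctl: it marks the session as mid-setup so the simple lock can be dropped across the sleeping dmover_session_create() call without a second DMIO_SETFUNC racing in. The same claim-drop-publish pattern in a condensed, hedged form (all names hypothetical):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static bool obj_busy;			/* analogue of DMIO_STATE_LARVAL */
static void *obj_session;		/* analogue of ds->ds_session */

extern int slow_create(void **out);	/* stands in for dmover_session_create */

int obj_setup(void)
{
	void *session;
	int error;

	pthread_mutex_lock(&obj_lock);
	if (obj_session != NULL || obj_busy) {
		pthread_mutex_unlock(&obj_lock);
		return EBUSY;		/* setup already done or in flight */
	}
	obj_busy = true;		/* claim the slot, then drop the lock */
	pthread_mutex_unlock(&obj_lock);

	error = slow_create(&session);	/* may sleep; lock not held */

	pthread_mutex_lock(&obj_lock);
	if (error == 0)
		obj_session = session;	/* publish under the lock */
	obj_busy = false;
	pthread_mutex_unlock(&obj_lock);
	return error;
}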
Example #17
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/* 
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
Example #18
static void
rf_RaidIOThread(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;
	RF_DiskQueueData_t *req;
	int s;

	raidPtr = (RF_Raid_t *) arg;

	s = splbio();
	simple_lock(&(raidPtr->iodone_lock));

	while (!raidPtr->shutdown_raidio) {
		/* if there is nothing to do, then snooze. */
		if (TAILQ_EMPTY(&(raidPtr->iodone)) &&
		    rf_buf_queue_check(raidPtr->raidid)) {
			ltsleep(&(raidPtr->iodone), PRIBIO, "raidiow", 0,
				&(raidPtr->iodone_lock));
		}

		/* Check for deferred parity-map-related work. */
		if (raidPtr->parity_map != NULL) {
			simple_unlock(&(raidPtr->iodone_lock));
			rf_paritymap_checkwork(raidPtr->parity_map);
			simple_lock(&(raidPtr->iodone_lock));
		}

		/* See what I/Os, if any, have arrived */
		while ((req = TAILQ_FIRST(&(raidPtr->iodone))) != NULL) {
			TAILQ_REMOVE(&(raidPtr->iodone), req, iodone_entries);
			simple_unlock(&(raidPtr->iodone_lock));
			rf_DiskIOComplete(req->queue, req, req->error);
			(req->CompleteFunc) (req->argument, req->error);
			simple_lock(&(raidPtr->iodone_lock));
		}

		/* process any pending outgoing IO */
		simple_unlock(&(raidPtr->iodone_lock));
		raidstart(raidPtr);
		simple_lock(&(raidPtr->iodone_lock));

	}

	/* Let rf_ShutdownEngine know that we're done... */
	raidPtr->shutdown_raidio = 0;
	wakeup(&(raidPtr->shutdown_raidio));

	simple_unlock(&(raidPtr->iodone_lock));
	splx(s);

	kthread_exit(0);
}
Example #19
static void
mk_timer_expire(
	void			*p0,
	__unused void		*p1)
{
	mk_timer_t			timer = p0;
	ipc_port_t			port;

	simple_lock(&timer->lock);

	if (timer->active > 1) {
		timer->active--;
		simple_unlock(&timer->lock);
		return;
	}

	port = timer->port;
	assert(port != IP_NULL);
	assert(timer->active == 1);

	while (timer->is_armed && timer->active == 1) {
		mk_timer_expire_msg_t		msg;

		timer->is_armed = FALSE;
		simple_unlock(&timer->lock);

		msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
		msg.header.msgh_remote_port = port;
		msg.header.msgh_local_port = MACH_PORT_NULL;
		msg.header.msgh_reserved = msg.header.msgh_id = 0;

		msg.unused[0] = msg.unused[1] = msg.unused[2] = 0;

		(void) mach_msg_send_from_kernel_proper(&msg.header, sizeof (msg));

		simple_lock(&timer->lock);
	}

	if (--timer->active == 0 && timer->is_dead) {
		simple_unlock(&timer->lock);
		zfree(mk_timer_zone, timer);

		ipc_port_release_send(port);
		return;
	}

	simple_unlock(&timer->lock);
}
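
mk_timer_expire() and mk_timer_port_destroy() (Example #2) share a small reference protocol: 'active' counts outstanding users of the timer, 'is_dead' marks teardown, and whichever side drops 'active' to zero after 'is_dead' is set performs the zfree() and the send-right release. Reduced to its core, a hedged sketch ('obj_free' stands in for both):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct timer_obj {
	pthread_mutex_t lock;
	int active;		/* outstanding users/callouts */
	bool is_dead;		/* teardown requested */
};

static void obj_free(struct timer_obj *t) { free(t); }

void timer_release(struct timer_obj *t)
{
	pthread_mutex_lock(&t->lock);
	if (--t->active == 0 && t->is_dead) {
		/* Last reference: nobody else can hold or take the
		 * lock now, so unlocking and freeing is safe. */
		pthread_mutex_unlock(&t->lock);
		obj_free(t);
		return;
	}
	pthread_mutex_unlock(&t->lock);
}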
Example #20
static int
tap_dev_kqfilter(int unit, struct knote *kn)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);

	if (sc == NULL)
		return (ENXIO);

	KERNEL_LOCK(1, NULL);
	switch(kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tap_read_filterops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tap_seltrue_filterops;
		break;
	default:
		KERNEL_UNLOCK_ONE(NULL);
		return (EINVAL);
	}

	kn->kn_hook = sc;
	simple_lock(&sc->sc_kqlock);
	SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
	simple_unlock(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
	return (0);
}
Example #21
/*
 * Called by main() when ufs is going to be mounted as root.
 */
int
lfs_mountroot(void)
{
	extern struct vnode *rootvp;
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	int error;
	
	/*
	 * Get vnodes for swapdev and rootdev.
	 */
	if ((error = bdevvp(swapdev, &swapdev_vp)) ||
	    (error = bdevvp(rootdev, &rootvp))) {
		printf("lfs_mountroot: can't setup bdevvp's\n");
		return (error);
	}
	if ((error = vfs_rootmountalloc("lfs", "root_device", &mp)) != 0)
		return (error);
	if ((error = lfs_mountfs(rootvp, mp, p)) != 0) {
		mp->mnt_vfc->vfc_refcount--;
		vfs_unbusy(mp, p);
		free(mp, M_MOUNT);
		return (error);
	}
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	(void)lfs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp, p);
	return (0);
}
Example #22
/* Updated every time a logical CPU goes offline/online */
void
commpage_update_active_cpus(void)
{
	char	    *cp;
	volatile uint8_t    *ip;
	
	/* At least 32-bit commpage must be initialized */
	if (!commPagePtr32)
		return;

	simple_lock(&commpage_active_cpus_lock);

	cp = commPagePtr32;
	cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_BASE_ADDRESS);
	ip = (volatile uint8_t*) cp;
	*ip = (uint8_t) processor_avail_count;
	
	cp = commPagePtr64;
	if ( cp ) {
		cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_START_ADDRESS);
		ip = (volatile uint8_t*) cp;
		*ip = (uint8_t) processor_avail_count;
	}

	simple_unlock(&commpage_active_cpus_lock);
}
Example #23
void
wskbd_hotkey_put(keysym_t sym)
{
	int s, changed;
	u_int nxtpos;

	changed = 0;

	s = spltty();
	simple_lock(&queue_lock);

	nxtpos = (queue_head + 1) % WSKBD_HOTKEY_MAXEVENTS;
	if (nxtpos != queue_tail) {
		ksym_queue[queue_head] = sym;
		queue_head = nxtpos;
		changed = 1;
	}
#ifdef DEBUG
	else
		printf("wskbd_hotkey_put: losing hotkey\n");
#endif

	simple_unlock(&queue_lock);
	splx(s);

	if (changed != 0)
		wakeup(ksym_queue);
}
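
wskbd_hotkey_put() implies the consumer's invariants: the ring is empty when queue_head == queue_tail, and because the producer refuses to advance head onto tail, one slot is permanently sacrificed to distinguish full from empty. A hedged sketch of the matching consumer, reusing the variables from the function above:

static int
wskbd_hotkey_get(keysym_t *symp)
{
	int s, ok = 0;

	s = spltty();
	simple_lock(&queue_lock);
	if (queue_tail != queue_head) {
		*symp = ksym_queue[queue_tail];
		queue_tail = (queue_tail + 1) % WSKBD_HOTKEY_MAXEVENTS;
		ok = 1;
	}
	simple_unlock(&queue_lock);
	splx(s);
	return ok;
}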
Example #24
void
ntfs_nthashreinit()
{
	struct ntnode *ip;
	struct nthashhead *oldhash, *hash;
	u_long oldmask, mask, val;
	int i;

	hash = HASHINIT(desiredvnodes, M_NTFSNTHASH, M_WAITOK, &mask);

	simple_lock(&ntfs_nthash_slock);
	oldhash = ntfs_nthashtbl;
	oldmask = ntfs_nthash;
	ntfs_nthashtbl = hash;
	ntfs_nthash = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((ip = LIST_FIRST(&oldhash[i])) != NULL) {
			LIST_REMOVE(ip, i_hash);
			val = NTNOHASH(ip->i_dev, ip->i_number);
			LIST_INSERT_HEAD(&hash[val], ip, i_hash);
		}
	}
	simple_unlock(&ntfs_nthash_slock);
	hashdone(oldhash, M_NTFSNTHASH);
}
Example #25
File: himem.c Project: rohsaini/mkunity
void
himem_revert(
	hil_t		hil)
{
	hil_t		next;
	boolean_t	wakeup = FALSE;
	spl_t		ipl;

	while(hil) {
		if (hil->length) {
			bcopy((char *)phystokv(hil->low_page + hil->offset),
				(char *)phystokv(hil->high_addr),
			      hil->length);
		}
		hil->high_addr = 0;
		hil->length = 0;
		hil->offset = 0;
		next = hil->next;
		ipl = splhi();
		simple_lock(&hil_lock);
		if (!(hil->next = hil_head))
			wakeup = TRUE;
		hil_head = hil;
		simple_unlock(&hil_lock);
		splx(ipl);
		hil = next;
	}
	if (wakeup)
		thread_wakeup((event_t)&hil_head);
}
Example #26
static __inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
	int lcv;
	struct vm_page *pg;
	
	for (lcv = 0 ; lcv < n ; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		simple_lock(&anons[lcv]->an_lock);
		pg = anons[lcv]->an_page;
		if (pg && (pg->pg_flags & PG_BUSY) == 0 && pg->loan_count == 0) {
			uvm_lock_pageq();
			if (pg->wire_count == 0) {
#ifdef UBC
				pmap_clear_reference(pg);
#else
				pmap_page_protect(pg, VM_PROT_NONE);
#endif
				uvm_pagedeactivate(pg);
			}
			uvm_unlock_pageq();
		}
		simple_unlock(&anons[lcv]->an_lock);
	}
}
Example #27
File: chario.c Project: 0xffea/gnumach
/*
 * Handle port-death (dead reply port) for tty.
 * No locks may be held.
 * May run on any CPU.
 */
boolean_t
tty_portdeath(
	struct tty *	tp,
	ipc_port_t	port)
{
	register spl_t	spl = spltty();
	register boolean_t	result;

	simple_lock(&tp->t_lock);

	/*
	 * The queues may never have been initialized
	 */
	if (tp->t_delayed_read.next == 0) {
	    result = FALSE;
	}
	else {
	    result =
		tty_queue_clean(&tp->t_delayed_read,  port,
				tty_close_read_reply)
	     || tty_queue_clean(&tp->t_delayed_write, port,
				tty_close_write_reply)
	     || tty_queue_clean(&tp->t_delayed_open,  port,
				tty_close_open_reply);
	}
	simple_unlock(&tp->t_lock);
	splx(spl);

	return result;
}
Example #28
File: hw_perfmon.c Project: SbIm/xnu-env
int perfmon_release_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;
	task_t old_perfmon_owner = hw_perfmon_owner;
  
	simple_lock(&hw_perfmon_lock);
  
	if(task!=hw_perfmon_owner) {
		retval = KERN_NO_ACCESS;
	} else {
		if(old_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count>0) {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
#endif
				retval = KERN_NO_ACCESS;
			} else {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
#endif
				hw_perfmon_owner = TASK_NULL;
				retval = KERN_SUCCESS;
			}
		} else {
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
#endif
			hw_perfmon_owner = TASK_NULL;
			retval = KERN_SUCCESS;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}
Example #29
File: hw_perfmon.c Project: SbIm/xnu-env
int perfmon_disable(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;
  
	if(!(thread->machine.specFlags & perfMonitor)) {
		return KERN_NO_ACCESS; /* not enabled */
	} else {
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count--;
		simple_unlock(&hw_perfmon_lock);
		perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */
	}
  
	thread->machine.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
	if(thread==current_thread()) {
		PerProcTable[cpu_number()].ppe_vaddr->spcFlags &= ~perfMonitor; /* update per_proc */
	}
	sv->save_mmcr0 = 0;
	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;
  
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thread->machine.pmcovfl[curPMC] = 0;
		thread->machine.perfmonFlags = 0;
	}
  
#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif  

	return KERN_SUCCESS;
}
Example #30
/*
 * sysmon_wdog_register:
 *
 *	Register a watchdog device.
 */
int
sysmon_wdog_register(struct sysmon_wdog *smw)
{
	struct sysmon_wdog *lsmw;
	int error = 0;

	simple_lock(&sysmon_wdog_list_slock);

	for (lsmw = LIST_FIRST(&sysmon_wdog_list); lsmw != NULL;
	     lsmw = LIST_NEXT(lsmw, smw_list)) {
		if (strcmp(lsmw->smw_name, smw->smw_name) == 0) {
			error = EEXIST;
			goto out;
		}
	}

	smw->smw_mode = WDOG_MODE_DISARMED;
	smw->smw_tickler = (pid_t) -1;
	smw->smw_refcnt = 0;
	sysmon_wdog_count++;
	LIST_INSERT_HEAD(&sysmon_wdog_list, smw, smw_list);

 out:
	simple_unlock(&sysmon_wdog_list_slock);
	return (error);
}