Example No. 1
void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	/* hw_atomic_sub() returns the new value; 0 means last reference */
	if (hw_atomic_sub(&grp->lck_grp_refcnt, 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}
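
The test against 0 works because hw_atomic_sub() returns the value after the decrement, so the caller that sees 0 holds the last reference. Below is a minimal user-space sketch of the same drop-and-free pattern using C11 atomics; the refcounted_t type and refcounted_release() are hypothetical names. Note the semantic difference: atomic_fetch_sub() returns the value before the decrement, so the last-reference test compares against 1.

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
	atomic_uint refcnt;
	/* ... payload ... */
} refcounted_t;

static void
refcounted_release(refcounted_t *obj)
{
	/* fetch_sub returns the value *before* the decrement,
	 * so the last reference sees 1 here, not 0 */
	if (atomic_fetch_sub_explicit(&obj->refcnt, 1,
	    memory_order_release) == 1) {
		/* no other thread can still reach obj */
		atomic_thread_fence(memory_order_acquire);
		free(obj);
	}
}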
Example No. 2
void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;
	int		updated;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
		return;
	}

	/* the unsigned result is cast to int so an underflow (a wrap to
	 * 0xFFFFFFFF) shows up as a negative value and trips the assert */
	updated = (int)hw_atomic_sub(lckcnt, 1);
	assert(updated >= 0);
}
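
A minimal sketch of the same underflow check with C11 atomics and hypothetical names: decrementing an unsigned counter that is already 0 wraps to UINT_MAX, which the cast-to-int assert above reads as a negative value.

#include <assert.h>
#include <stdatomic.h>

static atomic_uint counter;

static void
counter_decr(void)
{
	/* fetch_sub returns the previous value; 0 means this
	 * decrement wrapped the counter below zero */
	unsigned int old = atomic_fetch_sub(&counter, 1);
	assert(old != 0 && "counter underflow");
}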
Example No. 3
/*
 *	Routine:	semaphore_dereference
 *
 *	Release a reference on a semaphore.  If this is the last reference,
 *	the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t		semaphore)
{
	uint32_t collisions;
	spl_t spl_level;

	if (semaphore == NULL)
		return;

	if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
		return;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	ipc_port_t port = semaphore->port;

	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_port_dealloc_kernel(port);
	}

	/*
	 * Lock the semaphore to lock in the owner task reference.
	 * Then continue to try to lock the task (inverse order).
	 */
	spl_level = splsched();
	semaphore_lock(semaphore);
	for (collisions = 0; semaphore->active; collisions++) {
		task_t task = semaphore->owner;

		assert(task != TASK_NULL);
		
		if (task_lock_try(task)) {
			semaphore_destroy_internal(task, semaphore);
			/* semaphore unlocked */
			splx(spl_level);
			task_unlock(task);
			goto out;
		}
		
		/* failed to get out-of-order locks */
		semaphore_unlock(semaphore);
		splx(spl_level);
		mutex_pause(collisions);
		spl_level = splsched();
		semaphore_lock(semaphore);
	}
	semaphore_unlock(semaphore);
	splx(spl_level);

 out:
	zfree(semaphore_zone, semaphore);
}
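
Because the semaphore lock is taken before the task lock here, while the rest of the code takes them in the opposite order, the loop above must use a try-lock and back off on failure rather than block. A minimal user-space sketch of the same idiom, assuming POSIX threads and hypothetical lock names:

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t locka = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lockb = PTHREAD_MUTEX_INITIALIZER;

static void
lock_both_out_of_order(void)
{
	pthread_mutex_lock(&locka);
	while (pthread_mutex_trylock(&lockb) != 0) {
		/* failed to get the out-of-order lock:
		 * drop A, back off, and retry (cf. mutex_pause) */
		pthread_mutex_unlock(&locka);
		sched_yield();
		pthread_mutex_lock(&locka);
	}
	/* both locks held */
	pthread_mutex_unlock(&lockb);
	pthread_mutex_unlock(&locka);
}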
Example No. 4
/*
 * Called with interrupts disabled.
 */
void
processor_doshutdown(
	processor_t			processor)
{
	thread_t			old_thread, self = current_thread();
	processor_t			prev;
	processor_set_t			pset;

	/*
	 *	Get onto the processor to shutdown
	 */
	prev = thread_bind(processor);
	thread_block(THREAD_CONTINUE_NULL);

	assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
	if (dtrace_cpu_state_changed_hook)
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
#endif

	ml_cpu_down();

#if HIBERNATION
	if (processor_avail_count < 2) {
		hibernate_vm_lock();
		hibernate_vm_unlock();
	}
#endif

	pset = processor->processor_set;
	pset_lock(pset);
	processor->state = PROCESSOR_OFF_LINE;
	--pset->online_processor_count;
	(void)hw_atomic_sub(&processor_avail_count, 1);
	commpage_update_active_cpus();
	SCHED(processor_queue_shutdown)(processor);
	/* pset lock dropped */

	/*
	 * Continue processor shutdown in shutdown context.
	 *
	 * We save the current context in machine_processor_shutdown in such a way
	 * that when this thread is next invoked it will return from here instead of
	 * from the machine_switch_context() in thread_invoke like a normal context switch.
	 *
	 * As such, 'old_thread' is neither the idle thread nor the current thread - it's whatever
	 * thread invoked back to this one. (Usually, it's another processor's idle thread.)
	 *
	 * TODO: Make this a real thread_run of the idle_thread, so we don't have to keep this in sync
	 * with thread_invoke.
	 */
	thread_bind(prev);
	old_thread = machine_processor_shutdown(self, processor_offline, processor);

	thread_dispatch(old_thread, self);
}
Example No. 5
static void
xcRemote( void *foo )
{
	xcArg_t *pArg = (xcArg_t *)foo;

	if ( pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL ) {
		(pArg->f)(pArg->arg);
	}

	if (!hw_atomic_sub(&(pArg->waitVar), 1)) {        /* Drop the wait count */
		thread_wakeup((event_t)&(pArg->waitVar)); /* If we were the last, wake up the signaller */
	}
}
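
A minimal user-space sketch of the same rendezvous, assuming POSIX threads and hypothetical names: each worker decrements a shared wait count, the worker that brings it to zero wakes the signaller, and the signaller sleeps until the count drains.

#include <pthread.h>

typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	unsigned int    waitvar;	/* workers still running */
} xcwait_t;

/* each worker calls this once its handler has run */
static void
xc_worker_done(xcwait_t *xc)
{
	pthread_mutex_lock(&xc->lock);
	if (--xc->waitvar == 0)			/* drop the wait count */
		pthread_cond_signal(&xc->done);	/* last one wakes the signaller */
	pthread_mutex_unlock(&xc->lock);
}

/* the signaller blocks until every worker has checked in */
static void
xc_signaller_wait(xcwait_t *xc)
{
	pthread_mutex_lock(&xc->lock);
	while (xc->waitvar != 0)
		pthread_cond_wait(&xc->done, &xc->lock);
	pthread_mutex_unlock(&xc->lock);
}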
Example No. 6
void
OSMalloc_Tagfree(
	 OSMallocTag		tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X \n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		OSMalloc_tag_spin_lock();
		(void)remque((queue_entry_t)tag);
		OSMalloc_tag_unlock();
		kfree((void*)tag, sizeof(*tag));
	}
}
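
The hw_compare_and_store() call moves the tag's state from VALID to VALID|RELEASED atomically and exactly once, so a double free panics instead of silently corrupting the tag queue. A minimal sketch of the same one-shot transition using a C11 compare-and-swap, with hypothetical names and constants:

#include <stdatomic.h>
#include <stdbool.h>

#define STATE_VALID	0x00000001u
#define STATE_RELEASED	0x00000002u

/* returns true for exactly one caller; any later call fails
 * and can be treated as a double-release bug */
static bool
mark_released(atomic_uint *state)
{
	unsigned int expected = STATE_VALID;

	return atomic_compare_exchange_strong(state, &expected,
	    STATE_VALID | STATE_RELEASED);
}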
Example No. 7
/*
 * uio_free - free a uio_t allocated via uio_init.  this also frees all
 * 	associated iovecs.
 */
void uio_free( uio_t a_uio ) 
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s:%d - passing NULL uio_t\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
#if DEBUG
		/* hw_atomic_sub() returns the new value, so a result of
		 * UINT_MAX means the counter wrapped below zero */
		if (hw_atomic_sub(&uio_t_count, 1) == UINT_MAX)
			panic("%s:%d - uio_t_count underflow\n", __FILE__, __LINE__);
#endif
		kfree(a_uio, a_uio->uio_size);
	}
}
Example No. 8
void
vnode_pager_deallocate(
	memory_object_t		mem_obj)
{
	vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
	return;
}
Example No. 9
void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_sub(lckcnt, 1);
}
Example No. 10
/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();

	task_t			task;
	spl_t			s;
	int threadcnt;

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);
	
	thread_mtx_unlock(thread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, wait for concurrent expirations
	 *	on other processors.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
		thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_sched_call(thread, NULL);

	thread_unlock(thread);
	splx(s);

	thread_policy_reset(thread);

#if CONFIG_EMBEDDED
	thead_remove_taskwatch(thread);
#endif /* CONFIG_EMBEDDED */

	task = thread->task;
	uthread_cleanup(task, thread->uthread, task->bsd_info);
	threadcnt = hw_atomic_sub(&task->active_thread_count, 1);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	uthread_cred_free(thread->uthread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		stack_free_reserved(thread);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}
Example No. 11
/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();
	task_t			task;
	spl_t			s;
	int lastthread = 0;

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);
	
	thread_mtx_unlock(thread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, wait for concurrent expirations
	 *	on other processors.
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_unlock(thread);
	splx(s);

	thread_policy_reset(thread);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	task = thread->task;
	uthread_cleanup(task, thread->uthread, task->bsd_info);
	if (hw_atomic_sub(&task->active_thread_count, 1) == 0 &&
	    task->bsd_info != NULL) {
		lastthread = 1;
	}
	
	if (lastthread != 0)
		proc_exit(task->bsd_info);

	uthread_cred_free(thread->uthread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		if (thread->reserved_stack != thread->kernel_stack)
			stack_free_stack(thread->reserved_stack);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}