Example #1
kern_return_t
ecc_log_get_next_event(struct ecc_event *ev)
{
	spl_t x;

	x = splhigh();
	lck_spin_lock(&ecc_data_lock);

	if (ecc_data_empty)  {
		assert(ecc_data_next_write == ecc_data_next_read);

		lck_spin_unlock(&ecc_data_lock);
		splx(x);
		return KERN_FAILURE;
	}

	bcopy(&ecc_data[ecc_data_next_read], ev, sizeof(*ev));
	ecc_data_next_read++;
	ecc_data_next_read %= ECC_EVENT_BUFFER_COUNT;

	if (ecc_data_next_read == ecc_data_next_write) {
		ecc_data_empty = TRUE;
	}

	lck_spin_unlock(&ecc_data_lock);
	splx(x);

	return KERN_SUCCESS;
}
Example #2
kern_return_t
ecc_log_record_event(const struct ecc_event *ev)
{
	spl_t x;

	if (ev->count > ECC_EVENT_INFO_DATA_ENTRIES) {
		panic("Count of %u on ecc event is too large.", (unsigned)ev->count);
	}

	x = splhigh();
	lck_spin_lock(&ecc_data_lock);

	ecc_correction_count++;

	if (ecc_data_next_read == ecc_data_next_write && !ecc_data_empty)  {
		lck_spin_unlock(&ecc_data_lock);
		splx(x);
		return KERN_FAILURE;
	}

	bcopy(ev, &ecc_data[ecc_data_next_write], sizeof(*ev));
	ecc_data_next_write++;
	ecc_data_next_write %= ECC_EVENT_BUFFER_COUNT;
	ecc_data_empty = FALSE;

	lck_spin_unlock(&ecc_data_lock);
	splx(x);

	return KERN_SUCCESS;
}
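
The two routines above form a classic interrupt-safe ring buffer: the writer drops events when the buffer is full, and the reader marks it empty when it catches up. A minimal consumer sketch follows; ecc_log_drain() and ecc_report_event() are hypothetical, only ecc_log_get_next_event() comes from Example #1.

static void
ecc_log_drain(void)
{
	struct ecc_event ev;

	/* Pull events until the buffer reports empty (KERN_FAILURE). */
	while (ecc_log_get_next_event(&ev) == KERN_SUCCESS) {
		ecc_report_event(&ev);	/* hypothetical logging sink */
	}
}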
Example #3
static uint64_t
slice_allocator_release_pages(slice_allocator_t *sa, uint64_t num_pages)
{
	uint64_t num_pages_released = 0;
	list_t *list = &sa->free;
	
	lck_spin_lock(sa->spinlock);
	
	while (!list_is_empty(list) && (num_pages_released < num_pages)) {
		slice_t *slice = list_head(list);
		list_remove(list, slice);
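		/*
		 * Drop the spinlock before slice_fini()/osif_free(): freeing
		 * memory may block, which is illegal with a spinlock held.
		 */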
		
		lck_spin_unlock(sa->spinlock);
		slice_fini(slice);
		
		osif_free(slice, sa->slice_size);
		num_pages_released += sa->slice_size / PAGE_SIZE;
		
		lck_spin_lock(sa->spinlock);
	}
	
	lck_spin_unlock(sa->spinlock);
	
	return num_pages_released;
}
Example #4
File: locks.c  Project: JackieXie168/xnu
/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t   res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
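
Note the contract: the spinlock is held on entry; with LCK_SLEEP_DEFAULT it is re-acquired after the thread blocks, while LCK_SLEEP_UNLOCK leaves it released. A hedged usage sketch follows, in which wait_for_ready(), my_lock, cond_ready, and MY_TIMEOUT_NS are hypothetical:

static void
wait_for_ready(void)
{
	uint64_t deadline;

	nanoseconds_to_absolutetime(MY_TIMEOUT_NS, &deadline);
	deadline += mach_absolute_time();

	lck_spin_lock(&my_lock);
	while (!cond_ready) {
		/* Sleeps with the lock released; holds it again on return. */
		wait_result_t wr = lck_spin_sleep_deadline(&my_lock,
		    LCK_SLEEP_DEFAULT, (event_t)&cond_ready, THREAD_UNINT,
		    deadline);
		if (wr == THREAD_TIMED_OUT)
			break;
	}
	/* ... consume the protected state ... */
	lck_spin_unlock(&my_lock);
}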
Example #5
#ifndef DEBUG
void *
slice_allocator_alloc(slice_allocator_t *sa)
#else
void *
slice_allocator_alloc(slice_allocator_t *sa, sa_size_t size)
#endif /* !DEBUG */
{
	slice_t *slice = 0;
	
	lck_spin_lock(sa->spinlock);
	
	/*
	 * Locate a slice with residual capacity. First, check for a partially
	 * full slice, and use some more of its capacity. Next, look to see if
	 * we have a ready to go empty slice. If not, finally go to underlying
	 * allocator for a new slice.
	 */
	if (!list_is_empty(&sa->partial)) {
		slice = list_head(&sa->partial);
	} else if (!list_is_empty(&sa->free)) {
		slice = list_tail(&sa->free);
		list_remove_tail(&sa->free);
		list_insert_head(&sa->partial, slice);
	} else {
		lck_spin_unlock(sa->spinlock);
		slice = (slice_t *)osif_malloc(sa->slice_size);
		slice_init(slice, sa);
		lck_spin_lock(sa->spinlock);
		
		list_insert_head(&sa->partial, slice);
	}
	
#ifdef SA_CHECK_SLICE_SIZE
	if (sa->max_alloc_size != slice->sa->max_alloc_size) {
		REPORT("slice_allocator_alloc - alloc size (%llu) sa %llu slice"
			   " %llu\n", size, sa->max_alloc_size,
			   slice->sa->max_alloc_size);
	}
#endif /* SA_CHECK_SLICE_SIZE */
	
	/* Grab memory from the slice */
#ifndef DEBUG
	void *p = slice_alloc(slice);
#else
	void *p = slice_alloc(slice, size);
#endif /* !DEBUG */
	
	/*
	 * Check to see if the slice buffer has become full. If it has, then
	 * move it into the full list so that we no longer keep trying to
	 * allocate from it.
	 */
	if (slice_is_full(slice)) {
		list_remove(&sa->partial, slice);
#ifdef SLICE_ALLOCATOR_TRACK_FULL_SLABS
		list_insert_head(&sa->full, slice);
#endif /* SLICE_ALLOCATOR_TRACK_FULL_SLABS */
	}
	
	lck_spin_unlock(sa->spinlock);
	
	return (p);
}
Example #6
RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    Assert(pState->u32Reserved == 42);
    pState->u32Reserved = 0;
    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);

    RTCPUID idCpu = RTMpCpuId();
    if (RT_UNLIKELY(idCpu < RT_ELEMENTS(g_aPreemptHacks)))
    {
        Assert(g_aPreemptHacks[idCpu].cRecursion > 0);
        if (--g_aPreemptHacks[idCpu].cRecursion == 0)
        {
            lck_spin_t *pSpinLock = g_aPreemptHacks[idCpu].pSpinLock;
            if (pSpinLock)
            {
                IPRT_DARWIN_SAVE_EFL_AC();
                lck_spin_unlock(pSpinLock);
                IPRT_DARWIN_RESTORE_EFL_AC();
            }
            else
                AssertFailed();
        }
    }
}
Example #7
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
        lck_spin_unlock(pThis->pSpinLock);
        ASMSetFlags(fIntSaved);
    }
    else
        lck_spin_unlock(pThis->pSpinLock);
}
Example #8
RTDECL(int)  RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPT_CPUID_VAR();
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /*
     * Set the signal and increment the generation counter.
     */
    uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    fNew += 1 << RTSEMEVENTMULTIDARWIN_GEN_SHIFT;
    fNew |= RTSEMEVENTMULTIDARWIN_STATE_MASK;
    ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);

    /*
     * Wake up all sleeping threads.
     */
    if (pThis->fHaveBlockedThreads)
    {
        ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, false);
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_AWAKENED);
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
Example #9
RTDECL(int)  RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    Assert(pThis->cRefs > 0);
    RT_ASSERT_INTS_ON();

    lck_spin_lock(pThis->pSpinlock);

    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC); /* make the handle invalid */
    ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTIDARWIN_GEN_MASK);
    if (pThis->fHaveBlockedThreads)
    {
        /* abort waiting threads. */
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_RESTART);
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    return VINF_SUCCESS;
}
Example #10
RTDECL(int)  RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    if (pThis == NIL_RTSEMMUTEX)
        return VERR_INVALID_PARAMETER;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Kill it, wake up all waiting threads and release the reference.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMMUTEX_MAGIC, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
    lck_spin_lock(pThis->pSpinlock);

    if (pThis->cWaiters > 0)
        thread_wakeup_prim((event_t)pThis, FALSE /* one_thread */, THREAD_RESTART);

    /* If that was the last reference, rtSemMutexDarwinFree() releases and
       destroys the spinlock (see Example #14); otherwise just unlock. */
    if (ASMAtomicDecU32(&pThis->cRefs) == 0)
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
Example #11
File: ipc_port.c  Project: Bitesher/xnu
/*
 *	Initialize all of the debugging state in a port.
 *	Insert the port into a global list of all allocated ports.
 */
void
ipc_port_init_debug(
	ipc_port_t	port,
	uintptr_t 	*callstack,
	unsigned int	callstack_max)
{
	unsigned int	i;

	port->ip_thread = current_thread();
	port->ip_timetrack = port_timestamp++;
	for (i = 0; i < callstack_max; ++i)
		port->ip_callstack[i] = callstack[i];	
	for (i = 0; i < IP_NSPARES; ++i)
		port->ip_spares[i] = 0;	

#ifdef MACH_BSD
	task_t task = current_task();
	if (task != TASK_NULL) {
		struct proc* proc = (struct proc*) get_bsdtask_info(task);
		if (proc)
			port->ip_spares[0] = proc_pid(proc);
	}
#endif /* MACH_BSD */

#if 0
	lck_spin_lock(&port_alloc_queue_lock);
	++port_count;
	if (port_count_warning > 0 && port_count >= port_count_warning)
		assert(port_count < port_count_warning);
	queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
#endif
}
Example #12
static void
slice_allocator_garbage_collect(slice_allocator_t *sa)
{
	sa_hrtime_t now = osif_gethrtime();
	int done = 0;
	
	lck_spin_lock(sa->spinlock);
	
	do {
		if (!list_is_empty(&sa->free)) {
			slice_t *slice = list_tail(&sa->free);
			
#ifdef SA_CHECK_SLICE_SIZE
			if (sa != slice->sa) {
				REPORT0("slice_allocator_free - slice not owned by sa detected.\n")
			}
#endif /* SA_CHECK_SLICE_SIZE */
			
			if (now - slice->time_freed >
			    SA_MAX_SLICE_FREE_MEM_AGE) {
				list_remove_tail(&sa->free);
				
				lck_spin_unlock(sa->spinlock);
				slice_fini(slice);
				osif_free(slice, sa->slice_size);
				lck_spin_lock(sa->spinlock);
			} else {
				done = 1;
			}
		} else {
			done = 1;
		}
	} while (!done);

	lck_spin_unlock(sa->spinlock);
}
Example #13
static void
slice_allocator_empty_list(slice_allocator_t *sa, list_t *list)
{
	lck_spin_lock(sa->spinlock);
	
	while (!list_is_empty(list)) {
		slice_t *slice = list_head(list);
		list_remove(list, slice);
		
		lck_spin_unlock(sa->spinlock);
		slice_fini(slice);
		osif_free(slice, sa->slice_size);
		lck_spin_lock(sa->spinlock);
	}
	
	lck_spin_unlock(sa->spinlock);
}
Example #14
/**
 * Called when the refcount reaches zero.
 */
static void rtSemMutexDarwinFree(PRTSEMMUTEXINTERNAL pThis)
{
    IPRT_DARWIN_SAVE_EFL_AC();

    lck_spin_unlock(pThis->pSpinlock);
    lck_spin_destroy(pThis->pSpinlock, g_pDarwinLockGroup);
    RTMemFree(pThis);

    IPRT_DARWIN_RESTORE_EFL_AC();
}
Example #15
File: ipc_port.c  Project: Bitesher/xnu
void
ipc_port_track_dealloc(
	ipc_port_t		port)
{
	lck_spin_lock(&port_alloc_queue_lock);
	assert(port_count > 0);
	--port_count;
	queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
}
Example #16
File: ucode.c  Project: JackieXie168/xnu
static void
cpu_update(__unused void *arg)
{
	/* grab the lock */
	lck_spin_lock(ucode_slock);

	/* execute the update */
	update_microcode();

	/* release the lock */
	lck_spin_unlock(ucode_slock);
}
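
For this to work, ucode_slock must have been allocated up front. A minimal setup sketch, assuming the standard XNU lck_* APIs; the group name and the init routine are assumptions, only ucode_slock appears in the example above.

static lck_grp_t *ucode_lck_grp;

static void
ucode_lock_init(void)
{
	/* Allocate a lock group and the spinlock used by cpu_update(). */
	ucode_lck_grp = lck_grp_alloc_init("ucode", LCK_GRP_ATTR_NULL);
	ucode_slock = lck_spin_alloc_init(ucode_lck_grp, LCK_ATTR_NULL);
}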
Example #17
int64_t
refcount_add(refcount_t *rc, __unused void *holder)
{
	int64_t count;

	lck_spin_lock((lck_spin_t *)&rc->rc_spinlck[0]);
	ASSERT(rc->rc_count >= 0);
	count = ++rc->rc_count;
	lck_spin_unlock((lck_spin_t *)&rc->rc_spinlck[0]);

	return (count);
}
Example #18
#ifndef DEBUG
void
slice_allocator_free(slice_allocator_t *sa, void *buf)
#else
void
slice_allocator_free(slice_allocator_t *sa, void *buf, sa_size_t size)
#endif /* !DEBUG */
{
	lck_spin_lock(sa->spinlock);
	
	/* Locate the slice buffer that the allocation lives within. */
	slice_t *slice;
	allocatable_row_t *row = 0;
	small_allocatable_row_t *small_row = 0;
	
	if (sa->flags & SMALL_ALLOC) {
		slice = slice_small_get_slice_from_row(buf, &small_row);
	} else {
		slice = slice_get_slice_from_row(buf, &row);
	}
	
#ifdef SA_CHECK_SLICE_SIZE
	if (sa != slice->sa) {
		REPORT0("slice_allocator_free - slice not owned by sa detected.\n")
	}
#endif /* SA_CHECK_SLICE_SIZE */
	
	/*
	 * If the slice was previously full, remove it from the free list and
	 * place it in the available list.
	 */
	if (slice_is_full(slice)) {
#ifdef SLICE_ALLOCATOR_TRACK_FULL_SLABS
		list_remove(&sa->full, slice);
#endif /* SLICE_ALLOCATOR_TRACK_FULL_SLABS */
		list_insert_tail(&sa->partial, slice);
	}
	
#ifndef DEBUG
	if (sa->flags & SMALL_ALLOC) {
		slice_small_free_row(slice, small_row);
	} else {
		slice_free_row(slice, row);
	}
#else
	slice_free_row(slice, row, size);
#endif /* !DEBUG */
	
	/* Finally migrate to the free list if needed. */
	if (slice_is_empty(slice)) {
		list_remove(&sa->partial, slice);
		slice->time_freed = osif_gethrtime();
		list_insert_head(&sa->free, slice);
	}
	
	lck_spin_unlock(sa->spinlock);
}
Example #19
int64_t
refcount_add_many(refcount_t *rc, uint64_t number, __unused void *holder)
{
	int64_t count;

	lck_spin_lock((lck_spin_t *)&rc->rc_spinlck[0]);
	ASSERT(rc->rc_count >= 0);
	rc->rc_count += number;
	count = rc->rc_count;
	lck_spin_unlock((lck_spin_t *)&rc->rc_spinlck[0]);

	return (count);
}
Example #20
int64_t
refcount_remove(refcount_t *rc, void *holder)
{
	int64_t count;

	lck_spin_lock((lck_spin_t *)&rc->rc_spinlck[0]);

	ASSERT(rc->rc_count >= 1);
	count = --rc->rc_count;

	lck_spin_unlock((lck_spin_t *)&rc->rc_spinlck[0]);
	return (count);
}
Example #21
int64_t
refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
{
	int64_t count;

	lck_spin_lock((lck_spin_t *)&rc->rc_spinlck[0]);

	ASSERT(rc->rc_count >= number);
	rc->rc_count -= number;
	count = rc->rc_count;

	lck_spin_unlock((lck_spin_t *)&rc->rc_spinlck[0]);
	return (count);
}
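
A hedged usage sketch of the refcount API above: obj_t, obj_hold(), obj_rele(), and obj_free() are hypothetical, only refcount_add() and refcount_remove() come from Examples #17 and #20.

void
obj_hold(obj_t *obj)
{
	(void) refcount_add(&obj->ref, NULL);
}

void
obj_rele(obj_t *obj)
{
	/* Free the object once the last reference is dropped. */
	if (refcount_remove(&obj->ref, NULL) == 0)
		obj_free(obj);
}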
Example #22
static inline void
slice_insert_free_row(slice_t *slice, allocatable_row_t *row)
{
#ifdef SLICE_SPINLOCK
	lck_spin_lock(slice->spinlock);
#endif /* SLICE_SPINLOCK */
	
	row->navigation.next = slice->free_list.large;
	slice->free_list.large = row;
	
#ifdef SLICE_SPINLOCK
	lck_spin_unlock(slice->spinlock);
#endif /* SLICE_SPINLOCK */
}
Example #23
/**
 * Internal worker for RTSemMutexRequest and RTSemMutexRequestNoResume
 *
 * @returns IPRT status code.
 * @param   hMutexSem           The mutex handle.
 * @param   cMillies            The timeout.
 * @param   fInterruptible      Whether it's interruptible
 *                              (RTSemMutexRequestNoResume) or not
 *                              (RTSemMutexRequest).
 */
DECLINLINE(int) rtR0SemMutexDarwinRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, wait_interrupt_t fInterruptible)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Grab the lock and check out the state.
     */
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
    int             rc          = VINF_SUCCESS;
    lck_spin_lock(pThis->pSpinlock);

    /* Recursive call? */
    if (pThis->hNativeOwner == hNativeSelf)
    {
        Assert(pThis->cRecursions > 0);
        Assert(pThis->cRecursions < 256);
        pThis->cRecursions++;
    }

    /* Is it free and nobody ahead of us in the queue? */
    else if (   pThis->hNativeOwner == NIL_RTNATIVETHREAD
             && pThis->cWaiters     == 0)
    {
        pThis->hNativeOwner = hNativeSelf;
        pThis->cRecursions  = 1;
    }

    /* Polling call? */
    else if (cMillies == 0)
        rc = VERR_TIMEOUT;

    /* Yawn, time for a nap... */
    else
    {
        rc = rtR0SemMutexDarwinRequestSleep(pThis, cMillies, fInterruptible, hNativeSelf);
        IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
        return rc;
    }

    lck_spin_unlock(pThis->pSpinlock);
    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
    return rc;
}
Example #24
RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, false);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, false);

    /*
     * Take the lock and do the check.
     */
    lck_spin_lock(pThis->pSpinlock);
    bool fRc = pThis->hNativeOwner != NIL_RTNATIVETHREAD;
    lck_spin_unlock(pThis->pSpinlock);

    return fRc;
}
Example #25
RTDECL(int)  RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPT_CPUID_VAR();
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTIDARWIN_STATE_MASK);

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
Example #26
static inline allocatable_row_t *
slice_get_row(slice_t *slice)
{
	if (slice->free_list.large == 0) {
		return (0);
	} else {
		allocatable_row_t *row;
		
#ifdef SLICE_SPINLOCK
		lck_spin_lock(slice->spinlock);
#endif /* SLICE_SPINLOCK */
		
		row = slice->free_list.large;
		slice->free_list.large = row->navigation.next;
		row->navigation.slice = slice;
		
#ifdef SLICE_SPINLOCK
		lck_spin_unlock(slice->spinlock);
#endif /* SLICE_SPINLOCK */
		return (row);
	}
}
Example #27
RTDECL(int)  RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Take the lock and do the job.
     */
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
    int             rc          = VINF_SUCCESS;
    lck_spin_lock(pThis->pSpinlock);

    if (pThis->hNativeOwner == hNativeSelf)
    {
        Assert(pThis->cRecursions > 0);
        if (--pThis->cRecursions == 0)
        {
            pThis->hNativeOwner = NIL_RTNATIVETHREAD;
            if (pThis->cWaiters > 0)
                thread_wakeup_prim((event_t)pThis, TRUE /* one_thread */, THREAD_AWAKENED);

        }
    }
    else
        rc = VERR_NOT_OWNER;

    lck_spin_unlock(pThis->pSpinlock);

    AssertRC(rc);
    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
    return rc;
}
Example #28
/**
 * Internal worker for the sleep scenario.
 *
 * Called owning the spinlock, returns without it.
 *
 * @returns IPRT status code.
 * @param   pThis               The mutex instance.
 * @param   cMillies            The timeout.
 * @param   fInterruptible      Whether it's interruptible
 *                              (RTSemMutexRequestNoResume) or not
 *                              (RTSemMutexRequest).
 * @param   hNativeSelf         The thread handle of the caller.
 */
static int rtR0SemMutexDarwinRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                          wait_interrupt_t fInterruptible, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Grab a reference and indicate that we're waiting.
     */
    pThis->cWaiters++;
    ASMAtomicIncU32(&pThis->cRefs);

    /*
     * Go to sleep, use the address of the mutex instance as sleep/blocking/event id.
     */
    wait_result_t rcWait;
    if (cMillies == RT_INDEFINITE_WAIT)
        rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
    else
    {
        uint64_t u64AbsTime;
        nanoseconds_to_absolutetime(cMillies * UINT64_C(1000000), &u64AbsTime);
        u64AbsTime += mach_absolute_time();

        rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
                                         (event_t)pThis, fInterruptible, u64AbsTime);
    }

    /*
     * Translate the rc.
     */
    int rc;
    switch (rcWait)
    {
        case THREAD_AWAKENED:
            if (RT_LIKELY(pThis->u32Magic == RTSEMMUTEX_MAGIC))
            {
                if (RT_LIKELY(   pThis->cRecursions  == 0
                              && pThis->hNativeOwner == NIL_RTNATIVETHREAD))
                {
                    pThis->cRecursions  = 1;
                    pThis->hNativeOwner = hNativeSelf;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    Assert(pThis->cRecursions  == 0);
                    Assert(pThis->hNativeOwner == NIL_RTNATIVETHREAD);
                    rc = VERR_INTERNAL_ERROR_3;
                }
            }
            else
                rc = VERR_SEM_DESTROYED;
            break;

        case THREAD_TIMED_OUT:
            Assert(cMillies != RT_INDEFINITE_WAIT);
            rc = VERR_TIMEOUT;
            break;

        case THREAD_INTERRUPTED:
            Assert(fInterruptible);
            rc = VERR_INTERRUPTED;
            break;

        case THREAD_RESTART:
            Assert(pThis->u32Magic == ~RTSEMMUTEX_MAGIC);
            rc = VERR_SEM_DESTROYED;
            break;

        default:
            AssertMsgFailed(("rcWait=%d\n", rcWait));
            rc = VERR_GENERAL_FAILURE;
            break;
    }

    /*
     * Dereference it and quit the lock.
     */
    Assert(pThis->cWaiters > 0);
    pThis->cWaiters--;

    Assert(pThis->cRefs > 0);
    if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);
    return rc;
}
Example #29
File: kern_fork.c  Project: 0xffea/xnu
void
proc_spinunlock(proc_t p)
{
	lck_spin_unlock(&p->p_slock);
}
Example #30
/**
 * Called when the refcount reaches zero.
 */
static void rtSemMutexDarwinFree(PRTSEMMUTEXINTERNAL pThis)
{
    lck_spin_unlock(pThis->pSpinlock);
    lck_spin_destroy(pThis->pSpinlock, g_pDarwinLockGroup);
    RTMemFree(pThis);
}