Example #1
static void
slice_allocator_garbage_collect(slice_allocator_t *sa)
{
	sa_hrtime_t now = osif_gethrtime();
	int done = 0;
	
	lck_spin_lock(sa->spinlock);
	
	do {
		if (!list_is_empty(&sa->free)) {
			slice_t *slice = list_tail(&sa->free);
			
#ifdef SA_CHECK_SLICE_SIZE
			if (sa != slice->sa) {
				REPORT0("slice_allocator_free - slice not owned by sa detected.\n")
			}
#endif /* SA_CHECK_SLICE_SIZE */
			
			if (now - slice->time_freed >
			    SA_MAX_SLICE_FREE_MEM_AGE) {
				list_remove_tail(&sa->free);
				
				lck_spin_unlock(sa->spinlock);
				slice_fini(slice);
				osif_free(slice, sa->slice_size);
				lck_spin_lock(sa->spinlock);
			} else {
				done = 1;
			}
		} else {
			done = 1;
		}
	} while (!done);
	
	lck_spin_unlock(sa->spinlock);
}
Example #2
static uint64_t
slice_allocator_release_pages(slice_allocator_t *sa, uint64_t num_pages)
{
	uint64_t num_pages_released = 0;
	list_t *list = &sa->free;
	
	lck_spin_lock(sa->spinlock);
	
	while (!list_is_empty(list) && (num_pages_released < num_pages)) {
		slice_t *slice = list_head(list);
		list_remove(list, slice);
		
		lck_spin_unlock(sa->spinlock);
		slice_fini(slice);
		
		osif_free(slice, sa->slice_size);
		num_pages_released += sa->slice_size / PAGE_SIZE;
		
		lck_spin_lock(sa->spinlock);
	}
	
	lck_spin_unlock(sa->spinlock);
	
	return num_pages_released;
}
Example #3
#ifndef DEBUG
void *
slice_allocator_alloc(slice_allocator_t *sa)
#else
void *
slice_allocator_alloc(slice_allocator_t *sa, sa_size_t size)
#endif /* !DEBUG */
{
	slice_t *slice = 0;
	
	lck_spin_lock(sa->spinlock);
	
	/*
	 * Locate a slice with residual capacity. First, check for a partially
	 * full slice, and use some more of its capacity. Next, look to see if
	 * we have a ready to go empty slice. If not, finally go to underlying
	 * allocator for a new slice.
	 */
	if (!list_is_empty(&sa->partial)) {
		slice = list_head(&sa->partial);
	} else if (!list_is_empty(&sa->free)) {
		slice = list_tail(&sa->free);
		list_remove_tail(&sa->free);
		list_insert_head(&sa->partial, slice);
	} else {
		lck_spin_unlock(sa->spinlock);
		slice = (slice_t *)osif_malloc(sa->slice_size);
		slice_init(slice, sa);
		lck_spin_lock(sa->spinlock);
		
		list_insert_head(&sa->partial, slice);
	}
	
#ifdef SA_CHECK_SLICE_SIZE
	if (sa->max_alloc_size != slice->sa->max_alloc_size) {
		REPORT("slice_allocator_alloc - alloc size (%llu) sa %llu slice"
			   " %llu\n", size, sa->max_alloc_size,
			   slice->sa->max_alloc_size);
	}
#endif /* SA_CHECK_SLICE_SIZE */
	
	/* Grab memory from the slice */
#ifndef DEBUG
	void *p = slice_alloc(slice);
#else
	void *p = slice_alloc(slice, size);
#endif /* !DEBUG */
	
	/*
	 * Check to see if the slice buffer has become full. If it has, then
	 * move it into the full list so that we no longer keep trying to
	 * allocate from it.
	 */
	if (slice_is_full(slice)) {
		list_remove(&sa->partial, slice);
#ifdef SLICE_ALLOCATOR_TRACK_FULL_SLABS
		list_insert_head(&sa->full, slice);
#endif /* SLICE_ALLOCATOR_TRACK_FULL_SLABS */
	}
	
	lck_spin_unlock(sa->spinlock);
	
	return (p);
}
Example #4
kern_return_t
ecc_log_record_event(const struct ecc_event *ev)
{
	spl_t x;

	if (ev->count > ECC_EVENT_INFO_DATA_ENTRIES) {
		panic("Count of %u on ecc event is too large.", (unsigned)ev->count);
	}

	x = splhigh();
	lck_spin_lock(&ecc_data_lock);

	ecc_correction_count++;

	if (ecc_data_next_read == ecc_data_next_write && !ecc_data_empty)  {
		lck_spin_unlock(&ecc_data_lock);
		splx(x);
		return KERN_FAILURE;
	}

	bcopy(ev, &ecc_data[ecc_data_next_write], sizeof(*ev));
	ecc_data_next_write++;
	ecc_data_next_write %= ECC_EVENT_BUFFER_COUNT;
	ecc_data_empty = FALSE;

	lck_spin_unlock(&ecc_data_lock);
	splx(x);

	return KERN_SUCCESS;
}
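Example #5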
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);
    pState->u32Reserved = 42;

    /*
     * Disable to prevent preemption while we grab the per-cpu spin lock.
     * Note! Only take the lock on the first call or we end up spinning for ever.
     */
    RTCCUINTREG fSavedFlags = ASMIntDisableFlags();
    RTCPUID     idCpu       = RTMpCpuId();
    if (RT_UNLIKELY(idCpu < RT_ELEMENTS(g_aPreemptHacks)))
    {
        Assert(g_aPreemptHacks[idCpu].cRecursion < UINT32_MAX / 2);
        if (++g_aPreemptHacks[idCpu].cRecursion == 1)
        {
            lck_spin_t *pSpinLock = g_aPreemptHacks[idCpu].pSpinLock;
            if (pSpinLock)
                lck_spin_lock(pSpinLock);
            else
                AssertFailed();
        }
    }
    ASMSetFlags(fSavedFlags);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
}
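Example #6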
RTDECL(int)  RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPT_CPUID_VAR();
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /*
     * Set the signal and increment the generation counter.
     */
    uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    fNew += 1 << RTSEMEVENTMULTIDARWIN_GEN_SHIFT;
    fNew |= RTSEMEVENTMULTIDARWIN_STATE_MASK;
    ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);

    /*
     * Wake up all sleeping threads.
     */
    if (pThis->fHaveBlockedThreads)
    {
        ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, false);
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_AWAKENED);
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
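Example #7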
RTDECL(int)  RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    Assert(pThis->cRefs > 0);
    RT_ASSERT_INTS_ON();

    lck_spin_lock(pThis->pSpinlock);

    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC); /* make the handle invalid */
    ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTIDARWIN_GEN_MASK);
    if (pThis->fHaveBlockedThreads)
    {
        /* abort waiting threads. */
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_RESTART);
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    return VINF_SUCCESS;
}
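Example #8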
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        uint32_t fIntSaved = ASMGetFlags();
        ASMIntDisable();
        lck_spin_lock(pThis->pSpinLock);
        pThis->fIntSaved = fIntSaved;
    }
    else
        lck_spin_lock(pThis->pSpinLock);
}
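Example #9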
RTDECL(int)  RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    if (pThis == NIL_RTSEMMUTEX)
        return VERR_INVALID_PARAMETER;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Kill it, wake up all waiting threads and release the reference.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMMUTEX_MAGIC, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
    lck_spin_lock(pThis->pSpinlock);

    if (pThis->cWaiters > 0)
        thread_wakeup_prim((event_t)pThis, FALSE /* one_thread */, THREAD_RESTART);

    if (ASMAtomicDecU32(&pThis->cRefs) == 0)
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
Example #10
/*
 *	Initialize all of the debugging state in a port.
 *	Insert the port into a global list of all allocated ports.
 */
void
ipc_port_init_debug(
	ipc_port_t	port,
	uintptr_t 	*callstack,
	unsigned int	callstack_max)
{
	unsigned int	i;

	port->ip_thread = current_thread();
	port->ip_timetrack = port_timestamp++;
	for (i = 0; i < callstack_max; ++i)
		port->ip_callstack[i] = callstack[i];	
	for (i = 0; i < IP_NSPARES; ++i)
		port->ip_spares[i] = 0;	

#ifdef MACH_BSD
	task_t task = current_task();
	if (task != TASK_NULL) {
		struct proc* proc = (struct proc*) get_bsdtask_info(task);
		if (proc)
			port->ip_spares[0] = proc_pid(proc);
	}
#endif /* MACH_BSD */

#if 0
	lck_spin_lock(&port_alloc_queue_lock);
	++port_count;
	if (port_count_warning > 0 && port_count >= port_count_warning)
		assert(port_count < port_count_warning);
	queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
#endif
}
Example #11
/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
        lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t   res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
Example #12
kern_return_t
ecc_log_get_next_event(struct ecc_event *ev)
{
	spl_t x;

	x = splhigh();
	lck_spin_lock(&ecc_data_lock);

	if (ecc_data_empty)  {
		assert(ecc_data_next_write == ecc_data_next_read);

		lck_spin_unlock(&ecc_data_lock);
		splx(x);
		return KERN_FAILURE;
	}

	bcopy(&ecc_data[ecc_data_next_read], ev, sizeof(*ev));
	ecc_data_next_read++;
	ecc_data_next_read %= ECC_EVENT_BUFFER_COUNT;

	if (ecc_data_next_read == ecc_data_next_write) {
		ecc_data_empty = TRUE;
	}

	lck_spin_unlock(&ecc_data_lock);
	splx(x);

	return KERN_SUCCESS;
}
Example #13
static void
slice_allocator_empty_list(slice_allocator_t *sa, list_t *list)
{
	lck_spin_lock(sa->spinlock);
	
	while (!list_is_empty(list)) {
		slice_t *slice = list_head(list);
		list_remove(list, slice);
		
		lck_spin_unlock(sa->spinlock);
		slice_fini(slice);
		osif_free(slice, sa->slice_size);
		lck_spin_lock(sa->spinlock);
	}
	
	lck_spin_unlock(sa->spinlock);
}
Example #14
void
ipc_port_track_dealloc(
	ipc_port_t		port)
{
	lck_spin_lock(&port_alloc_queue_lock);
	assert(port_count > 0);
	--port_count;
	queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
}
Example #15
static void
cpu_update(__unused void *arg)
{
	/* grab the lock */
	lck_spin_lock(ucode_slock);

	/* execute the update */
	update_microcode();

	/* release the lock */
	lck_spin_unlock(ucode_slock);
}
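Example #16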
int64_t
refcount_add(refcount_t *rc, __unused void *holder)
{
	int64_t count;

	lck_spin_lock((lck_spin_t *)&rc->rc_spinlck[0]);
	ASSERT(rc->rc_count >= 0);
	count = ++rc->rc_count;
	lck_spin_unlock((lck_spin_t *)&rc->rc_spinlck[0]);

	return (count);
}
Example #17
#ifndef DEBUG
void
slice_allocator_free(slice_allocator_t *sa, void *buf)
#else
void
slice_allocator_free(slice_allocator_t *sa, void *buf, sa_size_t size)
#endif /* !DEBUG */
{
	lck_spin_lock(sa->spinlock);
	
	/* Locate the slice buffer that the allocation lives within. */
	slice_t *slice;
	allocatable_row_t *row = 0;
	small_allocatable_row_t *small_row = 0;
	
	if (sa->flags & SMALL_ALLOC) {
		slice = slice_small_get_slice_from_row(buf, &small_row);
	} else {
		slice = slice_get_slice_from_row(buf, &row);
	}
	
#ifdef SA_CHECK_SLICE_SIZE
	if (sa != slice->sa) {
		REPORT0("slice_allocator_free - slice not owned by sa detected.\n")
	}
#endif /* SA_CHECK_SLICE_SIZE */
	
	/*
	 * If the slice was previously full, remove it from the full list and
	 * place it back on the partial list, since it now has room.
	 */
	if (slice_is_full(slice)) {
#ifdef SLICE_ALLOCATOR_TRACK_FULL_SLABS
		list_remove(&sa->full, slice);
#endif /* SLICE_ALLOCATOR_TRACK_FULL_SLABS */
		list_insert_tail(&sa->partial, slice);
	}
	
#ifndef DEBUG
	if (sa->flags & SMALL_ALLOC) {
		slice_small_free_row(slice, small_row);
	} else {
		slice_free_row(slice, row);
	}
#else
	slice_free_row(slice, row, size);
#endif /* !DEBUG */
	
	/* Finally migrate to the free list if needed. */
	if (slice_is_empty(slice)) {
		list_remove(&sa->partial, slice);
		slice->time_freed = osif_gethrtime();
		list_insert_head(&sa->free, slice);
	}
	
	lck_spin_unlock(sa->spinlock);
}
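Example #18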
int64_t
refcount_add_many(refcount_t *rc, uint64_t number, __unused void *holder)
{
	int64_t count;

	lck_spin_lock((lck_spin_t *)&rc->rc_spinlck[0]);
	ASSERT(rc->rc_count >= 0);
	rc->rc_count += number;
	count = rc->rc_count;
	lck_spin_unlock((lck_spin_t *)&rc->rc_spinlck[0]);

	return (count);
}
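Example #19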
int64_t
refcount_remove(refcount_t *rc, void *holder)
{
	int64_t count;

	lck_spin_lock((lck_spin_t *)&rc->rc_spinlck[0]);

	ASSERT(rc->rc_count >= 1);
	count = --rc->rc_count;

	lck_spin_unlock((lck_spin_t *)&rc->rc_spinlck[0]);
	return (count);
}
Example #20
static inline void
slice_insert_free_row(slice_t *slice, allocatable_row_t *row)
{
#ifdef SLICE_SPINLOCK
	lck_spin_lock(slice->spinlock);
#endif /* SLICE_SPINLOCK */
	
	row->navigation.next = slice->free_list.large;
	slice->free_list.large = row;
	
#ifdef SLICE_SPINLOCK
	lck_spin_unlock(slice->spinlock);
#endif /* SLICE_SPINLOCK */
}
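Example #21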
/**
 * Internal worker for RTSemMutexRequest and RTSemMutexRequestNoResume
 *
 * @returns IPRT status code.
 * @param   hMutexSem           The mutex handle.
 * @param   cMillies            The timeout.
 * @param   fInterruptible      Whether it's interruptible
 *                              (RTSemMutexRequestNoResume) or not
 *                              (RTSemMutexRequest).
 */
DECLINLINE(int) rtR0SemMutexDarwinRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, wait_interrupt_t fInterruptible)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Grab the lock and check out the state.
     */
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
    int             rc          = VINF_SUCCESS;
    lck_spin_lock(pThis->pSpinlock);

    /* Recursive call? */
    if (pThis->hNativeOwner == hNativeSelf)
    {
        Assert(pThis->cRecursions > 0);
        Assert(pThis->cRecursions < 256);
        pThis->cRecursions++;
    }

    /* Is it free and nobody ahead of us in the queue? */
    else if (   pThis->hNativeOwner == NIL_RTNATIVETHREAD
             && pThis->cWaiters     == 0)
    {
        pThis->hNativeOwner = hNativeSelf;
        pThis->cRecursions  = 1;
    }

    /* Polling call? */
    else if (cMillies == 0)
        rc = VERR_TIMEOUT;

    /* Yawn, time for a nap... */
    else
    {
        rc = rtR0SemMutexDarwinRequestSleep(pThis, cMillies, fInterruptible, hNativeSelf);
        IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
        return rc;
    }

    lck_spin_unlock(pThis->pSpinlock);
    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
    return rc;
}
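Example #22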
int64_t
refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
{
	int64_t count;

	lck_spin_lock((lck_spin_t *)&rc->rc_spinlck[0]);

	ASSERT(rc->rc_count >= number);
	rc->rc_count -= number;
	count = rc->rc_count;

	lck_spin_unlock((lck_spin_t *)&rc->rc_spinlck[0]);
	return (count);
}
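Example #23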
RTDECL(bool) RTSemMutexIsOwned(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, false);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, false);

    /*
     * Take the lock and do the check.
     */
    lck_spin_lock(pThis->pSpinlock);
    bool fRc = pThis->hNativeOwner != NIL_RTNATIVETHREAD;
    lck_spin_unlock(pThis->pSpinlock);

    return fRc;
}
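Example #24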
RTDECL(int)  RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPT_CPUID_VAR();
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTIDARWIN_STATE_MASK);

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
Example #25
static inline allocatable_row_t *
slice_get_row(slice_t *slice)
{
	if (slice->free_list.large == 0) {
		return (0);
	} else {
		allocatable_row_t *row;
		
#ifdef SLICE_SPINLOCK
		lck_spin_lock(slice->spinlock);
#endif /* SLICE_SPINLOCK */
		
		row = slice->free_list.large;
		slice->free_list.large = row->navigation.next;
		row->navigation.slice = slice;
		
#ifdef SLICE_SPINLOCK
		lck_spin_unlock(slice->spinlock);
#endif /* SLICE_SPINLOCK */
		return (row);
	}
}
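Example #26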
RTDECL(int)  RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Take the lock and do the job.
     */
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
    int             rc          = VINF_SUCCESS;
    lck_spin_lock(pThis->pSpinlock);

    if (pThis->hNativeOwner == hNativeSelf)
    {
        Assert(pThis->cRecursions > 0);
        if (--pThis->cRecursions == 0)
        {
            pThis->hNativeOwner = NIL_RTNATIVETHREAD;
            if (pThis->cWaiters > 0)
                thread_wakeup_prim((event_t)pThis, TRUE /* one_thread */, THREAD_AWAKENED);

        }
    }
    else
        rc = VERR_NOT_OWNER;

    lck_spin_unlock(pThis->pSpinlock);

    AssertRC(rc);
    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
    return rc;
}
Example #27
void
proc_spinlock(proc_t p)
{
	lck_spin_lock(&p->p_slock);
}
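Example #28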
/**
 * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis           The event semaphore.
 * @param   fFlags          See RTSemEventMultiWaitEx.
 * @param   uTimeout        See RTSemEventMultiWaitEx.
 * @param   pSrcPos         The source code position of the wait.
 */
static int rtR0SemEventMultiDarwinWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                                       PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
    if (uTimeout != 0 || (fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
        RT_ASSERT_PREEMPTIBLE();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /*
     * Is the event already signalled or do we have to wait?
     */
    int rc;
    uint32_t const fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    if (fOrgStateAndGen & RTSEMEVENTMULTIDARWIN_STATE_MASK)
        rc = VINF_SUCCESS;
    else
    {
        /*
         * We have to wait. So, we'll need to convert the timeout and figure
         * out if it's indefinite or not.
         */
        uint64_t uNsAbsTimeout = 1;
        if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
        {
            if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
                uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                         ? uTimeout * UINT32_C(1000000)
                         : UINT64_MAX;
            if (uTimeout == UINT64_MAX)
                fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
            else
            {
                uint64_t u64Now;
                if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
                {
                    if (uTimeout != 0)
                    {
                        u64Now = RTTimeSystemNanoTS();
                        uNsAbsTimeout = u64Now + uTimeout;
                        if (uNsAbsTimeout < u64Now) /* overflow */
                            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
                    }
                }
                else
                {
                    uNsAbsTimeout = uTimeout;
                    u64Now        = RTTimeSystemNanoTS();
                    uTimeout      = u64Now < uTimeout ? uTimeout - u64Now : 0;
                }
            }
        }

        if (   !(fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
            && uTimeout == 0)
        {
            /*
             * Poll call, we already checked the condition above so no need to
             * wait for anything.
             */
            rc = VERR_TIMEOUT;
        }
        else
        {
            for (;;)
            {
                /*
                 * Do the actual waiting.
                 */
                ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, true);
                wait_interrupt_t fInterruptible = fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE ? THREAD_ABORTSAFE : THREAD_UNINT;
                wait_result_t    rcWait;
                if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
                    rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
                else
                {
                    uint64_t u64AbsTime;
                    nanoseconds_to_absolutetime(uNsAbsTimeout, &u64AbsTime);
                    rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
                                                     (event_t)pThis, fInterruptible, u64AbsTime);
                }

                /*
                 * Deal with the wait result.
                 */
                if (RT_LIKELY(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC))
                {
                    switch (rcWait)
                    {
                        case THREAD_AWAKENED:
                            if (RT_LIKELY(ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen))
                                rc = VINF_SUCCESS;
                            else if (fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE)
                                rc = VERR_INTERRUPTED;
                            else
                                continue; /* Seen this happen after fork/exec/something. */
                            break;

                        case THREAD_TIMED_OUT:
                            Assert(!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE));
                            rc = VERR_TIMEOUT;
                            break;

                        case THREAD_INTERRUPTED:
                            Assert(fInterruptible != THREAD_UNINT);
                            rc = VERR_INTERRUPTED;
                            break;

                        case THREAD_RESTART:
                            AssertMsg(pThis->u32Magic == ~RTSEMEVENTMULTI_MAGIC, ("%#x\n", pThis->u32Magic));
                            rc = VERR_SEM_DESTROYED;
                            break;

                        default:
                            AssertMsgFailed(("rcWait=%d\n", rcWait));
                            rc = VERR_INTERNAL_ERROR_3;
                            break;
                    }
                }
                else
                    rc = VERR_SEM_DESTROYED;
                break;
            }
        }
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);
    return rc;
}
Example #29
void lck_rw_ilk_lock(lck_rw_t *lck)
{
    lck_spin_lock(lck);
}
Example #30
void
usimple_lock(usimple_lock_t l)
{
    lck_spin_lock((lck_spin_t *)l);
}
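Every example above operates on an lck_spin_t that was initialized elsewhere. For context, below is a minimal sketch of the usual lifecycle around those calls, built on the xnu lock-group KPIs declared in <kern/locks.h> (lck_grp_alloc_init, lck_spin_alloc_init, lck_spin_free, lck_grp_free); the my_* names are hypothetical and not taken from any example above.

#include <kern/locks.h>

/* Hypothetical globals for illustration only. */
static lck_grp_t  *my_lock_grp;
static lck_spin_t *my_spinlock;
static int         my_counter;

static void
my_locks_init(void)
{
	/* Allocate a lock group, then a spinlock belonging to it. */
	my_lock_grp = lck_grp_alloc_init("com.example.mylocks", LCK_GRP_ATTR_NULL);
	my_spinlock = lck_spin_alloc_init(my_lock_grp, LCK_ATTR_NULL);
}

static int
my_counter_increment(void)
{
	int value;

	/* Keep the critical section short; never block while holding a spinlock. */
	lck_spin_lock(my_spinlock);
	value = ++my_counter;
	lck_spin_unlock(my_spinlock);

	return value;
}

static void
my_locks_fini(void)
{
	/* Free the lock before its group. */
	lck_spin_free(my_spinlock, my_lock_grp);
	lck_grp_free(my_lock_grp);
}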