Example #1
File: mutex.c  Project: Brandon7357/rockbox
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = __running_self_entry();

    if(current == m->blocker.thread)
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(m->blocker.thread == NULL))
    {
        /* lock is open */
        m->blocker.thread = current;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    disable_irq();
    block_thread(current, TIMEOUT_BLOCK, &m->queue, &m->blocker);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
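Because the owner check at the top only increments m->recursion, the mutex is recursive: the owning thread may nest mutex_lock() calls as long as each one is paired with a mutex_unlock(). A minimal sketch of that nesting, using hypothetical helper names and assuming Rockbox's kernel.h (not code from the project itself):

#include <stdbool.h>
#include "kernel.h"                     /* struct mutex, mutex_lock()/mutex_unlock() (Rockbox) */

static struct mutex list_mutex;         /* assumed initialised once with mutex_init() */

static bool list_is_empty(void);        /* hypothetical helpers, not part of the */
static void unlink_first_element(void); /* Rockbox API */

static void list_remove_first(void)
{
    mutex_lock(&list_mutex);            /* owner re-entry: only m->recursion++ */
    unlink_first_element();
    mutex_unlock(&list_mutex);          /* m->recursion-- back to the outer level */
}

static void list_clear(void)
{
    mutex_lock(&list_mutex);            /* takes real ownership of the mutex */
    while (!list_is_empty())
        list_remove_first();            /* nested lock from the owner is safe */
    mutex_unlock(&list_mutex);          /* recursion is 0 here, so this releases */
}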
Example #2
File: mutex.c  Project: Brandon7357/rockbox
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(m->blocker.thread == __running_self_entry(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  m->blocker.thread->name,
                  __running_self_entry()->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    struct thread_entry *thread = WQ_THREAD_FIRST(&m->queue);
    if(LIKELY(thread == NULL))
    {
        /* no threads waiting - open the lock */
        m->blocker.thread = NULL;
        corelock_unlock(&m->cl);
        return;
    }

    const int oldlevel = disable_irq_save();
    /* Transfer of the owning thread is handled in the wakeup protocol
     * if priorities are enabled; otherwise just set it from the
     * queue head. */
#ifndef HAVE_PRIORITY_SCHEDULING
    m->blocker.thread = thread;
#endif
    unsigned int result = wakeup_thread(thread, WAKEUP_TRANSFER);
    restore_irq(oldlevel);

    corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
    (void)result;
}
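Taken together, the two functions give the usual lock/unlock pairing. A minimal caller sketch, assuming Rockbox's mutex_init() and a hypothetical shared counter:

#include "kernel.h"                  /* struct mutex, mutex_init/lock/unlock (Rockbox) */

static struct mutex counter_mutex;   /* protects shared_counter */
static int shared_counter;

void counter_module_init(void)
{
    mutex_init(&counter_mutex);      /* must run once before any lock attempt */
}

void counter_increment(void)
{
    mutex_lock(&counter_mutex);      /* blocks until the mutex is free */
    shared_counter++;                /* critical section */
    mutex_unlock(&counter_mutex);    /* owning thread only, per the assert above */
}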
/* Down the semaphore's count, or wait up to 'timeout' ticks for it to go up
 * if it is already 0. Passing TIMEOUT_NOBLOCK (0) as 'timeout' will not block,
 * so the call may safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret = OBJ_WAIT_TIMEDOUT;

    int oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    int count = s->count;
    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout != 0)
    {
        ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel);

        /* too many waits - block until count is upped... */
        struct thread_entry *current = __running_self_entry();

        block_thread(current, timeout, &s->queue, NULL);
        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        /* if an explicit wake was indicated, do no more */
        if(LIKELY(!wait_queue_ptr(current)))
            return OBJ_WAIT_SUCCEEDED;

        disable_irq();
        corelock_lock(&s->cl);

        /* see if anyone got us after the expired wait */
        if(wait_queue_try_remove(current))
        {
            count = s->count;
            if(count > 0)
            {
                /* down it belatedly */
                s->count = count - 1;
                ret = OBJ_WAIT_SUCCEEDED;
            }
        }
    }
    /* else just polling it */

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}
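A usage sketch for the semaphore, assuming Rockbox's semaphore_init() and semaphore_release() counterparts from the same kernel API; the producer/consumer names and slot count here are illustrative only:

#include <stdbool.h>
#include "kernel.h"                     /* struct semaphore, semaphore_* (Rockbox) */

static struct semaphore slot_sem;

void slots_init(void)
{
    /* up to 8 filled slots, initially none */
    semaphore_init(&slot_sem, 8, 0);
}

void producer_put(void)
{
    /* ... fill one slot ... */
    semaphore_release(&slot_sem);       /* ups the count, may wake one waiter */
}

bool consumer_get(void)
{
    /* wait up to one second (HZ ticks) for a filled slot */
    if (semaphore_wait(&slot_sem, HZ) != OBJ_WAIT_SUCCEEDED)
        return false;                   /* timed out */
    /* ... consume one slot ... */
    return true;
}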