Code example #1
/* Up the semaphore's count and release any thread waiting at the head of the
 * queue. The count is saturated to the value of the 'max' parameter specified
 * in 'semaphore_init'. */
void semaphore_release(struct semaphore *s)
{
    unsigned int result = THREAD_NONE;

    int oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    struct thread_entry *thread = WQ_THREAD_FIRST(&s->queue);
    if(LIKELY(thread != NULL))
    {
        /* a thread was queued - wake it up and keep count at 0 */
        KERNEL_ASSERT(s->count == 0,
            "semaphore_release->threads queued but count=%d!\n", s->count);
        result = wakeup_thread(thread, WAKEUP_DEFAULT);
    }
    else
    {
        int count = s->count;
        if(count < s->max)
        {
            /* nothing waiting - up it */
            s->count = count + 1;
        }
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

#if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context)
    /* No thread switch if not thread context */
    if((result & THREAD_SWITCH) && is_thread_context())
        switch_thread();
#endif
    (void)result;
}
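For context, here is a minimal sketch of how semaphore_release() is typically paired with a waiter, for example an interrupt handler signalling a worker thread. The semaphore_init()/semaphore_wait() calls below are assumptions inferred from the comment above (the 'max' parameter), not verified prototypes; check the project's kernel header for the exact signatures.

/* Hypothetical usage sketch - not taken from the original source.
 * Assumes semaphore_init(s, max, start) and a blocking semaphore_wait(s). */
static struct semaphore data_ready;

void init(void)
{
    semaphore_init(&data_ready, 1, 0);   /* max count 1, initially empty */
}

void producer_isr(void)
{
    semaphore_release(&data_ready);      /* wakes the worker, or saturates at max */
}

void worker_thread(void)
{
    for(;;)
    {
        semaphore_wait(&data_ready);     /* blocks until the ISR releases */
        /* ... handle the event ... */
    }
}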
Code example #2
File: kbd.c Project: PyroOS/Pyro
static int kbd_irq( int nIrqNum, void* pData, SysCallRegs_s* psRegs )
{
    int	nCode;
    int	nEFlg;
    int n;
	
    nCode = inb_p( 0x60 );              /* read the raw scancode from the keyboard controller */

    nEFlg = cli();                      /* disable interrupts while acknowledging */
    n = inb_p( 0x61 );
    outb_p( n | 0x80, 0x61 );           /* pulse bit 7 of port 0x61 to acknowledge the scancode */
    outb_p( n & ~0x80, 0x61 );

    nCode = ConvertKeyCode( nCode );    /* translate the scancode to the driver's key code */

    if ( 0 != nCode )
    {
        /* store the key code in the 256-entry ring buffer */
        g_sVolume.zBuffer[ atomic_inc_and_read( &g_sVolume.nInPos ) & 0xff ] = nCode;

        atomic_inc( &g_sVolume.nBytesReceived );

        /* wake the reader thread if one is blocked waiting for input */
        if ( -1 != g_sVolume.hWaitThread ) {
            wakeup_thread( g_sVolume.hWaitThread, false );
        }
    }
    put_cpu_flags( nEFlg );
    return( 0 );
}
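The "& 0xff" mask above is what turns zBuffer into a 256-entry ring buffer: the atomically incremented position wraps modulo 256 because 256 is a power of two. Below is a small standalone illustration of that indexing; the names are invented for the example and are not Pyro API.

/* Standalone illustration of power-of-two ring-buffer indexing; compiles
 * and runs as a normal user-space program. */
#include <stdio.h>

int main( void )
{
    char buffer[ 256 ];
    unsigned int in_pos = 0;
    int i;

    for ( i = 0 ; i < 600 ; ++i )
        buffer[ in_pos++ & 0xff ] = (char)i;    /* wraps at 256, like nInPos & 0xff */

    printf( "last slot written: %u\n", ( in_pos - 1 ) & 0xff );  /* 599 % 256 = 87 */
    return( 0 );
}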
Code example #3
/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 *
 * A sender should be confirmed to exist before calling, which keeps the
 * common case cheap: the majority of messages have no sender waiting and
 * never need this call at all.
 */
static void queue_release_sender(struct thread_entry * volatile * sender,
                                 intptr_t retval)
{
    struct thread_entry *thread = *sender;

    *sender = NULL;               /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
    thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
    thread->retval = retval;      /* Assign thread-local return value. */
    *thread->bqp = thread;        /* Move blocking queue head to thread since
                                     wakeup_thread wakes the first thread in
                                     the list. */
    wakeup_thread(thread->bqp);
}
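A hedged sketch of the check-then-call pattern that the comment describes; the helper name and the sender-slot pointer below are illustrative assumptions, not the actual Rockbox call site.

/* Hypothetical caller sketch: the cheap NULL test rejects the common case
 * where no sender is blocked, so queue_release_sender() is rarely entered. */
static inline void reply_if_sender(struct thread_entry * volatile *spp,
                                   intptr_t retval)
{
    if(*spp != NULL)
        queue_release_sender(spp, retval);
}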
Code example #4
File: kbd.c Project: PyroOS/Pyro
static int  kbd_write( void* pNode, void* pCookie, off_t nPosition, const void* pBuffer, size_t nSize )
{
	int i;
	for ( i = 0 ; i < nSize ; ++i )
	{
		/* inject the byte into the same ring buffer that kbd_irq() feeds */
		g_sVolume.zBuffer[ atomic_inc_and_read( &g_sVolume.nInPos ) & 0xff ] = ((char *)pBuffer)[i];

		atomic_inc( &g_sVolume.nBytesReceived );

		/* wake the reader thread if one is blocked waiting for input */
		if ( -1 != g_sVolume.hWaitThread )
		{
			wakeup_thread( g_sVolume.hWaitThread, false );
		}
	}
	return(nSize);
}
Code example #5
File: thread-sdl.c Project: ntj/rockbox
unsigned int thread_queue_wake(struct thread_entry **list)
{
    unsigned int result = THREAD_NONE;

    for (;;)
    {
        unsigned int rc = wakeup_thread(list);

        if (rc == THREAD_NONE)
            break;

        result |= rc;
    }

    return result;
}
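A hedged usage sketch of the broadcast-wake loop above: once an event completes, every thread blocked on its list is woken, and the caller yields if the result flags request a switch. The event structure and its fields are assumptions for illustration only.

/* Hypothetical usage sketch - 'struct event' and its fields are invented. */
static void event_complete(struct event *evt)
{
    evt->state = EVENT_DONE;                      /* publish the result first */

    if (thread_queue_wake(&evt->waiters) & THREAD_SWITCH)
        switch_thread();                          /* let a woken thread run now */
}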
Code example #6
File: mutex.c Project: Brandon7357/rockbox
/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(m->blocker.thread == __running_self_entry(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  m->blocker.thread->name,
                  __running_self_entry()->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    struct thread_entry *thread = WQ_THREAD_FIRST(&m->queue);
    if(LIKELY(thread == NULL))
    {
        /* no threads waiting - open the lock */
        m->blocker.thread = NULL;
        corelock_unlock(&m->cl);
        return;
    }

    const int oldlevel = disable_irq_save();
    /* Transfer of ownership is handled by the wakeup protocol when
     * priorities are enabled; otherwise just set the owner from the
     * queue head. */
#ifndef HAVE_PRIORITY_SCHEDULING
    m->blocker.thread = thread;
#endif
    unsigned int result = wakeup_thread(thread, WAKEUP_TRANSFER);
    restore_irq(oldlevel);

    corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
    (void)result;
}
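For completeness, a minimal sketch of the lock/unlock pairing that mutex_unlock() expects. mutex_init() and mutex_lock() are assumed counterparts with the usual single-argument form, so verify against the project's mutex header.

/* Hypothetical usage sketch - assumes mutex_init(m) and a blocking mutex_lock(m). */
static struct mutex list_lock;

void shared_list_push(struct node *n)
{
    mutex_lock(&list_lock);       /* blocks (or recurses) until ownership is gained */
    /* ... mutate the shared list ... */
    mutex_unlock(&list_lock);     /* hands ownership to the next queued thread, if any */
}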
Code example #7
File: osl.c Project: PyroOS/Pyro
acpi_status
acpi_os_execute(
	u32			priority,
	acpi_osd_exec_callback	function,
	void			*context)
{
	acpi_status 		status = AE_OK;
	struct acpi_os_dpc	*dpc;
	thread_id id;
	

	ACPI_FUNCTION_TRACE ("os_queue_for_execution");

	ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context));

	if (!function)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
 * freed by the callee.  The kernel handles the tq_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static tq_struct.
	 * We can save time and code by allocating the DPC and tq_structs
	 * from the same memory.
	 */

	dpc = kmalloc(sizeof(struct acpi_os_dpc), MEMF_KERNEL | MEMF_CLEAR );
	if (!dpc)
		return_ACPI_STATUS (AE_NO_MEMORY);

	dpc->function = function;
	dpc->context = context;
	
	id = spawn_kernel_thread( "acpi_task", acpi_os_execute_deferred,
				     0, 4096, dpc );
	wakeup_thread( id, false );

	return_ACPI_STATUS ( status );
}
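For context, a hedged sketch of what the spawned acpi_os_execute_deferred() entry point presumably does, based only on the comment above that the DPC memory is freed by the callee; the signature and body are assumptions, not the actual Pyro implementation.

/* Hypothetical sketch of the deferred-execution thread entry (signature assumed). */
static int acpi_os_execute_deferred( void* pData )
{
	struct acpi_os_dpc* dpc = pData;

	dpc->function( dpc->context );	/* run the queued ACPI work */
	kfree( dpc );			/* the callee frees the DPC, per the comment above */
	return( 0 );
}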