Example #1
static int find_tid(phantom_thread_t *t)
{
    int maxtries = MAX_THREADS;
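    // Disable interrupts, remembering whether they were previously enabled,
    // then take the spinlock; the saved state is restored on every exit path.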
    int ie = hal_save_cli();
    hal_spin_lock( &tid_lock );
    do {
        if( phantom_kernel_threads[last_tid] == 0 )
        {
            phantom_kernel_threads[last_tid] = t;
            hal_spin_unlock( &tid_lock );
            if(ie) hal_sti();
            return last_tid;
        }

        last_tid++;

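        // Wrap around; tids 0 and 1 appear to be reserved
        // (tid 1 is the imported main thread, see Example #16).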
        if(last_tid >= MAX_THREADS)
            last_tid = 2;

        if(maxtries-- < 0)
        {
            hal_spin_unlock( &tid_lock );
            if(ie) hal_sti();
            panic("out of threads");
            return -1; //not reached, panic
        }
    } while(1);
}
Example #2
static errno_t startSync( phantom_disk_partition_t *p, void *to, long blockNo, int nBlocks, int isWrite )
{
    assert( p->block_size < PAGE_SIZE );
    SHOW_FLOW( 3, "blk %d", blockNo );

    pager_io_request rq;

    pager_io_request_init( &rq );

    rq.phys_page = (physaddr_t)phystokv(to); // why? redundant? (overwritten by hal_pv_alloc below)
    rq.disk_page = blockNo;

    rq.blockNo = blockNo;
    rq.nSect   = nBlocks;

    rq.rc = 0;

    if(isWrite) rq.flag_pageout = 1;
    else rq.flag_pagein = 1;

    STAT_INC_CNT(STAT_CNT_BLOCK_SYNC_IO);
    STAT_INC_CNT( STAT_CNT_DISK_Q_SIZE ); // Will decrement on io done

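    // hal_pv_alloc allocates a physical/virtual buffer pair for the transfer,
    // overwriting rq.phys_page set above and returning the matching kernel
    // virtual address in va.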
    void *va;
    hal_pv_alloc( &rq.phys_page, &va, nBlocks * p->block_size );

    errno_t ret = EINVAL;

    if(isWrite) memcpy( va, to, nBlocks * p->block_size );

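    // Take rq.lock with interrupts off so the request cannot complete and try
    // to wake us before flag_sleep and sleep_tid are set; thread_block() is
    // handed the lock and presumably releases it once the thread is asleep.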
    int ei = hal_save_cli();
    hal_spin_lock(&(rq.lock));
    rq.flag_sleep = 1; // Don't return until done
    rq.sleep_tid = GET_CURRENT_THREAD()->tid;

    SHOW_FLOW0( 3, "start io" );
    if( (ret = p->asyncIo( p, &rq )) )
    {
        rq.flag_sleep = 0;
        hal_spin_unlock(&(rq.lock));
        if( ei ) hal_sti();
        //return ret;
        goto ret;
    }
    thread_block( THREAD_SLEEP_IO, &(rq.lock) );
    SHOW_FLOW0( 3, "unblock" );
    if( ei ) hal_sti();

    if(!isWrite) memcpy( to, va, nBlocks * p->block_size );
    ret = rq.rc;

    //return partAsyncIo( p, &rq );
    //return p->asyncIo( p, rq );


ret:
    hal_pv_free( rq.phys_page, va, nBlocks * p->block_size );
    return ret;
}
Example #3
void hal_wired_spin_unlock(hal_spinlock_t *l)
{
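    // Release order: drop the spinlock, reenable preemption, unwire the page
    // (presumably wired by the matching wired-lock routine), and finally
    // restore the interrupt-enable state that was saved in l->ei.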
    hal_spin_unlock(l);
    hal_enable_preemption();
    unwire_page_for_addr( l, sizeof( hal_spinlock_t ) );
    if( l->ei ) hal_sti();
}
Example #4
void hal_irq_free( int irq, void (*func)(void *arg), void *_arg )
{
    if( irq < 0 || irq >= MAX_IRQ_COUNT )
        panic("IRQ %d out of range (max %d)", irq, MAX_IRQ_COUNT-1 );

    // Queue manipulation is not atomic and will break
    // if the interrupt is serviced on another CPU.
    // That is why we disable the IRQ at the controller before doing this.
    board_interrupt_disable(irq);

    struct handler_q *it;

    int ie = hal_save_cli();
    hal_spin_lock(&irq_list_lock);
    queue_iterate(&heads[irq], it, struct handler_q *, chain)
    {
        if( it->ihandler == func && it->arg == _arg )
        {
            queue_remove(&heads[irq], it, struct handler_q *, chain);


            if(!queue_empty( &heads[irq] ))
                board_interrupt_enable(irq);

            // TODO free struct!

            hal_spin_unlock(&irq_list_lock);
            if(ie) hal_sti();
            return;
        }
    }

    // Not found: leave any remaining handlers armed, release the lock and
    // restore the interrupt state. (Tail reconstructed; the original snippet
    // is cut off at this point.)
    if(!queue_empty( &heads[irq] ))
        board_interrupt_enable(irq);

    hal_spin_unlock(&irq_list_lock);
    if(ie) hal_sti();
}
Example #5
tid_t get_next_tid(tid_t tid, phantom_thread_t *out)
{
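    // Find the next occupied slot in the thread table after the given tid;
    // returns -1 (and leaves *out untouched) when there is none.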
    tid_t ret = tid+1;
    int ie = hal_save_cli();
    hal_spin_lock( &tid_lock );

    while( ret < MAX_THREADS && phantom_kernel_threads[ret] == 0 )
        ret++;

    if(ret >= MAX_THREADS)
    {
        ret = -1;
        goto finish;
    }

    if(out)
        *out = *phantom_kernel_threads[ret];

finish:
    hal_spin_unlock( &tid_lock );
    if(ie) hal_sti();

    return ret;
}
Example #6
struct pvm_object     pvm_create_weakref_object(struct pvm_object owned )
{
    if(owned.data->_satellites.data != 0)
        return owned.data->_satellites;

    struct pvm_object ret = pvm_object_create_fixed( pvm_get_weakref_class() );
    struct data_area_4_weakref *da = (struct data_area_4_weakref *)ret.data->da;

    // Interlocked to make sure no races can happen
    // (ref assignment seems to be non-atomic)

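    // In the spinlock build the page holding the lock is wired first, so it
    // cannot be paged out while the lock is held with interrupts disabled.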
#if WEAKREF_SPIN
    wire_page_for_addr( &da->lock );
    int ie = hal_save_cli();
    hal_spin_lock( &da->lock );
#else
    hal_mutex_lock( &da->mutex );
#endif

    // No ref inc!
    da->object = owned;
    owned.data->_satellites = ret;

#if WEAKREF_SPIN
    hal_spin_unlock( &da->lock );
    if( ie ) hal_sti();
    unwire_page_for_addr( &da->lock );
#else
    hal_mutex_unlock( &da->mutex );
#endif

    return ret;
}
Example #7
//! \brief Turn off the speaker.
void nosound()
{
    int ie = hal_save_cli();

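    // Clear bits 0-1 of port 0x61: gate off PIT channel 2 and disconnect the
    // speaker data line.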
    outb(0x61, inb(0x61) & 0xFC);

    if(ie) hal_sti();
}
Example #8
void board_interrupts_disable_all(void)
{
    int ie = hal_save_cli();

    arch_interrupts_disable_all();

    if(ie) hal_sti();
}
Example #9
void board_sched_cause_soft_irq(void)
{
    int ie = hal_save_cli();

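    // Raise an ARM software interrupt; the SWI handler is expected to route
    // this into the scheduler soft-interrupt path (see the commented-out
    // call below).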
    asm volatile("swi 0xFFF");

    //phantom_scheduler_soft_interrupt();

    if(ie) hal_sti();
}
Example #10
void board_interrupt_disable(int irq)
{
    int ie = hal_save_cli();

    if(irq < MIPS_ONCPU_INTERRUPTS)
        arch_interrupt_disable(irq);
    else
    {
        SHOW_ERROR( 0, "unimpl irq %d", irq );
    }

    if(ie) hal_sti();
}
Example #11
void board_interrupt_enable(int irq)
{
    //assert_interrupts_disabled();
    int ie = hal_save_cli();

    if(irq < MIPS_ONCPU_INTERRUPTS)
        arch_interrupt_enable(irq);
    else
    {
#warning todo
        SHOW_ERROR( 0, "unimpl irq %d", irq );
    }

    if(ie) hal_sti();
}
Example #12
static dpc_request *    dpc_request_find()
{
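    // Scan the request list for an armed (go != 0) entry; claim it by
    // clearing go and return it, or return 0 if nothing is pending.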
    int ie = hal_save_cli();
    spinlock_lock( &dpc_request_lock, "dpc find" );
    SHOW_FLOW0( 2, "DPC find... ");

    dpc_request *i;
    for( i = dpc_request_first; i; i = i->next )
    {
        //if( !i->go ) return; // q must be sorted...
        if( !i->go ) continue;
        i->go = 0;

        SHOW_FLOW0( 2, "DPC found\n");
        spinlock_unlock( &dpc_request_lock, "dpc find" );
        if(ie) hal_sti();
        return i;
    }

    SHOW_FLOW0( 2, "DPC not found\n");
    spinlock_unlock( &dpc_request_lock, "dpc find" );
    if(ie) hal_sti();
    return 0;
}
Example #13
errno_t hal_irq_alloc( int irq, void (*func)(void *arg), void *_arg, int is_shareable )
{
    if( irq < 0 || irq >= MAX_IRQ_COUNT )
        panic("IRQ %d out of range (max %d)", irq, MAX_IRQ_COUNT-1 );

    if( (!is_shareable) && !queue_empty( &heads[irq] ) )
    {
        printf("IRQ %d asked exculsive, but other user exist", irq);
        return EMLINK;
    }

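    // A shared request may only join if the first handler already on the
    // queue was also registered as shareable.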
    if( is_shareable && !queue_empty( &heads[irq] ) && (!((struct handler_q *)queue_first( &heads[irq] ))->is_shareable) )
    {
        printf("IRQ %d asked shared, but already exclusive", irq);
        return EMLINK;
    }


    struct handler_q *out = malloc(sizeof(struct handler_q));
    if(out == 0)
        return ENOMEM;

    out->ihandler = func;
    out->arg = _arg;
    out->is_shareable = is_shareable;

    // Mask off the IRQ while modifying its handler list
    board_interrupt_disable(irq);

    int ie = hal_save_cli();
    hal_spin_lock(&irq_list_lock);
    // Queue insert is not atomic and will break
    // if the interrupt is serviced on another CPU.
    // That is why we disable the IRQ at the controller before doing this.
    queue_enter(&heads[irq], out, struct handler_q *, chain);

    // Do it in spinlock to avoid races with disable_irq
    board_interrupt_enable(irq);

    hal_spin_unlock(&irq_list_lock);
    if(ie) hal_sti();

    return 0;
}
Example #14
//! \brief Start a sound using the speaker.
//! \param frequency The frequency of the sound.
void sound(u_int32_t frequency)
{
    int ie;
    u_int32_t div;

    if( (frequency<19) || (frequency>22000) )
        return;

    div = TIMER_FREQ / frequency;

    ie = hal_save_cli();

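    // Gate on PIT channel 2 and the speaker output (bits 0-1 of port 0x61),
    // then program channel 2: 0xB6 selects channel 2, lobyte/hibyte access,
    // mode 3 (square wave); the divisor is loaded low byte first.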
    outb( 0x61, inb(0x61) | 3 );
    outb( TIMER_MODE, 0xB6);
    outb( TIMER2, div & 0xFF);
    outb( TIMER2, div >> 8);

    if(ie) hal_sti();
}
Example #15
struct pvm_object pvm_weakref_get_object(struct pvm_object wr )
{
    struct data_area_4_weakref *da = pvm_object_da( wr, weakref );
    struct pvm_object out;

    // still crashes :(

    // HACK HACK HACK BUG - wiring target too. TODO need wire size parameter for page cross situations!
    wire_page_for_addr( &(da->object) );
    wire_page_for_addr( da->object.data );

    // All we do is return new reference to our object,
    // incrementing refcount before

#if WEAKREF_SPIN
    wire_page_for_addr( &da->lock );
    int ie = hal_save_cli();
    hal_spin_lock( &da->lock );
#else
    hal_mutex_lock( &da->mutex );
#endif

    // TODO should we check refcount before and return null if zero?
    if( 0 == da->object.data->_ah.refCount )
        printf("zero object in pvm_weakref_get_object\n");

    out = ref_inc_o( da->object );

#if WEAKREF_SPIN
    hal_spin_unlock( &da->lock );
    if( ie ) hal_sti();
    unwire_page_for_addr( &da->lock );
#else
    hal_mutex_unlock( &da->mutex );
#endif

    unwire_page_for_addr( da->object.data );
    unwire_page_for_addr( &(da->object) );

    return out;
}
Example #16
void
phantom_import_main_thread()
{
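    // Wrap the already-running initial (boot) context in a thread descriptor
    // so the scheduler can account for it; it is registered by hand as tid 1.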
    phantom_thread_t *t = calloc(1, sizeof(phantom_thread_t));

    // Can't be run yet
    t->sleep_flags = THREAD_SLEEP_LOCKED; 

    int ie = hal_save_cli();
    hal_spin_lock( &tid_lock );
    t->tid = 1;
    assert(phantom_kernel_threads[t->tid] == 0);

    phantom_kernel_threads[t->tid] = t;
    hal_spin_unlock( &tid_lock );
    if(ie) hal_sti();

    // This is not exactly safe! phantom_thread_state_init pushes some stuff on the stack and can overwrite something
    //t->stack_size = phantom_start_stack_size;
    //t->stack = phantom_start_stack_start;

    //assert(t->stack != 0);

    common_thread_init(t, DEF_STACK_SIZE );

    t->start_func_arg = 0;
    t->start_func = 0;

    phantom_thread_state_init(t);

    t->thread_flags |= THREAD_FLAG_UNDEAD;

    // Let it be eligible to run
    t->sleep_flags &= ~THREAD_SLEEP_LOCKED;

    //GET_CURRENT_THREAD() = t;
    SET_CURRENT_THREAD(t);

    t_current_set_name("Main");

}
Example #17
void
phantom_thread_c_starter(void)
{
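    // Entry point for a freshly created thread: runs once in the new thread's
    // context, finishes scheduler and arch setup, then calls the thread's
    // start function.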
    void (*func)(void *);
    void *arg;
    phantom_thread_t *t;

    t = GET_CURRENT_THREAD();
    arg = t->start_func_arg;
    func = t->start_func;

    // The thread switch code locked this before switching into us; we have to unlock it
    hal_spin_unlock(&schedlock);

#if DEBUG
    printf("---- !! phantom_thread_c_starter !! ---\n");
#endif
    t->cpu_id = GET_CPU_ID();

    arch_float_init();

    // We're first time running here, set arch specific things up
    // NB!! BEFORE enablings interrupts!
    arch_adjust_after_thread_switch(t);


    hal_sti(); // Make sure new thread is started with interrupts on

#if 0 // usermode loader does this itself
    if( THREAD_FLAG_USER & t->thread_flags )
    {
        //switch_to_user_mode();
    }
#endif


    func(arg);
    t_kill_thread( t->tid );
    panic("thread %d returned from t_kill_thread", t->tid );
}
Example #18
int mips_irq_dispatch(struct trap_state *ts, u_int32_t pending)
{
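    // Bits 8..15 of the CP0 Status register are the IM0..IM7 interrupt mask
    // bits; extract them so that masked-off pending lines are ignored.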
    unsigned mask = mips_read_cp0_status();
    mask >>= 8;
    mask &= 0xFF;

    SHOW_FLOW( 8, "irq pending %x mask %x", pending, mask );

    pending &= mask;

    // Have software IRQ requests? Clear 'em BEFORE servicing,
    // or they'll fire again as soon as interrupts are open
    if( pending & 0x3 )
    {
        int ie = hal_save_cli();

        unsigned int cause = mips_read_cp0_cause();
        cause &= ~(0x3 << 8); // reset software irq 0 & 1
        mips_write_cp0_cause( cause );

        if(ie) hal_sti();
    }


    u_int32_t   irqs = pending;

    int nirq = 0;
    while( irqs )
    {
        if( irqs & 0x1 )
            process_irq(ts, nirq);

        irqs >>= 1;
        nirq++;
    }


    return 0; // We're ok
}
Example #19
// finds a free message and gets it
// NOTE: has side effect of disabling interrupts
static void find_free_message(struct smp_msg **msg)
{

retry:
    while(free_msg_count <= 0)
        ;
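    // The unlocked spin above is only a cheap pre-check; the count is
    // re-checked below once the spinlock is actually held.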
    int ie = hal_save_cli();
    hal_spin_lock(&free_msg_spinlock);

    if(free_msg_count <= 0) {
        // someone grabbed one while we were getting the lock,
        // go back to waiting for it
        hal_spin_unlock(&free_msg_spinlock);
        if(ie) hal_sti();
        goto retry;
    }

    *msg = free_msgs;
    free_msgs = (*msg)->next;
    free_msg_count--;

    hal_spin_unlock(&free_msg_spinlock);
}