Example #1
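// Deliver signal sig to process pid under proc_lock; sets *err to ESRCH and returns -1 if no such process exists.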
int usys_kill(int *err, uuprocess_t *u, int pid, int sig)
{
    (void) u;

    hal_mutex_lock(&proc_lock);

    //panic("kill is not implemented");
    uuprocess_t * p = proc_by_pid(pid);

    if(!p)
    {
        hal_mutex_unlock(&proc_lock);
        *err = ESRCH;
        return -1;
    }

    //int ret = 0;
    sig_send( &p->signals, sig );

    hal_mutex_unlock(&proc_lock);

    //if(ret)        *err = EINVAL;
    //return ret;
    return 0;
}
Example #2
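// Unmount: unlink the mount point directory, then clear the matching entry in the mount table under the mm mutex.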
static errno_t rm_mount( const char* name, int flags )
{
    (void) flags;

    if( *name == '/' ) name++;

    errno_t rc = unlink_dir_name( &root_root, name );
    if( rc )
    {
        SHOW_ERROR( 1, "can't unlink %s", name );
        return rc;
    }

    hal_mutex_lock ( &mm );

    int i;
    for( i = 0; i < FS_MAX_MOUNT; i++ )
    {
        // unused
        if( mount[i].fs == 0 )
            continue;

        if( 0 == strcmp( mount[i].path, name ) )
        {
            mount[i].fs = 0;
            hal_mutex_unlock ( &mm );
            return 0;
        }

    }

    hal_mutex_unlock ( &mm );
    return ENOENT;
}
Example #3
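// Re-enable VM threads after a snapshot: the last snap requester broadcasts the wakeup condition and waits until all stopped threads report they are running again.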
void phantom_snapper_reenable_threads( void )
{
    SHOW_FLOW0( 5, "Snapper will reenable threads");
    hal_mutex_lock( &interlock_mutex );
    phantom_virtual_machine_snap_request--; // May wake up now

    if(phantom_virtual_machine_snap_request > 0)
    {
        // I'm not the only one here - another snap request is still active
        hal_mutex_unlock( &interlock_mutex );
        return;
    }

    SHOW_FLOW( 5, "Snapper sleep request is %d, will broadcast", phantom_virtual_machine_snap_request);

    hal_cond_broadcast( &phantom_vm_wait_4_snap );


    SHOW_FLOW( 5, "Snapper will wait for %d threads to awake", phantom_virtual_machine_threads_stopped);

#if VM_SYNC_NOWAIT_BLOCKED
    while( phantom_virtual_machine_threads_stopped - phantom_virtual_machine_threads_blocked > 0 )
#else
    while( phantom_virtual_machine_threads_stopped > 0 )
#endif
    {
        hal_cond_wait( &phantom_snap_wait_4_vm_leave, &interlock_mutex );
        SHOW_FLOW( 5, "Snapper: %d threads still sleep", phantom_virtual_machine_threads_stopped);

    }
    hal_mutex_unlock( &interlock_mutex );

    SHOW_FLOW0( 5, "Snapper done waiting for awake");

}
Example #4
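// Background thread: once a second, rotate the request queue and resend requests that need it, failing them with ETIMEDOUT after MAX_RETRY attempts.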
static void trfs_resend_thread(void *arg)
{
    (void) arg;

    t_current_set_name("TRFS Resend");

    while(1)
    {
        hal_sleep_msec( 1000 ); // between bursts
        hal_mutex_lock(&lock);

        trfs_queue_t *first = 0;

    again:
        if(queue_empty(&requests))
        {
            hal_mutex_unlock(&lock);
            continue;
        }

        trfs_queue_t *elt;

        // Move to end
        queue_remove_first( &requests, elt, trfs_queue_t *, chain );
        queue_enter(&requests, elt, trfs_queue_t *, chain);

        if( elt == first )
        {
            hal_mutex_unlock(&lock);
            continue;
        }
        first = elt;

        if( TRFS_NEED_RESEND(elt) )
        {
            if( elt->resend_count++ > MAX_RETRY )
            {
                queue_remove( &requests, elt, trfs_queue_t *, chain );
                //elt->orig_request->flag_ioerror = 1;
                elt->orig_request->rc = ETIMEDOUT;
                trfs_signal_done(elt);
            }
            else
            {
                SHOW_FLOW0( 1, "rerequest" );
                sendRequest(elt);
                hal_sleep_msec( 100 ); // between packets
            }
            goto again;
        }

        // Original listing is truncated here; a minimal closing is assumed:
        // release the lock before sleeping for the next burst.
        hal_mutex_unlock(&lock);
    }
}
Example #5
// Add thread to process.
errno_t uu_proc_add_thread( int pid, int tid )
{
    errno_t rc = 0;

    assert( tid >= 0 );
    assert( pid > 0 );

/*
    // TODO t_set_pid
    phantom_thread_t *t = get_thread(tid);
    assert( t );
    assert( t->u == 0 );
*/

    hal_mutex_lock(&proc_lock);

    rc = t_set_pid( tid, pid );
    if( rc ) goto finish;

    uuprocess_t * p = proc_by_pid(pid);
    if( !p )
    {
        hal_mutex_unlock(&proc_lock);
        return ESRCH;
    }

    int done = 0;

    int i;
    for( i = 0; i < MAX_UU_TID; i++ )
    {
        if( p->tids[i] >= 0 )
            continue;

        p->tids[i] = tid;
        p->ntids++;
        done = 1;
        break;
    }

    //t->u = p;
    t_current_set_death_handler(uu_proc_thread_kill);

    if(!done) panic("out of thread slots for proc");

finish:
    hal_mutex_unlock(&proc_lock);
    return rc;
}
Example #6
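// Run the garbage collector: mark everything reachable from the root object, then free unmarked objects.
// Skips the run if a concurrent GC finished while we waited for the allocator lock.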
void run_gc()
{
    int my_run = gc_n_run;

    //hal_mutex_lock( &alloc_mutex );
    if(vm_alloc_mutex) hal_mutex_lock( vm_alloc_mutex );  // TODO avoid Giant lock

    if (my_run != gc_n_run) // the lock was acquired only after a concurrent gc run finished - nothing left to do
    {
        if(vm_alloc_mutex) hal_mutex_unlock( vm_alloc_mutex );  // TODO avoid Giant lock
        //hal_mutex_unlock( &alloc_mutex );
        return;
    }
    gc_n_run++;

    gc_flags_last_generation++; // bump generation
    if (gc_flags_last_generation == 0)  gc_flags_last_generation++;  // must be != 0 because allocation resets gc_flags to zero

    //phantom_virtual_machine_threads_stopped++; // pretend we are stopped
    //TODO: refine synchronization

    if (debug_memory_leaks) pvm_memcheck();  // visualization
    if (debug_memory_leaks) printf("gc started...  ");


    cycle_root_buffer_clear(); // so the two types of gc can coexist


    // First pass - tree walk, mark visited.
    //
    // Root is always used. All other objects, including pvm_root and pvm_root.threads_list, should be reached from root...
    mark_tree( get_root_object_storage() );


    // Second pass - linear walk to free unused objects.
    //
    int freed = free_unmarked();

    if ( freed > 0 )
       printf("\ngc: %i objects freed\n", freed);

    if (debug_memory_leaks) printf("gc finished!\n");
    if (debug_memory_leaks) pvm_memcheck();  // visualization

    //TODO refine synchronization
    //phantom_virtual_machine_threads_stopped--;
    //hal_mutex_unlock( &alloc_mutex );
    if(vm_alloc_mutex) hal_mutex_unlock( vm_alloc_mutex );  // TODO avoid Giant lock
}
Example #7
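// Return the weak reference (satellite) object for 'owned', creating it under the weakref lock if it does not exist yet.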
struct pvm_object     pvm_create_weakref_object(struct pvm_object owned )
{
    if(owned.data->_satellites.data != 0)
        return owned.data->_satellites;

    struct pvm_object ret = pvm_object_create_fixed( pvm_get_weakref_class() );
    struct data_area_4_weakref *da = (struct data_area_4_weakref *)ret.data->da;

    // Interlocked to make sure no races can happen
    // (ref assignment seems to be non-atomic)

#if WEAKREF_SPIN
    wire_page_for_addr( &da->lock );
    int ie = hal_save_cli();
    hal_spin_lock( &da->lock );
#else
    hal_mutex_lock( &da->mutex );
#endif

    // No ref inc!
    da->object = owned;
    owned.data->_satellites = ret;

#if WEAKREF_SPIN
    hal_spin_unlock( &da->lock );
    if( ie ) hal_sti();
    unwire_page_for_addr( &da->lock );
#else
    hal_mutex_unlock( &da->mutex );
#endif

    return ret;
}
Example #8
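// Called by a VM thread to park itself for a snapshot: report itself stopped, wait for the snapper's broadcast, then report wakeup.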
void phantom_thread_wait_4_snap( void )
{
    if(phantom_virtual_machine_stop_request)
    {
        SHOW_FLOW0( 4, "VM thread will die now");
        hal_exit_kernel_thread();
    }

    SHOW_FLOW0( 5, "VM thread will sleep for snap");
    hal_mutex_lock( &interlock_mutex );

    phantom_virtual_machine_threads_stopped++;
    hal_cond_broadcast( &phantom_snap_wait_4_vm_enter );

    SHOW_FLOW0( 5, "VM thread reported sleep, will wait now");

    //while(phantom_virtual_machine_snap_request)
        hal_cond_wait( &phantom_vm_wait_4_snap, &interlock_mutex );

    SHOW_FLOW0( 5, "VM thread awaken, will report wakeup");
    phantom_virtual_machine_threads_stopped--;
    hal_cond_broadcast( &phantom_snap_wait_4_vm_leave );

    hal_mutex_unlock( &interlock_mutex );
    SHOW_FLOW0( 5, "VM thread returns to activity");
}
Example #9
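// Test thread: repeatedly wait on condition variable c under mutex m until a stop is requested.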
static void t_wait(void *a)
{
    hal_set_current_thread_priority( THREAD_PRIO_HIGH );

    char *name = a;
    while(!thread_stop_request)
    {
        thread_activity_counter++;

        if(TEST_CHATTY) printf("--- thread %s will wait 4 cond ---\n", name);

        hal_mutex_lock(&m);
        checkEnterMutex();
        checkLeaveMutex();
        hal_cond_wait(&c, &m);
        checkEnterMutex();
        checkLeaveMutex();
        hal_mutex_unlock(&m);

        if(TEST_CHATTY) printf("--- thread %s runs ---\n", name);
        //pressEnter("--- thread a runs ---\n");
        YIELD();
    }
    FINISH();
}
Example #10
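// Read up to cnt bytes from the window tty ring buffer; blocks on rcond while the buffer is empty unless nowait is set.
// Returns the number of bytes read, or -EPIPE if the tty is not started.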
int wtty_read(wtty_t *w, char *data, int cnt, bool nowait)
{
    int done = 0;
    assert(w);
    hal_mutex_lock(&w->mutex);

    SHOW_FLOW( 11, "wtty rd %p", w );
    if(!w->started) { done = -EPIPE; goto exit; }

    while( cnt > 0 )
    {
        if( nowait && _wtty_is_empty(w) )
            break;

        if(!w->started) goto exit;

        while( _wtty_is_empty(w) )
        {
            hal_cond_broadcast( &w->wcond );
            hal_cond_wait( &w->rcond, &w->mutex );
            if(!w->started) goto exit;
        }

        *data++ = w->buf[w->getpos++];
        done++;
        cnt--;
        wtty_wrap(w);
    }

    hal_cond_broadcast( &w->wcond );
exit:
    hal_mutex_unlock(&w->mutex);
    return done;
}
Example #11
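// Write up to cnt bytes into the window tty ring buffer; blocks on wcond while the buffer is full unless nowait is set.
// Returns the number of bytes written, or -EPIPE if the tty is not started.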
int wtty_write(wtty_t *w, const char *data, int cnt, bool nowait)
{
    int done = 0;

    assert(w);
    if(!w->started) return -EPIPE;

    hal_mutex_lock(&w->mutex);
    wtty_wrap(w);

    SHOW_FLOW( 11, "wtty wr %p", w );

    while( cnt > 0 )
    {
        if( nowait && _wtty_is_full(w) )
            break;

        while( _wtty_is_full(w) )
        {
            hal_cond_broadcast( &w->rcond );
            hal_cond_wait( &w->wcond, &w->mutex );
            if(!w->started) goto exit;
        }

        w->buf[w->putpos++] = *data++;
        done++;
        cnt--;
        wtty_wrap(w);
    }

    hal_cond_broadcast( &w->rcond );
exit:
    hal_mutex_unlock(&w->mutex);
    return done;
}
Example #12
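// Non-blocking single-character write: returns ENOMEM if the buffer is full, EPIPE if the tty is not started.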
errno_t wtty_putc_nowait(wtty_t *w, int c)
{
    int ret = 0;

    assert(w);
    if(!w->started) return EPIPE;

    hal_mutex_lock(&w->mutex);
    wtty_wrap(w);

    SHOW_FLOW( 11, "wtty putc %p", w );

    if( _wtty_is_full(w) )
    {
        SHOW_ERROR0( 10, "wtty putc fail" );
        ret = ENOMEM;
    }
    else
    {
        wtty_doputc(w, c);
    }

    hal_mutex_unlock(&w->mutex);
    return ret;
}
Example #13
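// waitpid stub: validates the arguments under proc_lock; the actual wait is not implemented yet (see the #warning below).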
int usys_waitpid(int *err, uuprocess_t *u, int pid, int *status, int options)
{
    int retpid = -1;

    (void) status;
    (void) options;

    hal_mutex_lock(&proc_lock);

    if( pid <= 0 )
    {
        *err = EINVAL;
        retpid = -1;
    }
    else
    {
        uuprocess_t * p = proc_by_pid(pid);
        if( p == 0 || (p->ppid != u->pid ) )
        {
            *err = ECHILD;
            retpid = -1;
            goto finish;
        }

    }
#warning impl
finish:

    hal_mutex_unlock(&proc_lock);


    return retpid;
}
Example #14
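// Blocking single-character read: waits on rcond while the buffer is empty, then wakes waiting writers. Returns 0 if the tty is stopped.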
int wtty_getc(wtty_t *w)
{
    int ret;

    assert(w);
    SHOW_FLOW( 11, "wtty getc %p", w );

    hal_mutex_lock(&w->mutex);

    if(!w->started) { ret = 0; goto exit; }

    while(w->getpos == w->putpos)
    {
        hal_cond_wait( &w->rcond, &w->mutex );
        if(!w->started) { ret = 0; goto exit; }
    }

    wtty_wrap(w);

    ret = w->buf[w->getpos++];
    hal_cond_broadcast( &w->wcond ); // signal writers to continue

exit:
    hal_mutex_unlock(&w->mutex);
    return ret;
}
Example #15
// Very simple implementation: return the mounted fs whose path is the longest prefix of name, and put the rest of the path in namerest.
static uufs_t * find_mount( const char* name, char *namerest )
{
    uufs_t *ret = 0;
    int maxlen = 0;
    const char *m_path;
    const char *m_name;

    if( *name == '/' ) name++;

    // lock modifications!
    hal_mutex_lock( &mm );

    int i;
    for( i = 0; i < FS_MAX_MOUNT; i++ )
    {
        // unused
        if( mount[i].fs == 0 )
            continue;

        int mplen = strlen( mount[i].path );
        if( mplen <= 0 )
            continue;

        //SHOW_FLOW( 6, "find mount '%s'", name );
        if(
           ( 0 == strncmp( name, mount[i].path, mplen ) ) &&
           ((name[mplen] == '/') || (name[mplen] == '\0'))
          )
        {
            if( mplen > maxlen )
            {
                maxlen = mplen;
                ret = mount[i].fs;
                m_path = mount[i].path;
                m_name = mount[i].name;
            }
        }
    }

    hal_mutex_unlock( &mm );

    if( ret )
    {
        // Skip final /
        if( '/' == *(name+maxlen) )
            maxlen++;
        // part of name after the mount point
        strlcpy( namerest, name+maxlen, FS_MAX_PATH_LEN );

        (void) m_path;
        (void) m_name;
        SHOW_FLOW( 7, "got '%s' (%s) for '%s', rest = '%s'",
                   m_path, m_name,
                   name, namerest
                 );
    }

    return ret;
}
Example #16
//! Return (and clear!) unused event to unused q
void ev_return_unused(ui_event_t *e)
{
    assert(ev_engine_active);
    memset( e, 0, sizeof(struct ui_event) );
    hal_mutex_lock( &ev_unused_q_mutex );
    queue_enter(&ev_unused_events, e, struct ui_event *, echain);
    hal_mutex_unlock( &ev_unused_q_mutex );
}
Example #17
// TODO lock!
static
errno_t add_mount( const char* path, const char *name, uufs_t *fs )
{
    if( *path == '/' ) path++;

    // NB! path must finish with /

    if( strlen( path ) >= FS_MAX_MOUNT_PATH-1 )
        return E2BIG;

    if( strlen( name ) >= FS_MAX_MOUNT_PATH-1 )
        return E2BIG;

    hal_mutex_lock( &mm );

    int i;
    for( i = 0; i < FS_MAX_MOUNT; i++ )
    {
        // skip used slots - we are looking for a free one
        if( mount[i].fs != 0 )
            continue;
        goto found;
    }
    hal_mutex_unlock( &mm );
    return ENFILE;

found:
    mount[i].fs = fs;
    strlcpy( mount[i].path, path, FS_MAX_MOUNT_PATH );
    strlcpy( mount[i].name, name, FS_MAX_MOUNT_PATH );

    //if( mount[i].path[strlen(mount[i].path) - 1] != '/' )
    //    strcat(mount[i].path, "/" );

    // Kill final slash
    int rlen = strlen(mount[i].path);
    if( mount[i].path[rlen - 1] == '/' )
        mount[i].path[rlen - 1] = 0;

    lookup_dir( &root_root, mount[i].path, 1, create_dir );

    hal_mutex_unlock( &mm );

    return 0;
}
Example #18
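// Reset the tty ring buffer to empty and wake any writers waiting for space.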
void wtty_clear(wtty_t * w)
{
    assert(w);
    hal_mutex_lock(&w->mutex);
    w->getpos = 0;
    w->putpos = 0;
    hal_cond_broadcast( &w->wcond );
    hal_mutex_unlock(&w->mutex);
}
Example #19
//! Put filled event onto the main event q
void ev_put_event(ui_event_t *e)
{
    if(!ev_engine_active) return; // Just ignore

    SHOW_FLOW(8, "%p", e);
    hal_mutex_lock( &ev_main_q_mutex );
    ev_events_in_q++;
    queue_enter(&ev_main_event_q, e, struct ui_event *, echain);
    hal_cond_broadcast( &ev_have_event );
    hal_mutex_unlock( &ev_main_q_mutex );
}
Example #20
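// Create a new user process; inherits group, session, ctty, cwd and umask from the parent if one exists. Returns the new pid.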
int uu_create_process( int ppid )
{
    hal_mutex_lock(&proc_lock);
    uuprocess_t *p = get_proc();
    assert(p);

    memset( p, 0, sizeof(uuprocess_t) );

    p->pid = get_pid();

    p->ppid = p->pid;
    p->pgrp_pid = p->pid;
    p->sess_pid = p->pid;

    p->uid = p->euid = p->gid = p->egid = 0; // Let it be root at start
    p->umask = 0664;

    int i;
    for( i = 0; i < MAX_UU_TID; i++ )
        p->tids[i] = -1;

    uuprocess_t * parent = proc_by_pid(ppid);
    if( parent )
    {
        p->ppid = ppid;
        p->pgrp_pid = parent->pgrp_pid;
        p->sess_pid = parent->sess_pid;
        p->ctty = parent->ctty;
        p->cwd_file = copy_uufile( parent->cwd_file );
        memcpy( p->cwd_path, parent->cwd_path, FS_MAX_PATH_LEN );
        p->umask = parent->umask;
    }
    else
    {
        //reopen_stdioe( p, "/dev/tty" );
        strlcpy( p->cwd_path, "/", FS_MAX_PATH_LEN );
    }

    sig_init( &(p->signals) );


    // Mostly created, do final things


    SHOW_FLOW( 11, "ctty %p", p->ctty );

    // always, while there are no files inherited
    reopen_stdioe( p, "/dev/tty" );



    hal_mutex_unlock(&proc_lock);
    return p->pid;
}
Example #21
//! Count events on unused Q
static int ev_count_unused()
{
    int count = 0;

    hal_mutex_lock( &ev_unused_q_mutex );

    if(queue_empty(&ev_unused_events))
    {
        hal_mutex_unlock( &ev_unused_q_mutex );
        return 0;
    }

    struct ui_event *e;
    queue_iterate(&ev_unused_events, e, struct ui_event *, echain)
    {
        count++;
    }
    hal_mutex_unlock( &ev_unused_q_mutex );

    return count;
}
Example #22
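// Find the queued request that matches a received reply by file id, io id and sector range; returns 0 if none matches.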
trfs_queue_t *findRequest( trfs_fio_t *recvFio, u_int32_t type )
{
    SHOW_FLOW( 6, "look for req such as fid %d ioid %d nSect %d start %ld", recvFio->fileId, recvFio->ioId, recvFio->nSectors, recvFio->startSector);

    trfs_queue_t *elt;
    hal_mutex_lock(&lock);
    queue_iterate( &requests, elt, trfs_queue_t *, chain)
    {
        if( elt->type != type )
            continue;

        if(
           (elt->fio.fileId != recvFio->fileId) ||
           (elt->fio.ioId != recvFio->ioId)
          )
            continue;

        u_int64_t our_start = elt->fio.startSector;
        u_int64_t our_end = our_start + elt->fio.nSectors; // one after

        if( (recvFio->startSector < our_start) || (recvFio->startSector >= our_end) )
        {
            SHOW_ERROR( 0, "reply is out of req bounds, our %ld to %ld, got %ld", our_start, our_end, recvFio->startSector );
            continue;
        }

        u_int64_t his_end = recvFio->startSector + recvFio->nSectors;

        if( his_end > our_end )
            SHOW_ERROR( 0, "warning: reply brought too many sectors (%ld against %ld)", his_end, our_end );

        hal_mutex_unlock(&lock);
        return elt;
    }

    hal_mutex_unlock(&lock);
    return 0;
}
Example #23
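// Flush buffered console output: copy the buffer out under buf_mutex, reset it, and print outside the lock.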
static void flush_stdout(void * arg)
{
    char text[BUFS + 1];
    (void) arg;

    hal_mutex_lock( &buf_mutex );
    if( cbufpos >= BUFS)
        cbufpos = BUFS;
    cbuf[cbufpos] = '\0';
    memcpy(text, cbuf, cbufpos + 1);
    cbufpos = 0;
    hal_mutex_unlock( &buf_mutex );
    phantom_console_window_puts(text);
}
Example #24
// remove (dead) thread from process.
errno_t uu_proc_rm_thread( int pid, int tid )
{
    assert( tid >= 0 );

    hal_mutex_lock(&proc_lock);

    uuprocess_t * p = proc_by_pid(pid);
    if( !p )
    {
        hal_mutex_unlock(&proc_lock);
        return ESRCH;
    }

    assert(p->ntids > 0);

    int done = 0;

    int i;
    for( i = 0; i < MAX_UU_TID; i++ )
    {
        if( p->tids[i] != tid )
            continue;

        p->tids[i] = -1;
        p->ntids--;
        done = 1;
        break;
    }

    if(!done) panic("not proc's thread");

    if( p->ntids <= 0 )
        uu_proc_death(p);

    hal_mutex_unlock(&proc_lock);
    return 0;
}
Example #25
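// Wake a VM thread that was put asleep: decrement its sleep flag and, when it reaches zero, broadcast the shared wakeup condition.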
void phantom_thread_wake_up( struct data_area_4_thread *thda )
{
    // TODO of course it is a bottleneck - we need separate sync objects per thread.
    // We can't keep usual mutexes in objects because objects live in paged memory
    // and a mutex uses a spinlock to run its internals.
    // TODO implement old unix style sleep( var address )/wakeup( var address )?
    hal_mutex_lock( &interlock_mutex );

    thda->sleep_flag--;
    //if(thda->sleep_flag <= 0)        hal_cond_broadcast( &thda->wakeup_cond );
    if(thda->sleep_flag <= 0)
        hal_cond_broadcast( &vm_thread_wakeup_cond );

    hal_mutex_unlock( &interlock_mutex );
}
Example #26
void phantom_thread_put_asleep( struct data_area_4_thread *thda, VM_SPIN_TYPE *unlock_spin )
{
    // FIXME can't sleep in spinlock!
    hal_mutex_lock( &interlock_mutex );
    // TODO atomic assign
    if( thda->spin_to_unlock )
        panic( "spin unlock > 1" );

    thda->spin_to_unlock = unlock_spin;
    thda->sleep_flag++;
    hal_mutex_unlock( &interlock_mutex );
    // NB! This will work if called from SYS only! That's
    // ok since no other bytecode instr can call this.
    // Real sleep happens in phantom_thread_sleep_worker
    SHOW_FLOW0( 5, "put thread asleep");
}
Example #27
//! Get unused event from unused events queue, or allocate new one.
ui_event_t * ev_get_unused()
{
    struct ui_event *e;

    hal_mutex_lock( &ev_unused_q_mutex );
    if(queue_empty( &ev_unused_events ))
    {
        ev_allocate_event();
    }

    if(queue_empty(&ev_unused_events))
        panic("out of events");

    queue_remove_first(&ev_unused_events, e, struct ui_event *, echain);
    hal_mutex_unlock( &ev_unused_q_mutex );
    return e;
}
Example #28
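// Destroy a cache: free every element on the LRU list under the cache lock, then tear down the hash table and the lock itself.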
static errno_t cache_do_destroy( cache_t *c )
{

    hal_mutex_lock( &c->lock );
    while(!queue_empty(&c->lru))
    {
        cache_el_t * el;
        //queue_remove_last( &c->lru, el, cache_el_t *, lru );
        queue_remove_first( &c->lru, el, cache_el_t *, lru );
        cache_do_destroy_el(el);

    }
    hal_mutex_unlock( &c->lock );

    hash_uninit(c->hash);
    hal_mutex_destroy( &c->lock );
    return 0;
}
Example #29
//! Place data to cache - find or reuse entry as needed
static errno_t cache_do_write( cache_t *c, long blk, const void *data )
{
    assert(!queue_empty(&c->lru));

    errno_t ret = 0;
    hal_mutex_lock( &c->lock );

    cache_el_t * el = cache_do_find( c, blk );
    if( el == 0 )
    {
        el = (cache_el_t *)queue_last(&c->lru);

        // if valid and dirty - must flush!

        hash_remove( c->hash, el );

        el->valid = 1;
        el->blk = blk;

        assert(!hash_insert( c->hash, el));
        SHOW_FLOW( 10, "Cache w miss blk %ld", blk );
    }
    else
    {
        assert(el->valid);
        assert(el->blk == blk);
        SHOW_FLOW( 9, "Cache w _HIT_ blk %ld", blk );
    }

    el->dirty = 1;

    // remove
    queue_remove( &c->lru, el, cache_el_t *, lru );

    // insert at start
    queue_enter_first( &c->lru, el, cache_el_t *, lru );

    memcpy( el->data, data, c->page_size );

//done:
    hal_mutex_unlock( &c->lock );
    return ret;
}
Example #30
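// Worker that actually puts a VM thread to sleep: report it stopped, release the spinlock recorded by phantom_thread_put_asleep(), and wait for wakeup on the shared condition.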
void phantom_thread_sleep_worker( struct data_area_4_thread *thda )
{
    if(phantom_virtual_machine_stop_request)
    {
        SHOW_FLOW0( 5, "VM thread will die now");
        hal_exit_kernel_thread();
    }

    SHOW_FLOW0( 5, "VM thread will sleep for sleep");
    hal_mutex_lock( &interlock_mutex );

    phantom_virtual_machine_threads_stopped++;
    hal_cond_broadcast( &phantom_snap_wait_4_vm_enter );
    //SHOW_FLOW0( 5, "VM thread reported sleep, will wait now");

    if( thda->spin_to_unlock )
    {
        VM_SPIN_UNLOCK( (*thda->spin_to_unlock) );
        thda->spin_to_unlock = 0;
    }
    else
    {
        if(thda->sleep_flag)
            SHOW_ERROR(0, "Warn: vm th (da %x) sleep, no spin unlock requested", thda);
    }


    //while(thda->sleep_flag)        hal_cond_wait( &(thda->wakeup_cond), &interlock_mutex );
    while(thda->sleep_flag)
    {
        SHOW_ERROR(0, "Warn: old vm sleep used, th (da %x)", thda);
        hal_cond_wait( &vm_thread_wakeup_cond, &interlock_mutex );
    }

// TODO if snap is active someone still can wake us up - resleep for snap then!

    //SHOW_FLOW0( 5, "VM thread awaken, will report wakeup");
    phantom_virtual_machine_threads_stopped--;

    hal_mutex_unlock( &interlock_mutex );
    SHOW_FLOW0( 5, "VM thread awaken");
}