// Read up to cnt bytes from the tty ring buffer.
// Returns the number of bytes read (may be less than cnt), 0 when
// nowait is set and no data is available, or -EPIPE when the tty
// is stopped before any data was read. If the tty stops mid-read,
// the partial count read so far is returned.
int wtty_read(wtty_t *w, char *data, int cnt, bool nowait)
{
    int done = 0;

    assert(w);
    hal_mutex_lock(&w->mutex);
    SHOW_FLOW( 11, "wtty rd %p", w );

    if(!w->started)
    {
        done = -EPIPE;
        goto exit;
    }

    while( cnt > 0 )
    {
        // Non-blocking mode: stop as soon as the buffer runs dry
        if( nowait && _wtty_is_empty(w) )
            break;

        if(!w->started)
            goto exit;          // stopped mid-read - return partial count

        // Block until a writer provides data (or the tty is stopped)
        while( _wtty_is_empty(w) )
        {
            hal_cond_broadcast( &w->wcond );    // let writers refill while we wait
            hal_cond_wait( &w->rcond, &w->mutex );
            if(!w->started)
                goto exit;
        }

        *data++ = w->buf[w->getpos++];
        done++;
        cnt--;
        wtty_wrap(w);           // wrap ring positions past buffer end
    }

    hal_cond_broadcast( &w->wcond );    // we freed space - wake blocked writers

exit:
    hal_mutex_unlock(&w->mutex);
    return done;
}
// Write up to cnt bytes into the tty ring buffer.
// Returns the number of bytes written (may be less than cnt), 0 when
// nowait is set and the buffer is full, or -EPIPE when the tty is
// stopped before any data was written. If the tty stops mid-write,
// the partial count written so far is returned.
int wtty_write(wtty_t *w, const char *data, int cnt, bool nowait)
{
    int done = 0;

    assert(w);
    hal_mutex_lock(&w->mutex);

    // FIX: test w->started under the mutex, as wtty_read() does.
    // The original checked it before locking, racing wtty_stop().
    if(!w->started)
    {
        done = -EPIPE;
        goto exit;
    }

    wtty_wrap(w);
    SHOW_FLOW( 11, "wtty wr %p", w );

    while( cnt > 0 )
    {
        // Non-blocking mode: stop as soon as the buffer fills up
        if( nowait && _wtty_is_full(w) )
            break;

        // Block until a reader frees space (or the tty is stopped)
        while( _wtty_is_full(w) )
        {
            hal_cond_broadcast( &w->rcond );    // let readers drain while we wait
            hal_cond_wait( &w->wcond, &w->mutex );
            if(!w->started)
                goto exit;      // stopped mid-write - return partial count
        }

        w->buf[w->putpos++] = *data++;
        done++;
        cnt--;
        wtty_wrap(w);           // wrap ring positions past buffer end
    }

    hal_cond_broadcast( &w->rcond );    // new data available - wake readers

exit:
    hal_mutex_unlock(&w->mutex);
    return done;
}
// Park the calling VM thread for the duration of a snapshot.
// Reports itself stopped to the snapper, sleeps until released,
// then reports itself awake again. Never returns if a global VM
// stop has been requested.
void phantom_thread_wait_4_snap( void )
{
    if(phantom_virtual_machine_stop_request)
    {
        // Whole VM is shutting down - don't park, terminate instead
        SHOW_FLOW0( 4, "VM thread will die now");
        hal_exit_kernel_thread();
    }

    SHOW_FLOW0( 5, "VM thread will sleep for snap");
    hal_mutex_lock( &interlock_mutex );

    // Tell the snapper one more thread reached the stop point
    phantom_virtual_machine_threads_stopped++;
    hal_cond_broadcast( &phantom_snap_wait_4_vm_enter );
    SHOW_FLOW0( 5, "VM thread reported sleep, will wait now");

    //while(phantom_virtual_machine_snap_request)
    // NOTE(review): the wait is not wrapped in a predicate loop (see the
    // commented-out while above) - a spurious wakeup would release this
    // thread before the snapshot completes; presumably tolerated by the
    // snapper protocol - confirm.
    hal_cond_wait( &phantom_vm_wait_4_snap, &interlock_mutex );

    SHOW_FLOW0( 5, "VM thread awaken, will report wakeup");

    // Tell the snapper we left the stop point
    phantom_virtual_machine_threads_stopped--;
    hal_cond_broadcast( &phantom_snap_wait_4_vm_leave );
    hal_mutex_unlock( &interlock_mutex );

    SHOW_FLOW0( 5, "VM thread returns to activity");
}
// Blocking read of a single byte from the tty ring buffer.
// Returns the byte, or 0 if the tty is (or becomes) stopped.
int wtty_getc(wtty_t *w)
{
    int ret;

    assert(w);
    SHOW_FLOW( 11, "wtty getc %p", w );
    hal_mutex_lock(&w->mutex);

    if(!w->started)
    {
        ret = 0;
        goto exit;
    }

    // FIX: use _wtty_is_empty() like wtty_read()/wtty_write() do,
    // instead of the hand-rolled getpos == putpos comparison.
    while( _wtty_is_empty(w) )
    {
        hal_cond_wait( &w->rcond, &w->mutex );
        if(!w->started)
        {
            ret = 0;
            goto exit;
        }
    }

    wtty_wrap(w);
    ret = w->buf[w->getpos++];
    hal_cond_broadcast( &w->wcond ); // signal writers to continue

exit:
    hal_mutex_unlock(&w->mutex);
    return ret;
}
// Release VM threads parked for a snapshot and wait for them to wake.
// Only the thread that drops the snap request count to zero performs
// the broadcast and the wait; other concurrent callers return early.
void phantom_snapper_reenable_threads( void )
{
    SHOW_FLOW0( 5, "Snapper will reenable threads");
    hal_mutex_lock( &interlock_mutex );
    phantom_virtual_machine_snap_request--; // May wake up now

    if(phantom_virtual_machine_snap_request > 0)
    {
        // I'm not one here - another snap request is still pending,
        // so the threads must stay parked
        hal_mutex_unlock( &interlock_mutex );
        return;
    }

    SHOW_FLOW( 5, "Snapper sleep request is %d, will broadcast", phantom_virtual_machine_snap_request);
    hal_cond_broadcast( &phantom_vm_wait_4_snap );

    SHOW_FLOW( 5, "Snapper will wait for %d threads to awake", phantom_virtual_machine_threads_stopped);
#if VM_SYNC_NOWAIT_BLOCKED
    // Threads blocked elsewhere won't re-report; don't wait for them
    while( phantom_virtual_machine_threads_stopped - phantom_virtual_machine_threads_blocked > 0 )
#else
    while( phantom_virtual_machine_threads_stopped > 0 )
#endif
    {
        // Each awakening thread decrements the counter and signals here
        hal_cond_wait( &phantom_snap_wait_4_vm_leave, &interlock_mutex );
        SHOW_FLOW( 5, "Snapper: %d threads still sleep", phantom_virtual_machine_threads_stopped);
    }

    hal_mutex_unlock( &interlock_mutex );
    SHOW_FLOW0( 5, "Snapper done waiting for awake");
}
// Request DPC subsystem shutdown and wake the worker so it can exit.
void dpc_finish()
{
    dpc_stop_request = 1;

    // Now wake up DPC worker
    // NOTE(review): the flag store and the broadcast are not protected by
    // the mutex the worker presumably waits with - if the worker tests the
    // flag and blocks between these two steps the wakeup is lost (classic
    // lost-wakeup race). Confirm against the worker's wait loop.
    hal_cond_broadcast( &dpc_thread_sleep_stone );
}
// Discard all buffered data in the tty ring buffer.
void wtty_clear(wtty_t * w)
{
    assert(w);

    hal_mutex_lock(&w->mutex);

    // Resetting both ring positions makes the buffer empty
    w->putpos = 0;
    w->getpos = 0;

    // There is space again - release any writers blocked on a full buffer
    hal_cond_broadcast( &w->wcond );

    hal_mutex_unlock(&w->mutex);
}
//! Put filled event onto the main event q void ev_put_event(ui_event_t *e) { if(!ev_engine_active) return; // Just ignore SHOW_FLOW(8, "%p", e); hal_mutex_lock( &ev_main_q_mutex ); ev_events_in_q++; queue_enter(&ev_main_event_q, e, struct ui_event *, echain); hal_cond_broadcast( &ev_have_event ); hal_mutex_unlock( &ev_main_q_mutex ); }
void phantom_thread_wake_up( struct data_area_4_thread *thda ) { // TODO of course it is a bottleneck - need separate sync objects for threads // we can't keep usual mutexes in objects for objects are in paged mem and mutex uses // spinlock to run its internals // TODO implement old unix style sleep( var address )/wakeup( var address )? hal_mutex_lock( &interlock_mutex ); thda->sleep_flag--; //if(thda->sleep_flag <= 0) hal_cond_broadcast( &thda->wakeup_cond ); if(thda->sleep_flag <= 0) hal_cond_broadcast( &vm_thread_wakeup_cond ); hal_mutex_unlock( &interlock_mutex ); }
// Put a VM thread to sleep on behalf of the interpreter.
// Reports the thread as stopped (so a snapshot won't wait for it),
// releases the caller-supplied spinlock (the handoff that makes the
// sleep race-free), then waits until sleep_flag is cleared.
void phantom_thread_sleep_worker( struct data_area_4_thread *thda )
{
    if(phantom_virtual_machine_stop_request)
    {
        // Whole VM is shutting down - don't sleep, terminate instead
        SHOW_FLOW0( 5, "VM thread will die now");
        hal_exit_kernel_thread();
    }

    SHOW_FLOW0( 5, "VM thread will sleep for sleep");
    hal_mutex_lock( &interlock_mutex );

    // Count ourselves as stopped so the snapper doesn't wait for us
    phantom_virtual_machine_threads_stopped++;
    hal_cond_broadcast( &phantom_snap_wait_4_vm_enter );
    //SHOW_FLOW0( 5, "VM thread reported sleep, will wait now");

    if( thda->spin_to_unlock )
    {
        // Release the caller's spinlock only after taking interlock_mutex,
        // so no wakeup can slip in between unlock and wait
        VM_SPIN_UNLOCK( (*thda->spin_to_unlock) );
        thda->spin_to_unlock = 0;
    }
    else
    {
        if(thda->sleep_flag)
            SHOW_ERROR(0, "Warn: vm th (da %x) sleep, no spin unlock requested", thda);
    }

    //while(thda->sleep_flag)        hal_cond_wait( &(thda->wakeup_cond), &interlock_mutex );

    // Legacy sleep path: wait on the shared wakeup condition until
    // phantom_thread_wake_up() clears our sleep_flag
    while(thda->sleep_flag)
    {
        SHOW_ERROR(0, "Warn: old vm sleep used, th (da %x)", thda);
        hal_cond_wait( &vm_thread_wakeup_cond, &interlock_mutex );
    }
    // TODO if snap is active someone still can wake us up - resleep for snap then!

    //SHOW_FLOW0( 5, "VM thread awaken, will report wakeup");
    phantom_virtual_machine_threads_stopped--;

    hal_mutex_unlock( &interlock_mutex );
    SHOW_FLOW0( 5, "VM thread awaken");
}
// Worker thread draining the deferred refcount-decrement buffer.
// The buffer is split in two halves used alternately: producers fill
// one half while this thread drains the other.
static void deferred_refdec_thread(void *a)
{
    t_current_set_name("RefDec");
    t_current_set_priority( THREAD_PRIO_HIGH );

    while(!stop)
    {
        hal_mutex_lock( &deferred_refdec_mutex );
        // TODO timed wait
        hal_cond_wait( &start_refdec_cond, &deferred_refdec_mutex );

        STAT_INC_CNT(DEFERRED_REFDEC_RUNS);

        // Decide where to switch put pointer
        int new_put_ptr = REFDEC_BUFFER_HALF + 1; // first one used to check low half overflow

        // Was in upper page?
        if( refdec_put_ptr >= REFDEC_BUFFER_HALF )
            new_put_ptr = 0;

        // Atomically redirect producers to the other half; last_pos is
        // the old put position = end of the half we will drain
        //int last_pos = atomic_set( &refdec_put_ptr, new_put_ptr);
        int last_pos = ATOMIC_FETCH_AND_SET( &refdec_put_ptr, new_put_ptr);
        int start_pos = (last_pos >= REFDEC_BUFFER_HALF) ? REFDEC_BUFFER_HALF+1 : 0;

        // Check that all VM threads are either sleep or passed an bytecode instr boundary
        phantom_check_threads_pass_bytecode_instr_boundary();

        int pos;
        for( pos = start_pos; pos < last_pos; pos++ )
        {
            pvm_object_storage_t volatile *os;
            os = refdec_buffer[pos];
            // A deferred decrement must never target an already-dead object
            assert( os->_ah.refCount > 0);
            do_ref_dec_p((pvm_object_storage_t *)os);
        }

        hal_cond_broadcast( &end_refdec_cond );
        hal_mutex_unlock( &deferred_refdec_mutex );
    }
}
// Put one byte into the ring buffer and wake any blocked readers.
// NOTE(review): no locking, no fullness check and no wtty_wrap() here -
// presumably the caller holds w->mutex and guarantees space and wrapping;
// confirm at call sites.
static __inline__ void wtty_doputc(wtty_t *w, int c)
{
    w->buf[w->putpos++] = c;
    hal_cond_broadcast( &w->rcond );
}
// all blocked calls return void wtty_stop(wtty_t * w) { w->started = 0; hal_cond_broadcast( &w->rcond ); hal_cond_broadcast( &w->wcond ); }