// Record a profiler sample for the given interrupted instruction pointer.
// Also (temporarily - see TODOs) derives a per-CPU load estimate from
// idle/busy hit counts accumulated on this same interrupt.
void profiler_register_interrupt_hit( addr_t ip )
{
#if 1
    // TODO wrong - loop through all CPUs or do it on percpu (on ia32 == APIC) timer.
    // TODO In fact we must do it on regular timer interrupt - this one costs too much
    int cpu = GET_CPU_ID();
    int idle = percpu_idle_status[cpu];

    // Bucket [1] counts hits while idle, bucket [0] while busy
    percpu_idle_count[cpu][idle ? 1 : 0]++;

    int sum = percpu_idle_count[cpu][0] + percpu_idle_count[cpu][1];
    if( sum > 100 )
    {
        // Load = busy fraction of the last ~100 samples, in percent
        int load_percent = percpu_idle_count[cpu][0] * 100 / sum;
        percpu_cpu_load[cpu] = load_percent;
        // Start a fresh sampling window (the original reset these twice - redundant)
        percpu_idle_count[cpu][0] = percpu_idle_count[cpu][1] = 0;
    }
#endif

    if(!profiler_inited)
        return;

    ip /= PROFILER_MAP_DIVIDER;

    // FIX: was 'ip > PROFILER_MAP_SIZE' - off by one; ip == PROFILER_MAP_SIZE
    // would index one element past the end of map[]
    if( ip >= PROFILER_MAP_SIZE )
    {
        static int warn_over = 0;
        if(!warn_over)
        {
            warn_over = 1;
            SHOW_ERROR( 0, "profiler ip overflow %p", (void*)(ip*PROFILER_MAP_DIVIDER) );
        }
        return;
    }

    map[ip]++;
    total_count++;

#if PROFILER_REGULAR_DUMP_TO_LOG
    if( (total_count % 10000) == 0 )
        profiler_dump_map();
#endif
}
// First C-level code executed on a freshly created thread's stack.
// Unlocks the scheduler lock taken by the thread that switched to us,
// performs per-thread arch setup, enables interrupts, then runs the
// thread's start function. If the start function returns, the thread
// kills itself; falling past t_kill_thread() is a fatal error.
void phantom_thread_c_starter(void)
{
    void (*func)(void *);
    void *arg;
    phantom_thread_t *t;

    t = GET_CURRENT_THREAD();

    // Snapshot entry point and argument set up by the thread creator
    arg = t->start_func_arg;
    func = t->start_func;

    // Thread switch locked it before switching into us, we have to unlock
    hal_spin_unlock(&schedlock);

#if DEBUG
    printf("---- !! phantom_thread_c_starter !! ---\n");
#endif

    // Record which CPU this thread first ran on
    t->cpu_id = GET_CPU_ID();

    arch_float_init();

    // We're first time running here, set arch specific things up
    // NB!! BEFORE enablings interrupts!
    arch_adjust_after_thread_switch(t);

    hal_sti(); // Make sure new thread is started with interrupts on

#if 0
    // usermode loader does it himself
    if( THREAD_FLAG_USER & t->thread_flags )
    {
        //switch_to_user_mode();
    }
#endif

    // Run the thread body; does not normally return control here
    func(arg);

    // Thread body returned - tear the thread down; t_kill_thread must not return
    t_kill_thread( t->tid );
    panic("thread %d returned from t_kill_thread", t->tid );
}
// Soft-interrupt entry of the scheduler: decides whether a thread switch
// may happen right now and, if so, performs it. During a kernel panic all
// CPUs but CPU 0 are parked here forever.
void phantom_scheduler_soft_interrupt(void)
{
    if(panic_reenter)
    {
        // Stop all CPUs except for main one - TODO is it right?
        if(GET_CPU_ID() != 0)
            while(1)
                hal_wait_for_interrupt();
        return;
    }

    //#warning dummy?
    //putchar("`");

    // if curr thread disables preemption or scheduler lock is here,
    // don't interfere! BUT - if thread has sleep_flags, then we are
    // called to switch it off, so do it!
#if 1
    // Active variant: a sleeping thread (sleep_flags != 0) is always
    // switched out, even under preemption-disable or a held schedlock.
    if( (GET_CURRENT_THREAD()->sleep_flags == 0 ) && (hal_is_preemption_disabled() || schedlock.lock) )
        return;
#else
    // Alternative variant: a held schedlock blocks the switch
    // unconditionally, sleep_flags only overrides preemption-disable.
    if(schedlock.lock)
        return;

    if( (GET_CURRENT_THREAD()->sleep_flags == 0) && hal_is_preemption_disabled() )
        return;
#endif

    //if(phantom_scheduler_soft_interrupt_reenter) panic("phantom_scheduler_soft_interrupt_reenter");

    // Reentry counter brackets the switch; the panic check above is disabled
    phantom_scheduler_soft_interrupt_reenter++;

    phantom_thread_switch();

    phantom_scheduler_soft_interrupt_reenter--;
}
// Initialize the fields common to all threads: priority, ctty, user and
// kernel stacks (each with an unmapped guard page at the bottom), wait
// state, and queue links.
//
// NB: malloc takes a mutex and can not be used on this path, so stacks
// come from physalloc via hal_pv_alloc(), which is spinlock-protected.
static void common_thread_init(phantom_thread_t *t, int stacksize )
{
    //t->thread_flags = 0;
    t->priority = THREAD_PRIO_NORM;
    t->cpu_id = GET_CPU_ID();

#if CONF_NEW_CTTY
    t_make_ctty( t );
#else
    if( 0 == t->ctty )
        t->ctty = wtty_init( WTTY_SMALL_BUF );
#endif

    // malloc uses mutex, so we have to use physalloc which is protected with spinlocks
    physaddr_t pa;

    // User stack: allocate one extra page and unmap it at the lowest
    // address so a stack overflow faults instead of corrupting memory.
    t->stack_size = stacksize;
    //t->stack = calloc( 1, stacksize );
    hal_pv_alloc( &pa, &(t->stack), stacksize+PAGE_SIZE );
    hal_page_control( pa, t->stack, page_unmap, page_noaccess ); // poor man's guard page - TODO support in page fault
    t->stack_pa = pa;
    SHOW_FLOW( 5, "main stk va %p pa %p", t->stack, (void *)pa );
    //assert(t->stack != 0);

    // Kernel stack: same extra-page guard trick.
    t->kstack_size = stacksize;
    //t->kstack = calloc( 1, stacksize );
    hal_pv_alloc( &pa, &(t->kstack), stacksize+PAGE_SIZE );
    hal_page_control( pa, t->kstack, page_unmap, page_noaccess ); // poor man's guard page - TODO support in page fault
    t->kstack_pa = pa;
    SHOW_FLOW( 5, "kern stk va %p pa %p", t->kstack, (void *)pa );

#if ARCH_mips
    // On mips we need unmapped kernel stack for mapping on MIPS is
    // done with exceptions too and unmapped stack is fault forever.
    // We achieve this by setting stack virtual address to its
    // physical address | 0x8000000 - this virt mem area is direct
    // mapped to physmem at 0
    assert( (addr_t)phystokv(t->kstack_pa) > 0x80000000 );
    assert( (addr_t)phystokv(t->kstack_pa) < 0xC0000000 );
    t->kstack_top = phystokv(t->kstack_pa) +t->kstack_size-4; // Why -4?
#else
    t->kstack_top = t->kstack+t->kstack_size-4; // Why -4?
#endif
    //assert(t->kstack != 0);

    t->owner = 0;
    //t->u = 0;
    t->pid = NO_PID;
    t->thread_flags = 0; // FIX: stray double semicolon ('0;;') removed

    t->waitcond = 0;

    hal_spin_init( &(t->waitlock));
    queue_init(&(t->chain));
    queue_init(&(t->runq_chain));

    t->sw_unlock = 0;

    t->preemption_disabled = 0;
}