/* program the timer from the pet thread */
int
kperf_timer_pet_set( unsigned timer, uint64_t elapsed_ticks )
{
    static uint64_t pet_min_ticks = 0;

    uint64_t now;
    struct time_trigger *trigger = NULL;
    uint64_t period = 0;
    uint64_t deadline;

    /* compute ns -> ticks */
    if( pet_min_ticks == 0 )
        nanoseconds_to_absolutetime(MIN_PET_TIMER_NS, &pet_min_ticks);

    if( timer != pet_timer )
        panic( "PET setting with bogus ID\n" );

    if( timer >= timerc )
        return EINVAL;

    if( kperf_sampling_status() == KPERF_SAMPLING_OFF )
    {
        BUF_INFO1(PERF_PET_END, SAMPLE_OFF);
        return 0;
    }

    // don't reprogram the timer if it's been shut down
    if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN )
    {
        BUF_INFO1(PERF_PET_END, SAMPLE_SHUTDOWN);
        return 0;
    }

    /* CHECKME: we probably took so damn long in the PET thread,
     * it makes sense to take the time again.
     */
    now = mach_absolute_time();
    trigger = &timerv[timer];

    /* if we re-programmed the timer to zero, just drop it */
    if( !trigger->period )
        return 0;

    /* subtract the time the pet sample took, being careful not to underflow */
    if( trigger->period > elapsed_ticks )
        period = trigger->period - elapsed_ticks;

    /* make sure we don't set the next PET sample to happen too soon */
    if( period < pet_min_ticks )
        period = pet_min_ticks;

    /* calculate deadline */
    deadline = now + period;

    BUF_INFO(PERF_PET_SCHED, trigger->period, period, elapsed_ticks, deadline);

    /* re-schedule the timer, making sure we don't apply slop */
    timer_call_enter( &trigger->tcall, deadline, TIMER_CALL_SYS_CRITICAL );

    return 0;
}
static void
kperf_ipi_handler( void *param )
{
    int r;
    int ncpu;
    struct kperf_sample *intbuf = NULL;
    struct kperf_context ctx;
    struct time_trigger *trigger = param;
    task_t task = NULL;

    /* Always cut a tracepoint to show a sample event occurred */
    BUF_DATA1(PERF_TM_HNDLR | DBG_FUNC_START, 0);

    /* In an interrupt, get the interrupt buffer for this CPU */
    intbuf = kperf_intr_sample_buffer();

    /* On a timer, we can see the "real" current thread */
    ctx.cur_pid = 0; /* remove this? */
    ctx.cur_thread = current_thread();

    task = chudxnu_task_for_thread(ctx.cur_thread);
    if (task)
        ctx.cur_pid = chudxnu_pid_for_task(task);

    /* who fired */
    ctx.trigger_type = TRIGGER_TYPE_TIMER;
    ctx.trigger_id = (unsigned)(trigger - timerv); /* compute timer number */

    ncpu = chudxnu_cpu_number();
    if (ctx.trigger_id == pet_timer && ncpu < machine_info.logical_cpu_max)
        kperf_thread_on_cpus[ncpu] = ctx.cur_thread;

    /* check sampling is on */
    if( kperf_sampling_status() == KPERF_SAMPLING_OFF )
    {
        BUF_INFO1(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF);
        return;
    }
    else if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN )
    {
        BUF_INFO1(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN);
        return;
    }

    /* call the action -- kernel-only from interrupt, pend user */
    r = kperf_sample( intbuf, &ctx, trigger->actionid, SAMPLE_FLAG_PEND_USER );

    /* end tracepoint is informational */
    BUF_INFO1(PERF_TM_HNDLR | DBG_FUNC_END, r);
}
/* program the timer from the PET thread */
void
kperf_timer_pet_rearm(uint64_t elapsed_ticks)
{
    struct kperf_timer *timer = NULL;
    uint64_t period = 0;
    uint64_t deadline;

    /*
     * If the pet_timer_id is invalid, it has been disabled, so this should
     * do nothing.
     */
    if (pet_timer_id >= kperf_timerc) {
        return;
    }

    unsigned int status = kperf_sampling_status();
    /* do not reprogram the timer if it has been shutdown or sampling is off */
    if (status == KPERF_SAMPLING_OFF) {
        BUF_INFO(PERF_PET_END, SAMPLE_OFF);
        return;
    } else if (status == KPERF_SAMPLING_SHUTDOWN) {
        BUF_INFO(PERF_PET_END, SAMPLE_SHUTDOWN);
        return;
    }

    timer = &(kperf_timerv[pet_timer_id]);

    /* if we re-programmed the timer to zero, just drop it */
    if (!timer->period) {
        return;
    }

    /* subtract the time the pet sample took, being careful not to underflow */
    if (timer->period > elapsed_ticks) {
        period = timer->period - elapsed_ticks;
    }

    /* make sure we don't set the next PET sample to happen too soon */
    if (period < min_period_pet_abstime) {
        period = min_period_pet_abstime;
    }

    /* we probably took so long in the PET thread, it makes sense to take
     * the time again.
     */
    deadline = mach_absolute_time() + period;

    BUF_INFO(PERF_PET_SCHED, timer->period, period, elapsed_ticks, deadline);

    /* re-schedule the timer, making sure we don't apply slop */
    timer_call_enter(&(timer->tcall), deadline, TIMER_CALL_SYS_CRITICAL);

    return;
}
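/*
 * For context: min_period_pet_abstime above plays the role that the lazily
 * initialized pet_min_ticks plays in the older kperf_timer_pet_set().  Below
 * is a minimal sketch of how such a floor could be computed once at init
 * time instead.  MIN_PET_PERIOD_NS and kperf_timer_min_period_init() are
 * hypothetical names for this example; nanoseconds_to_absolutetime() is the
 * same conversion used in the functions above.
 */
#define MIN_PET_PERIOD_NS (20 * NSEC_PER_USEC)  /* hypothetical floor for the example */

static uint64_t min_period_pet_abstime = 0;

static void
kperf_timer_min_period_init(void)
{
    /* convert the nanosecond floor into mach absolute-time ticks once */
    nanoseconds_to_absolutetime(MIN_PET_PERIOD_NS, &min_period_pet_abstime);
}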
/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample( struct kperf_sample *sbuf,
              struct kperf_context *context,
              unsigned actionid, boolean_t pend_user )
{
    unsigned sample_what = 0;

    /* check sampling is on, or panic */
    if( kperf_sampling_status() == KPERF_SAMPLING_OFF )
        panic("trigger fired while sampling off");
    else if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN )
        return SAMPLE_SHUTDOWN;

    /* work out what to sample, if anything */
    if( actionid >= actionc )
        return SAMPLE_SHUTDOWN;

    sample_what = actionv[actionid].sample;

    return kperf_sample_internal( sbuf, context, sample_what, pend_user );
}
/* ast callback on a thread */
void
kperf_thread_ast_handler( thread_t thread )
{
    int r = 0;   /* initialized so the error path traces a defined value */
    uint32_t t_chud;
    unsigned sample_what = 0;
    /* we know we're on a thread, so let's do stuff */
    task_t task = NULL;

    /* Don't sample if we are shutting down or off */
    if( kperf_sampling_status() != KPERF_SAMPLING_ON )
        return;

    BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_START, thread);

    /* FIXME: probably want a faster allocator here... :P */
    struct kperf_sample *sbuf = kalloc( sizeof(*sbuf) );
    if( sbuf == NULL )
    {
        /* FIXME: error code */
        BUF_INFO1( PERF_AST_ERROR, 0 );
        goto error;
    }

    /* make a context, take a sample */
    struct kperf_context ctx;
    ctx.cur_thread = thread;
    ctx.cur_pid = -1;

    task = chudxnu_task_for_thread(thread);
    if(task)
        ctx.cur_pid = chudxnu_pid_for_task(task);

    /* decode the chud bits so we know what to sample */
    t_chud = kperf_get_thread_bits(thread);

    if (t_chud & T_AST_NAME)
        sample_what |= SAMPLER_TINFOEX;

    if (t_chud & T_AST_CALLSTACK)
        sample_what |= SAMPLER_USTACK;

    /* do the sample, just of the user stuff */
    r = kperf_sample_internal( sbuf, &ctx, sample_what, FALSE );

    /* free it again */
    kfree( sbuf, sizeof(*sbuf) );

error:
    BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_END, r);
}
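/*
 * For context: the T_AST_NAME / T_AST_CALLSTACK bits tested in the handler
 * above have to be pended onto the thread by whoever requests a user-space
 * sample.  The function below is an illustrative sketch of that pend path,
 * not the authoritative implementation: it assumes kperf_get_thread_bits(),
 * kperf_set_thread_bits() and act_set_kperf() behave as their names suggest.
 */
static int
kperf_ast_pend_sketch( thread_t cur_thread, uint32_t check_flags,
                       uint32_t set_flags )
{
    uint32_t t_chud, set_done = 0;

    /* only makes sense to pend on the current thread */
    if( cur_thread != current_thread() )
        return set_done;

    /* get the current chud bits */
    t_chud = kperf_get_thread_bits(cur_thread);

    /* only pend if this work hasn't already been requested */
    if( !(t_chud & check_flags) )
    {
        /* set the bit on the thread */
        t_chud |= set_flags;
        kperf_set_thread_bits(cur_thread, t_chud);

        /* request the AST so kperf_thread_ast_handler() runs on kernel exit */
        act_set_kperf(cur_thread);
        set_done = 1;
    }

    return set_done;
}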
void
kperf_ipi_handler(void *param)
{
    struct kperf_context ctx;
    struct kperf_timer *timer = param;

    assert(timer != NULL);

    /* Always cut a tracepoint to show a sample event occurred */
    BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START, 0);

    int ncpu = cpu_number();

    struct kperf_sample *intbuf = kperf_intr_sample_buffer();

    /* On a timer, we can see the "real" current thread */
    ctx.cur_thread = current_thread();
    ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread));

    /* who fired */
    ctx.trigger_type = TRIGGER_TYPE_TIMER;
    ctx.trigger_id = (unsigned int)(timer - kperf_timerv);

    if (ctx.trigger_id == pet_timer_id && ncpu < machine_info.logical_cpu_max) {
        kperf_thread_on_cpus[ncpu] = ctx.cur_thread;
    }

    /* make sure sampling is on */
    unsigned int status = kperf_sampling_status();
    if (status == KPERF_SAMPLING_OFF) {
        BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF);
        return;
    } else if (status == KPERF_SAMPLING_SHUTDOWN) {
        BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN);
        return;
    }

    /* call the action -- kernel-only from interrupt, pend user */
    int r = kperf_sample(intbuf, &ctx, timer->actionid, SAMPLE_FLAG_PEND_USER);

    /* end tracepoint is informational */
    BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r);

#if defined(__x86_64__)
    (void)atomic_bit_clear(&(timer->pending_cpus), ncpu, __ATOMIC_RELAXED);
#endif /* defined(__x86_64__) */
}
static void
kperf_timer_handler(void *param0, __unused void *param1)
{
    struct kperf_timer *timer = param0;
    unsigned int ntimer = (unsigned int)(timer - kperf_timerv);
    unsigned int ncpus = machine_info.logical_cpu_max;

    timer->active = 1;

    /* along the lines of do not ipi if we are all shutting down */
    if (kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN) {
        goto deactivate;
    }

    BUF_DATA(PERF_TM_FIRE, ntimer, ntimer == pet_timer_id, timer->period,
             timer->actionid);

    if (ntimer == pet_timer_id) {
        kperf_pet_fire_before();

        /* clean-up the thread-on-CPUs cache */
        bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));
    }

    /* ping all CPUs */
    kperf_mp_broadcast_running(timer);

    /* release the pet thread? */
    if (ntimer == pet_timer_id) {
        /* PET mode is responsible for rearming the timer */
        kperf_pet_fire_after();
    } else {
        /*
         * FIXME: Get the current time from elsewhere.  The next
         * timer's period now includes the time taken to reach this
         * point.  This causes a bias towards longer sampling periods
         * than requested.
         */
        kperf_timer_schedule(timer, mach_absolute_time());
    }

deactivate:
    timer->active = 0;
}
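/*
 * kperf_timer_schedule() itself is not shown above.  Below is a minimal
 * sketch of what the re-arm step presumably looks like, under the assumption
 * that the timer is simply armed one period past the caller-supplied "now";
 * that assumption is exactly why the FIXME above matters, since "now" already
 * includes the handler's own latency.  The _sketch suffix marks this as
 * illustrative rather than the real function.
 */
static void
kperf_timer_schedule_sketch(struct kperf_timer *timer, uint64_t now)
{
    /* a period of zero means the timer has been turned off */
    if (timer->period == 0) {
        return;
    }

    /* arm one full period past the supplied time */
    uint64_t deadline = now + timer->period;

    /* re-schedule the timer, making sure we don't apply slop */
    timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL);
}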
static void
kperf_timer_handler( void *param0, __unused void *param1 )
{
    struct time_trigger *trigger = param0;
    unsigned ntimer = (unsigned)(trigger - timerv);
    unsigned ncpus  = machine_info.logical_cpu_max;

    trigger->active = 1;

    /* along the lines of do not ipi if we are all shutting down */
    if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN )
        goto deactivate;

    /* clean-up the thread-on-CPUs cache */
    bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));

    /* ping all CPUs */
#ifndef USE_SIMPLE_SIGNALS
    kperf_mp_broadcast( kperf_ipi_handler, trigger );
#else
    trigger->fire_count++;
    OSMemoryBarrier();
    kperf_mp_signal();
#endif

    /* release the pet thread? */
    if( ntimer == pet_timer )
    {
        /* timer re-enabled when thread done */
        kperf_pet_thread_go();
    }
    else
    {
        /* re-enable the timer
         * FIXME: get the current time from elsewhere
         */
        uint64_t now = mach_absolute_time();
        kperf_timer_schedule( trigger, now );
    }

deactivate:
    trigger->active = 0;
}
static int
sysctl_sampling( struct sysctl_oid *oidp, struct sysctl_req *req )
{
    int error = 0;
    uint32_t value = 0;

    /* get the old value and process it */
    value = kperf_sampling_status();

    /* copy out the old value, get the new value */
    error = sysctl_handle_int(oidp, &value, 0, req);
    if (error || !req->newptr)
        return (error);

    /* if that worked, and we're writing... */
    if( value )
        error = kperf_sampling_enable();
    else
        error = kperf_sampling_disable();

    return error;
}
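/*
 * Illustrative sketch only: one way sysctl_sampling() could be reached from a
 * user-visible sysctl node.  The dispatcher below and the REQ_SAMPLING
 * request code are assumptions made for this example; SYSCTL_NODE,
 * SYSCTL_PROC and the SYSCTL_HANDLER_ARGS signature are the standard BSD
 * sysctl interfaces.
 */
#define REQ_SAMPLING (1)   /* assumed request code for the example */

static int
kperf_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
    uintptr_t type = (uintptr_t)arg1;

    /* route the request to the matching helper */
    switch (type) {
    case REQ_SAMPLING:
        return sysctl_sampling(oidp, req);
    default:
        return ENOENT;
    }
}

SYSCTL_NODE(, OID_AUTO, kperf, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "kperf");

SYSCTL_PROC(_kperf, OID_AUTO, sampling,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    (void *)REQ_SAMPLING, sizeof(int),
    kperf_sysctl, "I", "Sampling running");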