/* Re-arm the PET timer after a sample pass; runs on the PET thread. */
void
kperf_timer_pet_rearm(uint64_t elapsed_ticks)
{
	/*
	 * A pet_timer_id out of range means PET has been disabled, so there
	 * is nothing to re-arm.
	 */
	if (pet_timer_id >= kperf_timerc) {
		return;
	}

	/* do not reprogram the timer if it has been shutdown or sampling is off */
	switch (kperf_sampling_status()) {
	case KPERF_SAMPLING_OFF:
		BUF_INFO(PERF_PET_END, SAMPLE_OFF);
		return;
	case KPERF_SAMPLING_SHUTDOWN:
		BUF_INFO(PERF_PET_END, SAMPLE_SHUTDOWN);
		return;
	default:
		break;
	}

	struct kperf_timer *pet_timer = &kperf_timerv[pet_timer_id];

	/* a period of zero means the timer was reprogrammed off -- drop it */
	if (pet_timer->period == 0) {
		return;
	}

	/*
	 * Account for the time the PET sample itself took, being careful
	 * that the subtraction cannot underflow.
	 */
	uint64_t next_period = 0;
	if (pet_timer->period > elapsed_ticks) {
		next_period = pet_timer->period - elapsed_ticks;
	}

	/* never schedule the next PET sample sooner than the minimum period */
	if (next_period < min_period_pet_abstime) {
		next_period = min_period_pet_abstime;
	}

	/*
	 * The PET pass may have taken a while, so read the current time
	 * again rather than reusing a stale timestamp.
	 */
	uint64_t deadline = mach_absolute_time() + next_period;

	BUF_INFO(PERF_PET_SCHED, pet_timer->period, next_period, elapsed_ticks,
	    deadline);

	/* re-schedule with a critical deadline so no slop is applied */
	timer_call_enter(&pet_timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL);
}
void kperf_ipi_handler(void *param) { struct kperf_context ctx; struct kperf_timer *timer = param; assert(timer != NULL); /* Always cut a tracepoint to show a sample event occurred */ BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START, 0); int ncpu = cpu_number(); struct kperf_sample *intbuf = kperf_intr_sample_buffer(); /* On a timer, we can see the "real" current thread */ ctx.cur_thread = current_thread(); ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread)); /* who fired */ ctx.trigger_type = TRIGGER_TYPE_TIMER; ctx.trigger_id = (unsigned int)(timer - kperf_timerv); if (ctx.trigger_id == pet_timer_id && ncpu < machine_info.logical_cpu_max) { kperf_thread_on_cpus[ncpu] = ctx.cur_thread; } /* make sure sampling is on */ unsigned int status = kperf_sampling_status(); if (status == KPERF_SAMPLING_OFF) { BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF); return; } else if (status == KPERF_SAMPLING_SHUTDOWN) { BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN); return; } /* call the action -- kernel-only from interrupt, pend user */ int r = kperf_sample(intbuf, &ctx, timer->actionid, SAMPLE_FLAG_PEND_USER); /* end tracepoint is informational */ BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r); #if defined(__x86_64__) (void)atomic_bit_clear(&(timer->pending_cpus), ncpu, __ATOMIC_RELAXED); #endif /* defined(__x86_64__) */ }
/*
 * Program the timer from the PET thread (legacy entry point).
 *
 * `timer` must be the PET timer ID; `elapsed_ticks` is how long the PET
 * sample pass took, which is subtracted from the next period.  Returns 0
 * on success (including the benign "sampling stopped" cases) or EINVAL
 * for an out-of-range timer.
 *
 * Fixed: kperf_sampling_status() was called twice, so the OFF and
 * SHUTDOWN checks could observe two different states; it is now sampled
 * once, matching kperf_timer_pet_rearm().
 */
int
kperf_timer_pet_set(unsigned timer, uint64_t elapsed_ticks)
{
	static uint64_t pet_min_ticks = 0;
	uint64_t now;
	struct time_trigger *trigger = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/* lazily compute the minimum PET period: ns -> ticks */
	if (pet_min_ticks == 0) {
		nanoseconds_to_absolutetime(MIN_PET_TIMER_NS, &pet_min_ticks);
	}

	if (timer != pet_timer) {
		panic("PET setting with bogus ID\n");
	}

	if (timer >= timerc) {
		return EINVAL;
	}

	/*
	 * Sample the status once; do not reprogram the timer if sampling is
	 * off or it has been shut down.
	 */
	unsigned int status = kperf_sampling_status();
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO1(PERF_PET_END, SAMPLE_OFF);
		return 0;
	}
	if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO1(PERF_PET_END, SAMPLE_SHUTDOWN);
		return 0;
	}

	/* CHECKME: we probably took so damn long in the PET thread,
	 * it makes sense to take the time again.
	 */
	now = mach_absolute_time();
	trigger = &timerv[timer];

	/* if we re-programmed the timer to zero, just drop it */
	if (!trigger->period) {
		return 0;
	}

	/* subtract the time the pet sample took being careful not to underflow */
	if (trigger->period > elapsed_ticks) {
		period = trigger->period - elapsed_ticks;
	}

	/* make sure we don't set the next PET sample to happen too soon */
	if (period < pet_min_ticks) {
		period = pet_min_ticks;
	}

	/* calculate deadline */
	deadline = now + period;

	BUF_INFO(PERF_PET_SCHED, trigger->period, period, elapsed_ticks, deadline);

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&trigger->tcall, deadline, TIMER_CALL_SYS_CRITICAL);

	return 0;
}
void kperf_task_snapshot_sample(struct kperf_task_snapshot *tksn, struct kperf_context *ctx) { thread_t thread; task_t task; BUF_INFO(PERF_TK_SNAP_SAMPLE | DBG_FUNC_START); assert(tksn != NULL); assert(ctx != NULL); thread = ctx->cur_thread; task = get_threadtask(thread); tksn->kptksn_flags = 0; if (task->effective_policy.tep_darwinbg) { tksn->kptksn_flags |= KPERF_TASK_FLAG_DARWIN_BG; } if (task->requested_policy.trp_role == TASK_FOREGROUND_APPLICATION) { tksn->kptksn_flags |= KPERF_TASK_FLAG_FOREGROUND; } if (task->requested_policy.trp_boosted == 1) { tksn->kptksn_flags |= KPERF_TASK_FLAG_BOOSTED; } #if CONFIG_MEMORYSTATUS if (memorystatus_proc_is_dirty_unsafe(task->bsd_info)) { tksn->kptksn_flags |= KPERF_TASK_FLAG_DIRTY; } #endif tksn->kptksn_suspend_count = task->suspend_count; tksn->kptksn_pageins = task->pageins; tksn->kptksn_user_time_in_terminated_threads = task->total_user_time; tksn->kptksn_system_time_in_terminated_threads = task->total_system_time; BUF_INFO(PERF_TK_SNAP_SAMPLE | DBG_FUNC_END); }
/* Arm `timer` for its next deadline, one period past `now`. */
static void
kperf_timer_schedule(struct kperf_timer *timer, uint64_t now)
{
	BUF_INFO(PERF_TM_SCHED, timer->period);

	/* a zero period means the timer was reprogrammed off -- drop it */
	if (!timer->period) {
		return;
	}

	/* arm with a critical deadline so no timer slop is applied */
	timer_call_enter(&timer->tcall, now + timer->period,
	    TIMER_CALL_SYS_CRITICAL);
}