/* * Debug related code, dump vcpu/cpu information */ static void rt_dump_vcpu(struct rt_vcpu *svc) { char *cpustr = keyhandler_scratch; if ( svc == NULL ) { printk("NULL!\n"); return; } cpumask_scnprintf(cpustr, sizeof(cpustr), svc->vcpu->cpu_hard_affinity); printk("OCBP:_dumpP: [%5d.%-2d] cpu %d, Period %"PRId64", Budget_low %"PRId64", Budget_high %"PRId64", Deadline %"PRId64",Criticality %d,Offline_flag %d, cur_b=%"PRId64",onR=%d runnable=%d cpu_hard_affinity=%s", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id, svc->vcpu->processor, svc->period, svc->budget_low, svc->budget_high, svc->deadline, svc->criticality_vcpu, svc->offl_flag, svc->cur_budget, __vcpu_on_runq(svc), vcpu_runnable(svc->vcpu), cpustr); memset(cpustr, 0, sizeof(char)*1024); cpumask_scnprintf(cpustr, sizeof(cpustr), cpupool_scheduler_cpumask(svc->vcpu->domain->cpupool)); printk("cpupool=%s\n", cpustr); }
/*
 * Insert @svc into @cpu's runqueue, which is kept sorted by priority
 * (highest ->pri first).  The vcpu must not already be queued and must
 * be assigned to @cpu.
 */
static inline void __runq_insert(unsigned int cpu, struct csched_vcpu *svc)
{
    const struct list_head * const runq = RUNQ(cpu);
    struct list_head *iter;

    /* Sanity: not already queued, and queued on the pcpu it runs on. */
    BUG_ON( __vcpu_on_runq(svc) );
    BUG_ON( cpu != svc->vcpu->processor );

    /* Walk forward until we find the first entry with lower priority;
     * inserting before it (list_add_tail below) keeps the queue sorted. */
    list_for_each( iter, runq )
    {
        const struct csched_vcpu * const iter_svc = __runq_elem(iter);
        if ( svc->pri > iter_svc->pri )
            break;
    }

    /* If the vcpu yielded, try to put it behind one lower-priority
     * runnable vcpu if we can. The next runq_sort will bring it forward
     * within 30ms if the queue too long.
     *
     * Precedence note: `&` binds tighter than `&&`, so this tests
     * (flags & CSCHED_FLAG_VCPU_YIELD) first, as intended.
     *
     * NOTE(review): __runq_elem(iter) is dereferenced even when the loop
     * above ran to completion (iter == runq, the list head).  This is only
     * safe if an idle vcpu with pri == CSCHED_PRI_IDLE is always present
     * on the runq, guaranteeing an earlier break for any runnable vcpu --
     * confirm that invariant holds in this scheduler. */
    if ( svc->flags & CSCHED_FLAG_VCPU_YIELD && __runq_elem(iter)->pri > CSCHED_PRI_IDLE )
    {
        /* Step past exactly one lower-priority runnable vcpu. */
        iter=iter->next;
        /* Some sanity checks */
        BUG_ON(iter == runq);
    }

    /* list_add_tail w.r.t. iter == insert immediately before iter. */
    list_add_tail(&svc->runq_elem, iter);
}
/*
 * Take @svc off the runqueue it currently sits on.
 *
 * The vcpu must be queued when this is called; removing an unqueued vcpu
 * indicates scheduler state corruption, so we BUG in that case.
 */
static inline void
__runq_remove(struct csched_vcpu *svc)
{
    /* Catch double-removal / stale-state bugs early. */
    BUG_ON( !__vcpu_on_runq(svc) );

    /* Re-initialise the node on removal, so that subsequent
     * on-runq checks on this element behave as expected. */
    list_del_init(&svc->runq_elem);
}