/*
 * rt_dump - dump RT (OCBP) scheduler state to the console for debugging.
 *
 * Should not need the scheduler lock here: this only prints state, it
 * never modifies it (a racy read during a dump is acceptable).
 *
 * Prints, in order: per-PCPU info for every cpu in prv->cpus, then one
 * line per vcpu on the global runqueue.
 *
 * NOTE(review): this excerpt is truncated — the function's closing brace
 * (and presumably a per-domain/per-vcpu dump using iter_sdom/iter_svc,
 * which are declared but not yet used here) lies beyond this chunk.
 */
static void rt_dump(const struct scheduler *ops)
{
    struct list_head *iter_sdom, *iter_svc, *runq, *iter;
    struct rt_private *prv = RT_PRIV(ops);
    struct rt_vcpu *svc;
    int cpu = 0;
    int loop = 0;

    printtime();
    printk("OCBP:_dumpV Priority Scheme: OCBP\n");

    /* Per-physical-CPU state. */
    printk("PCPU info: \n");
    for_each_cpu(cpu, &prv->cpus)
        rt_dump_pcpu(ops, cpu);

    /* Global runqueue: one numbered line per queued vcpu. */
    printk("OCBP:_dumpV Global RunQueue info: \n");
    loop = 0;
    runq = RUNQ(ops);
    list_for_each( iter, runq )
    {
        svc = __runq_elem(iter);
        printk("\tOCBP:_dumpV RunQ no:%3d: ", ++loop);
        rt_dump_vcpu(svc);
    }
/*
 * Insert @svc into @cpu's runqueue, which is kept sorted by descending
 * priority: walk forward to the first element with a strictly lower
 * priority and queue @svc in front of it.
 *
 * A vcpu that yielded is additionally pushed behind one lower-priority
 * runnable vcpu (if one exists) so the yield actually takes effect; the
 * next periodic runq_sort will pull it forward again within 30ms if the
 * queue has grown long.
 */
static inline void __runq_insert(unsigned int cpu, struct csched_vcpu *svc)
{
    const struct list_head * const runq = RUNQ(cpu);
    struct list_head *where;

    BUG_ON( __vcpu_on_runq(svc) );
    BUG_ON( cpu != svc->vcpu->processor );

    /* Find the insertion point: first entry with lower priority than @svc. */
    list_for_each( where, runq )
    {
        if ( svc->pri > __runq_elem(where)->pri )
            break;
    }

    /*
     * Yield handling: step past one runnable (non-idle) lower-priority
     * vcpu.  The non-idle check also means we never step past the list
     * head, as the sanity check below asserts.
     */
    if ( (svc->flags & CSCHED_FLAG_VCPU_YIELD) &&
         __runq_elem(where)->pri > CSCHED_PRI_IDLE )
    {
        where = where->next;

        /* Some sanity checks */
        BUG_ON(where == runq);
    }

    list_add_tail(&svc->runq_elem, where);
}