Example No. 1
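    /*
     * Per-vCPU migration loop (apparently the continuation of the
     * sched_move_domain() fragment in Example No. 2): each vCPU's timers are
     * migrated, its affinity reset, its processor updated under the scheduler
     * lock, and it is inserted into the new scheduler before the old
     * scheduler's per-vCPU data is freed.
     */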
    for_each_vcpu ( d, v )
    {
        spinlock_t *lock;

        vcpudata = v->sched_priv;

        migrate_timer(&v->periodic_timer, new_p);
        migrate_timer(&v->singleshot_timer, new_p);
        migrate_timer(&v->poll_timer, new_p);

        cpumask_setall(v->cpu_hard_affinity);
        cpumask_setall(v->cpu_soft_affinity);

        lock = vcpu_schedule_lock_irq(v);
        v->processor = new_p;
        /*
         * With v->processor modified we must not
         * - make any further changes assuming we hold the scheduler lock,
         * - use vcpu_schedule_unlock_irq().
         */
        spin_unlock_irq(lock);

        v->sched_priv = vcpu_priv[v->vcpu_id];
        if ( !d->is_dying )
            sched_move_irqs(v);

        new_p = cpumask_cycle(new_p, c->cpu_valid);

        SCHED_OP(c->sched, insert_vcpu, v);

        SCHED_OP(old_ops, free_vdata, vcpudata);
    }
Example No. 2
int sched_move_domain(struct domain *d, struct cpupool *c)
{
    struct vcpu *v;
    unsigned int new_p;
    void **vcpu_priv;
    void *domdata;
    void *vcpudata;
    struct scheduler *old_ops;
    void *old_domdata;

    domdata = SCHED_OP(c->sched, alloc_domdata, d);
    if ( domdata == NULL )
        return -ENOMEM;

    vcpu_priv = xzalloc_array(void *, d->max_vcpus);
    if ( vcpu_priv == NULL )
    {
        SCHED_OP(c->sched, free_domdata, domdata);
        return -ENOMEM;
    }

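    /*
     * Allocate per-vCPU data under the new cpupool's scheduler; on failure,
     * release everything allocated so far and bail out.
     */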
    for_each_vcpu ( d, v )
    {
        vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata, v, domdata);
        if ( vcpu_priv[v->vcpu_id] == NULL )
        {
            for_each_vcpu ( d, v )
                xfree(vcpu_priv[v->vcpu_id]);
            xfree(vcpu_priv);
            SCHED_OP(c->sched, free_domdata, domdata);
            return -ENOMEM;
        }
    }
Example No. 3
int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
    struct domain *d = v->domain;

    /*
     * Initialize processor and affinity settings. The idler, and potentially
     * domain-0 VCPUs, are pinned onto their respective physical CPUs.
     */
    v->processor = processor;
    if ( is_idle_domain(d) || d->is_pinned )
        v->cpu_affinity = cpumask_of_cpu(processor);
    else
        cpus_setall(v->cpu_affinity);

    /* Initialise the per-vcpu timers. */
    init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
               v, v->processor);
    init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn,
               v, v->processor);
    init_timer(&v->poll_timer, poll_timer_fn,
               v, v->processor);

    /* Idle VCPUs are scheduled immediately. */
    if ( is_idle_domain(d) )
    {
        per_cpu(schedule_data, v->processor).curr = v;
        per_cpu(schedule_data, v->processor).idle = v;
        v->is_running = 1;
    }

    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);

    return SCHED_OP(init_vcpu, v);
}
Example No. 4
static void vcpu_migrate(struct vcpu *v)
{
    unsigned long flags;
    int old_cpu;

    vcpu_schedule_lock_irqsave(v, flags);

    /*
     * NB. Check of v->running happens /after/ setting migration flag
     * because they both happen in (different) spinlock regions, and those
     * regions are strictly serialised.
     */
    if ( v->is_running ||
         !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_schedule_unlock_irqrestore(v, flags);
        return;
    }

    /* Switch to new CPU, then unlock old CPU. */
    old_cpu = v->processor;
    v->processor = SCHED_OP(pick_cpu, v);
    spin_unlock_irqrestore(
        &per_cpu(schedule_data, old_cpu).schedule_lock, flags);

    /* Wake on new CPU. */
    vcpu_wake(v);
}
Example No. 5
void sched_destroy_vcpu(struct vcpu *v)
{
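    /*
     * Deactivate the per-vCPU timers, then let the scheduler release its
     * per-vCPU state.
     */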
    kill_timer(&v->periodic_timer);
    kill_timer(&v->singleshot_timer);
    kill_timer(&v->poll_timer);
    SCHED_OP(destroy_vcpu, v);
}
Example No. 6
int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
    struct domain *d = v->domain;

    /*
     * Initialize processor and affinity settings. The idler, and potentially
     * domain-0 VCPUs, are pinned onto their respective physical CPUs.
     */
    v->processor = processor;
    if ( is_idle_domain(d) || d->is_pinned )
        cpumask_copy(v->cpu_hard_affinity, cpumask_of(processor));
    else
        cpumask_setall(v->cpu_hard_affinity);

    cpumask_setall(v->cpu_soft_affinity);

    /* Initialise the per-vcpu timers. */
    init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
               v, v->processor);
    init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn,
               v, v->processor);
    init_timer(&v->poll_timer, poll_timer_fn,
               v, v->processor);

    /* Idle VCPUs are scheduled immediately. */
    if ( is_idle_domain(d) )
    {
        per_cpu(schedule_data, v->processor).curr = v;
        v->is_running = 1;
    }

    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);

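    /* Allocate the scheduler's per-vCPU data and register the vCPU with it. */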
    v->sched_priv = SCHED_OP(DOM2OP(d), alloc_vdata, v, d->sched_priv);
    if ( v->sched_priv == NULL )
        return 1;

    SCHED_OP(DOM2OP(d), insert_vcpu, v);

    return 0;
}
Example No. 7
void vcpu_sleep_nosync(struct vcpu *v)
{
    unsigned long flags;

    vcpu_schedule_lock_irqsave(v, flags);

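    /*
     * Only act if the vCPU really is not runnable: record the runstate
     * change and notify the scheduler through its sleep hook.
     */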
    if ( likely(!vcpu_runnable(v)) )
    {
        if ( v->runstate.state == RUNSTATE_runnable )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());

        SCHED_OP(sleep, v);
    }

    vcpu_schedule_unlock_irqrestore(v, flags);

    TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
}
Example No. 8
void vcpu_wake(struct vcpu *v)
{
    unsigned long flags;

    vcpu_schedule_lock_irqsave(v, flags);

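    /*
     * A runnable vCPU moves to RUNSTATE_runnable and is handed to the
     * scheduler's wake hook; a vCPU that is neither runnable nor blocked
     * has a stale "blocked" runstate downgraded to offline.
     */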
    if ( likely(vcpu_runnable(v)) )
    {
        if ( v->runstate.state >= RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
        SCHED_OP(wake, v);
    }
    else if ( !test_bit(_VPF_blocked, &v->pause_flags) )
    {
        if ( v->runstate.state == RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());
    }

    vcpu_schedule_unlock_irqrestore(v, flags);

    TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
}
Example No. 9
void sched_destroy_domain(struct domain *d)
{
    SCHED_OP(destroy_domain, d);
}
Example No. 10
int sched_init_domain(struct domain *d)
{
    return SCHED_OP(init_domain, d);
}
Example No. 11
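    /*
     * Detach every vCPU of the domain from its current ("old") scheduler,
     * e.g. before handing the domain over to a different cpupool.
     */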
    for_each_vcpu ( d, v )
    {
        SCHED_OP(old_ops, remove_vcpu, v);
    }