int sched_init_vcpu(struct vcpu *v, unsigned int processor) 
{
    struct domain *d = v->domain;

    /*
     * Initialize processor and affinity settings. The idler, and potentially
     * domain-0 VCPUs, are pinned onto their respective physical CPUs.
     */
    v->processor = processor;
    if ( is_idle_domain(d) || d->is_pinned )
        v->cpu_affinity = cpumask_of_cpu(processor);
    else
        cpus_setall(v->cpu_affinity);

    /* Initialise the per-vcpu timers. */
    init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
               v, v->processor);
    init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn,
               v, v->processor);
    init_timer(&v->poll_timer, poll_timer_fn,
               v, v->processor);

    /* Idle VCPUs are scheduled immediately. */
    if ( is_idle_domain(d) )
    {
        per_cpu(schedule_data, v->processor).curr = v;
        per_cpu(schedule_data, v->processor).idle = v;
        v->is_running = 1;
    }

    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);

    return SCHED_OP(init_vcpu, v);
}
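
The affinity setup above reduces to a simple rule: pinned VCPUs (the idler, and possibly domain-0 VCPUs) get a single-CPU mask, everything else may run anywhere. Below is a minimal sketch of that idiom using the same old-style cpumask API; the helper name and its 'pin' flag are hypothetical, not part of the Xen source.

/*
 * Illustrative sketch only (hypothetical helper): build the initial
 * affinity mask the way sched_init_vcpu() does above.
 */
static cpumask_t example_initial_affinity(unsigned int processor, int pin)
{
    cpumask_t mask;

    if ( pin )
        mask = cpumask_of_cpu(processor);   /* exactly one CPU allowed */
    else
        cpus_setall(mask);                  /* every CPU allowed */

    return mask;
}

Example #2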
/**
 *	dynamic_irq_init - initialize a dynamically allocated irq
 *	@irq:	irq number to initialize
 */
void dynamic_irq_init(unsigned int irq)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
		return;
	}

	/* Ensure we don't have left over values from a previous use of this irq */
	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	desc->status = IRQ_DISABLED;
	desc->chip = &no_irq_chip;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->msi_desc = NULL;
	desc->handler_data = NULL;
	desc->chip_data = NULL;
	desc->action = NULL;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
	cpus_setall(desc->affinity);
#endif
	spin_unlock_irqrestore(&desc->lock, flags);
}
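
dynamic_irq_init() only resets the descriptor to safe defaults under its lock; a caller is still expected to bind an irq_chip and flow handler afterwards. The following is a hedged sketch of such a caller; the helper name and the choice of dummy_irq_chip with handle_simple_irq are illustrative assumptions, not taken from the code above.

/* Hypothetical caller sketch (needs <linux/irq.h>): 'irq' is assumed to be
 * an otherwise unused number below NR_IRQS. */
static void example_setup_dynamic_irq(unsigned int irq)
{
	dynamic_irq_init(irq);			/* reset descriptor to defaults */
	set_irq_chip_and_handler(irq, &dummy_irq_chip,
				 handle_simple_irq);	/* bind chip + flow handler */
}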
Example #3
static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
{
	u64 gr28, gr29, gr30, gr31;
	struct ia64_pal_retval result = {0, 0, 0, 0};
	struct cache_flush_args args = {0, 0, 0, 0};
	long psr;

	gr28 = gr29 = gr30 = gr31 = 0;
	kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);

	if (gr31 != 0)
		printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu);

	/* Always call Host Pal in int=1 */
	gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
	args.cache_type = gr29;
	args.operation = gr30;
	smp_call_function(remote_pal_cache_flush,
				(void *)&args, 1);
	if (args.status != 0)
		printk(KERN_ERR"pal_cache_flush error!,"
				"status:0x%lx\n", args.status);
	/*
	 * Call Host PAL cache flush
	 * Clear psr.ic when call PAL_CACHE_FLUSH
	 */
	local_irq_save(psr);
	result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
						&result.v0);
	local_irq_restore(psr);
	if (result.status != 0)
		printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld"
				"in1:%lx,in2:%lx\n",
				vcpu, result.status, gr29, gr30);

#if 0
	if (gr29 == PAL_CACHE_TYPE_COHERENT) {
		cpus_setall(vcpu->arch.cache_coherent_map);
		cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
		cpus_setall(cpu_cache_coherent_map);
		cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
	}
#endif
	return result;
}
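
The cache-flush path above follows a common broadcast-then-local pattern: run the flush on every other CPU via smp_call_function(), then perform the local call with interrupts disabled. A generic sketch of that pattern follows; the helper name is hypothetical, and on_each_cpu() provides essentially the same behaviour in a single call.

/* Sketch only: run 'fn' on all other CPUs, then locally with IRQs off. */
static void example_on_all_cpus(void (*fn)(void *), void *arg)
{
	unsigned long flags;

	smp_call_function(fn, arg, 1);	/* other CPUs, wait for completion */
	local_irq_save(flags);
	fn(arg);			/* this CPU, interrupts disabled */
	local_irq_restore(flags);
}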
Example #4
/*
 * This function is used by cpu_hotplug code from stop_machine context.
 * Hence we can avoid needing to take the domain-list lock, despite
 * iterating over the domain list below.
 */
void cpu_disable_scheduler(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int cpu = smp_processor_id();

    for_each_domain ( d )
    {
        for_each_vcpu ( d, v )
        {
            if ( is_idle_vcpu(v) )
                continue;

            if ( (cpus_weight(v->cpu_affinity) == 1) &&
                 cpu_isset(cpu, v->cpu_affinity) )
            {
                printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                        v->domain->domain_id, v->vcpu_id);
                cpus_setall(v->cpu_affinity);
            }

            /*
             * Migrate single-shot timers to CPU0. A new cpu will automatically
             * be chosen when the timer is next re-set.
             */
            if ( v->singleshot_timer.cpu == cpu )
                migrate_timer(&v->singleshot_timer, 0);

            if ( v->processor == cpu )
            {
                set_bit(_VPF_migrating, &v->pause_flags);
                vcpu_sleep_nosync(v);
                vcpu_migrate(v);
            }
        }
    }
}
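
The affinity-breaking condition above ("pinned to exactly the CPU that is going away") is worth isolating. Here is a small sketch with the same cpumask API; the helper is hypothetical, not part of the source above.

/* Illustrative sketch (hypothetical helper): true iff the mask allows
 * exactly one CPU and that CPU is 'cpu'. */
static int example_pinned_to(cpumask_t mask, unsigned int cpu)
{
    return (cpus_weight(mask) == 1) && cpu_isset(cpu, mask);
}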
Example #5
static int sys_vperfctr_control(struct vperfctr *perfctr,
				struct perfctr_struct_buf *argp,
				struct task_struct *tsk)
{
	struct vperfctr_control control;
	int err;
	unsigned int next_cstatus;
	unsigned int nrctrs, i;
	cpumask_t cpumask;

	if (!tsk)
		return -ESRCH;	/* attempt to update unlinked perfctr */

	err = perfctr_copy_from_user(&control, argp, &vperfctr_control_sdesc);
	if (err)
		return err;

	/* Step 1: Update the control but keep the counters disabled.
	   PREEMPT note: Preemption is disabled since we're updating
	   an active perfctr. */
	preempt_disable();
	if (IS_RUNNING(perfctr)) {
		if (tsk == current)
			vperfctr_suspend(perfctr);
		perfctr->cpu_state.cstatus = 0;
		vperfctr_clear_iresume_cstatus(perfctr);
	}
	perfctr->cpu_state.control = control.cpu_control;
	/* remote access note: perfctr_cpu_update_control() is ok */
	cpus_setall(cpumask);
#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
	/* make a stopped vperfctr have an unconstrained cpumask */
	perfctr->cpumask = cpumask;
#endif
	err = perfctr_cpu_update_control(&perfctr->cpu_state, &cpumask);
	if (err < 0) {
		next_cstatus = 0;
	} else {
		next_cstatus = perfctr->cpu_state.cstatus;
		perfctr->cpu_state.cstatus = 0;
		perfctr->updater_tgid = current->tgid;
#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
		perfctr->cpumask = cpumask;
#endif
	}
	preempt_enable_no_resched();

	if (!perfctr_cstatus_enabled(next_cstatus))
		return err;

#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
	/* Step 2: Update the task's CPU affinity mask.
	   PREEMPT note: Preemption must be enabled for set_cpus_allowed(). */
	if (control.cpu_control.nractrs || control.cpu_control.nrictrs) {
		cpumask_t old_mask, new_mask;

		old_mask = tsk->cpus_allowed;
		cpus_and(new_mask, old_mask, cpumask);

		if (cpus_empty(new_mask))
			return -EINVAL;
		if (!cpus_equal(new_mask, old_mask))
			set_cpus_allowed(tsk, new_mask);
	}
#endif

	/* Step 3: Enable the counters with the new control and affinity.
	   PREEMPT note: Preemption is disabled since we're updating
	   an active perfctr. */
	preempt_disable();

	/* We had to enable preemption above for set_cpus_allowed() so we may
	   have lost a race with a concurrent update via the remote control
	   interface. If so then we must abort our update of this perfctr. */
	if (perfctr->updater_tgid != current->tgid) {
		printk(KERN_WARNING "perfctr: control update by task %d"
		       " was lost due to race with update by task %d\n",
		       current->tgid, perfctr->updater_tgid);
		err = -EBUSY;
	} else {
		/* XXX: validate si_signo? */
		perfctr->si_signo = control.si_signo;

		perfctr->cpu_state.cstatus = next_cstatus;

		if (!perfctr_cstatus_has_tsc(next_cstatus))
			perfctr->cpu_state.tsc_sum = 0;

		nrctrs = perfctr_cstatus_nrctrs(next_cstatus);
		for(i = 0; i < nrctrs; ++i)
			if (!(control.preserve & (1<<i)))
				perfctr->cpu_state.pmc[i].sum = 0;

		perfctr->flags = control.flags;

		if (tsk == current)
			vperfctr_resume(perfctr);
	}

	preempt_enable();
	return err;
}
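
Step 2 above narrows the task's CPU affinity to the intersection of its current mask and the mask the counters permit, refusing the update if no CPU would remain. A hedged sketch of just that step, with a hypothetical helper name:

/* Sketch of the Step 2 intersection (hypothetical helper). */
static int example_restrict_affinity(struct task_struct *tsk, cpumask_t allowed)
{
	cpumask_t old_mask = tsk->cpus_allowed, new_mask;

	cpus_and(new_mask, old_mask, allowed);
	if (cpus_empty(new_mask))
		return -EINVAL;			/* no CPU left to run on */
	if (!cpus_equal(new_mask, old_mask))
		set_cpus_allowed(tsk, new_mask);
	return 0;
}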
Example #6
static inline void vperfctr_init_cpumask(struct vperfctr *perfctr)
{
	cpus_setall(perfctr->cpumask);
}