Example 1
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
Example 2
static ssize_t
salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct proc_dir_entry *entry = PDE(inode);
	struct salinfo_data *data = entry->data;
	char cmd[32];
	size_t size;
	int i, n, cpu = -1;

retry:
	if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (down_interruptible(&data->mutex))
			return -EINTR;
	}

	n = data->cpu_check;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(n, data->cpu_event)) {
			if (!cpu_online(n)) {
				cpu_clear(n, data->cpu_event);
				continue;
			}
			cpu = n;
			break;
		}
		if (++n == NR_CPUS)
			n = 0;
	}

	if (cpu == -1)
		goto retry;

	/* for next read, start checking at next CPU */
	data->cpu_check = cpu;
	if (++data->cpu_check == NR_CPUS)
		data->cpu_check = 0;

	snprintf(cmd, sizeof(cmd), "read %d\n", cpu);

	size = strlen(cmd);
	if (size > count)
		size = count;
	if (copy_to_user(buffer, cmd, size))
		return -EFAULT;

	return size;
}
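To make the round-robin step concrete (illustrative numbers, not from the source): with NR_CPUS equal to 4 and an event found on CPU 3, cpu_check is set to 3 and then pre-incremented to 4, which wraps to 0, so the next read starts its scan at CPU 0 rather than re-reporting CPU 3 first.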
Example 3
/**
 * smp_cache_call - Issue an IPI to request the other CPUs flush caches
 * @opr_mask: Cache operation flags
 * @start: Start address of request
 * @end: End address of request
 *
 * Send cache flush IPI to other CPUs.  This invokes smp_cache_interrupt()
 * above on those other CPUs and then waits for them to finish.
 *
 * The caller must hold smp_cache_lock.
 */
void smp_cache_call(unsigned long opr_mask,
		    unsigned long start, unsigned long end)
{
	smp_cache_mask = opr_mask;
	smp_cache_start = start;
	smp_cache_end = end;
	smp_cache_ipi_map = cpu_online_map;
	cpu_clear(smp_processor_id(), smp_cache_ipi_map);

	send_IPI_allbutself(FLUSH_CACHE_IPI);

	while (!cpus_empty(smp_cache_ipi_map))
		/* nothing. lockup detection does not belong here */
		mb();
}
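The kerneldoc above requires the caller to hold smp_cache_lock for the whole IPI round trip. A minimal caller sketch, in which the wrapper name and the opr_mask value are hypothetical and only smp_cache_call() and smp_cache_lock are taken from the snippet:

/* Hypothetical wrapper: flush a range on every CPU while holding the lock
 * that smp_cache_call() requires. */
static void example_flush_range_on_all_cpus(unsigned long opr_mask,
					    unsigned long start,
					    unsigned long end)
{
	spin_lock(&smp_cache_lock);		/* required by smp_cache_call() */
	smp_cache_call(opr_mask, start, end);	/* IPIs the other CPUs and waits */
	spin_unlock(&smp_cache_lock);
}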
Example 4
/**
 * @fn int xnsched_run(void)
 * @brief The rescheduling procedure.
 *
 * This is the central rescheduling routine which should be called to
 * validate and apply changes which have previously been made to the
 * nucleus scheduling state, such as suspending, resuming or changing
 * the priority of threads.  This call performs context switches as
 * needed. xnsched_run() schedules out the current thread if:
 *
 * - the current thread is about to block.
 * - a runnable thread from a higher priority scheduling class is
 * waiting for the CPU.
 * - the current thread does not lead the runnable threads from its
 * own scheduling class (i.e. round-robin).
 *
 * The Cobalt core implements a lazy rescheduling scheme, so most of
 * the services affecting the thread state MUST be followed by a call
 * to the rescheduling procedure for the new scheduling state to be
 * applied.
 *
 * In other words, multiple changes on the scheduler state can be done
 * in a row, waking threads up, blocking others, without being
 * immediately translated into the corresponding context switches.
 * When all changes have been applied, xnsched_run() should be called
 * for considering those changes, and possibly switching context.
 *
 * As a notable exception to the previous principle however, every
 * action which ends up suspending the current thread begets an
 * implicit call to the rescheduling procedure on behalf of the
 * blocking service.
 *
 * Typically, self-suspension or sleeping on a synchronization object
 * automatically leads to a call to the rescheduling procedure,
 * therefore the caller does not need to explicitly issue
 * xnsched_run() after such operations.
 *
 * The rescheduling procedure always leads to a null-effect if it is
 * called on behalf of an interrupt service routine. Any outstanding
 * scheduler lock held by the outgoing thread will be restored when
 * the thread is scheduled back in.
 *
 * Calling this procedure with no applicable context switch pending is
 * harmless and simply leads to a null-effect.
 *
 * @return Non-zero is returned if a context switch actually happened,
 * otherwise zero if the current thread was left running.
 *
 * @coretags{unrestricted}
 */
static inline int test_resched(struct xnsched *sched)
{
	int resched = xnsched_resched_p(sched);
#ifdef CONFIG_SMP
	/* Send resched IPI to remote CPU(s). */
	if (unlikely(!cpus_empty(sched->resched))) {
		smp_mb();
		ipipe_send_ipi(IPIPE_RESCHEDULE_IPI, sched->resched);
		cpus_clear(sched->resched);
	}
#endif
	sched->status &= ~XNRESCHED;

	return resched;
}
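The doc block above describes a lazy contract: several scheduler-state changes may be queued back to back and then resolved by a single rescheduling pass. A sketch of that usage pattern, in which the three helpers are hypothetical stand-ins for services that wake up or block threads, and only xnsched_run() comes from the documentation:

/* Sketch only: the helpers below are hypothetical. */
static void example_batched_scheduling_update(void)
{
	wake_producer_thread();		/* hypothetical: marks one thread runnable */
	wake_consumer_thread();		/* hypothetical: marks another runnable */
	block_housekeeping_thread();	/* hypothetical: suspends a third thread */

	/* One rescheduling pass applies all pending changes and switches
	 * context if a higher-priority thread became runnable. */
	xnsched_run();
}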
Example 5
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(void *why)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device in broadcast mode forever or is it not
	 * affected by the powerstate ?
	 */
	if (!dev || !tick_device_is_functional(dev) ||
	    !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				clockevents_set_mode(dev,
						     CLOCK_EVT_MODE_SHUTDOWN);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
	}

	if (cpus_empty(tick_broadcast_mask))
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	else {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
Example 6
static char *
kdb_cpus_allowed_string(struct task_struct *tp)
{
	static char maskbuf[NR_CPUS * 8];
	if (cpus_equal(tp->cpus_allowed, cpu_online_map))
		strcpy(maskbuf, "ALL");
	else if (cpus_full(tp->cpus_allowed))
		strcpy(maskbuf, "ALL(NR_CPUS)");
	else if (cpus_empty(tp->cpus_allowed))
		strcpy(maskbuf, "NONE");
	else if (cpus_weight(tp->cpus_allowed) == 1)
		snprintf(maskbuf, sizeof(maskbuf), "ONLY(%d)", first_cpu(tp->cpus_allowed));
	else
		cpulist_scnprintf(maskbuf, sizeof(maskbuf), tp->cpus_allowed);
	return maskbuf;
}
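Because the function formats into a static buffer, the returned pointer is only valid until the next call; the result should be printed or copied immediately and never fetched twice in the same statement. A minimal, hypothetical call site inside a kdb command handler (kdb_printf() is kdb's usual output routine):

	kdb_printf("%-16s %s\n", tp->comm, kdb_cpus_allowed_string(tp));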
Example 7
File: irq_64.c Project: E-LLP/n900
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		cpumask_t mask;
		int break_affinity = 0;
		int set_affinity = 1;

		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		spin_lock(&desc->lock);

		if (!irq_has_action(irq) ||
		    cpus_equal(desc->affinity, map)) {
			spin_unlock(&desc->lock);
			continue;
		}

		cpus_and(mask, desc->affinity, map);
		if (cpus_empty(mask)) {
			break_affinity = 1;
			mask = map;
		}

		if (desc->chip->mask)
			desc->chip->mask(irq);

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, mask);
		else if (!(warned++))
			set_affinity = 0;

		if (desc->chip->unmask)
			desc->chip->unmask(irq);

		spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}
}
Example 8
static int vperfctr_enable_control(struct vperfctr *perfctr, struct task_struct *tsk)
{
	int err;
	unsigned int next_cstatus;
	unsigned int nrctrs, i;

	if (perfctr->cpu_state.control.header.nractrs ||
	    perfctr->cpu_state.control.header.nrictrs) {
		cpumask_t old_mask, new_mask;

		//old_mask = tsk->cpus_allowed;
		old_mask = tsk->cpu_mask;
		cpus_andnot(new_mask, old_mask, perfctr_cpus_forbidden_mask);

		if (cpus_empty(new_mask))
			return -EINVAL;
		if (!cpus_equal(new_mask, old_mask))
			set_cpus_allowed(tsk, new_mask);
	}

	perfctr->cpu_state.user.cstatus = 0;
	perfctr->resume_cstatus = 0;

	/* remote access note: perfctr_cpu_update_control() is ok */
	err = perfctr_cpu_update_control(&perfctr->cpu_state, 0);
	if (err < 0)
		return err;
	next_cstatus = perfctr->cpu_state.user.cstatus;
	if (!perfctr_cstatus_enabled(next_cstatus))
		return 0;

	if (!perfctr_cstatus_has_tsc(next_cstatus))
		perfctr->cpu_state.user.tsc_sum = 0;

	nrctrs = perfctr_cstatus_nrctrs(next_cstatus);
	for(i = 0; i < nrctrs; ++i)
		if (!(perfctr->preserve & (1<<i)))
			perfctr->cpu_state.user.pmc[i].sum = 0;

	spin_lock(&perfctr->children_lock);
	perfctr->inheritance_id = new_inheritance_id();
	memset(&perfctr->children, 0, sizeof perfctr->children);
	spin_unlock(&perfctr->children_lock);

	return 0;
}
Example 9
void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
{
	cpumask_t mask;

	cpus_and(mask, cpu_online_map, timer_bcast_ipi);
	if (!cpus_empty(mask)) {
#ifdef CONFIG_SMP
		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#else
		/*
		 * We can directly call the apic timer interrupt handler
		 * in UP case. Minus all irq related functions
		 */
		up_apic_timer_interrupt_call(regs);
#endif
	}
}
Example 10
int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
			  bool force)
{
	cpumask_t tmask;
	int cpu = 0;
	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);

	/*
	 * On the legacy Malta development board, all I/O interrupts
	 * are routed through the 8259 and combined in a single signal
	 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
	 * that signal is brought to IP2 of both VPEs. To avoid racing
	 * concurrent interrupt service events, IP2 is enabled only on
	 * one VPE, by convention VPE0.  So long as no bits are ever
	 * cleared in the affinity mask, there will never be any
	 * interrupt forwarding.  But as soon as a program or operator
	 * sets affinity for one of the related IRQs, we need to make
	 * sure that we don't ever try to forward across the VPE boundary,
	 * at least not until we engineer a system where the interrupt
	 * _ack() or _end() function can somehow know that it corresponds
	 * to an interrupt taken on another VPE, and perform the appropriate
	 * restoration of Status.IM state using MFTR/MTTR instead of the
	 * normal local behavior. We also ensure that no attempt will
	 * be made to forward to an offline "CPU".
	 */

	cpumask_copy(&tmask, affinity);
	for_each_cpu(cpu, affinity) {
		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
			cpu_clear(cpu, tmask);
	}
	cpumask_copy(d->affinity, &tmask);

	if (cpus_empty(tmask))
		/*
		 * We could restore a default mask here, but the
		 * runtime code can anyway deal with the null set
		 */
		printk(KERN_WARNING
		       "IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq);

	/* Do any generic SMTC IRQ affinity setup */
	smtc_set_irq_affinity(d->irq, tmask);

	return IRQ_SET_MASK_OK_NOCOPY;
}
Example 12
void smp_send_timer_broadcast_ipi(void)
{
	int cpu = smp_processor_id();
	cpumask_t mask;

	cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);

	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		add_pda(apic_timer_irqs, 1);
		smp_local_timer_interrupt();
	}

	if (!cpus_empty(mask)) {
		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
	}
}
Example 13
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpus_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
Example 14
void flush_tlb_mm(struct mm_struct *mm)
{
    cpumask_t cpu_mask;

    preempt_disable();
    cpu_mask = mm->cpu_vm_mask;
    cpu_clear(smp_processor_id(), cpu_mask);

    if (current->active_mm == mm) {
        if (current->mm)
            local_flush_tlb();
        else
            leave_mm(smp_processor_id());
    }
    if (!cpus_empty(cpu_mask))
        flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

    preempt_enable();
}
Example 15
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask)) {
		struct tlb_flush_param p = { .pid = pid };
		smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
Example 16
File: tlb_64.c Project: E-LLP/n900
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
		return;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
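The sender above publishes flush_mm/flush_va, orders the stores with smp_mb(), sends the IPI and then spins until flush_cpumask drains. A simplified sketch of the receiving side of that handshake; the real handler in the same file also distinguishes single-page flushes and inactive mms, so this is not the verbatim implementation:

/* Simplified sketch of the IPI handler side of the handshake above. */
static void example_invalidate_interrupt(int sender)
{
	union smp_flush_state *f = &per_cpu(flush_state, sender);

	__flush_tlb();					/* perform the requested flush */
	cpu_clear(smp_processor_id(), f->flush_cpumask);	/* signal completion */
}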
Example 17
static int __init mips_smp_init(void)
{
    cpumask_t forbidden;
    unsigned int cpu;

    cpus_clear(forbidden);
#ifdef CONFIG_SMP
    smp_call_function(mips_setup_cpu_mask, &forbidden, 1);
#endif
    mips_setup_cpu_mask(&forbidden);
    if (cpus_empty(forbidden))
        return 0;
    perfctr_cpus_forbidden_mask = forbidden;
    for (cpu = 0; cpu < NR_CPUS; ++cpu) {
        if (cpu_isset(cpu, forbidden))
            printk(" %u", cpu);
    }
    printk("\n");
    return 0;
}
Example 18
int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
{
	unsigned long flags;
	int self;

	local_irq_save_hw(flags);

	self = cpu_isset(ipipe_processor_id(),cpumask);
	cpu_clear(ipipe_processor_id(), cpumask);

	if (!cpus_empty(cpumask))
		apic->send_IPI_mask(&cpumask, ipipe_apic_irq_vector(ipi));

	if (self)
		ipipe_trigger_irq(ipi);

	local_irq_restore_hw(flags);

	return 0;
}
Example 19
void ipipe_critical_exit(unsigned long flags)
{
	if (num_online_cpus() == 1) {
		hard_local_irq_restore(flags);
		return;
	}

#ifdef CONFIG_SMP
	if (atomic_dec_and_test(&__ipipe_critical_count)) {
		spin_unlock(&__ipipe_cpu_barrier);
		while (!cpus_empty(__ipipe_cpu_sync_map))
			cpu_relax();
		cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map);
		clear_bit(0, &__ipipe_critical_lock);
		smp_mb__after_clear_bit();
	}
#endif /* CONFIG_SMP */

	hard_local_irq_restore(flags);
}
Example 20
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
    struct mm_struct *mm = vma->vm_mm;
    cpumask_t cpu_mask;

    preempt_disable();
    cpu_mask = mm->cpu_vm_mask;
    cpu_clear(smp_processor_id(), cpu_mask);

    if (current->active_mm == mm) {
        if (current->mm)
            __flush_tlb_one(va);
        else
            leave_mm(smp_processor_id());
    }

    if (!cpus_empty(cpu_mask))
        flush_tlb_others(cpu_mask, mm, va);

    preempt_enable();
}
Example 21
int fastcall __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
{
	unsigned long flags;
	ipipe_declare_cpuid;
	int self;

	ipipe_lock_cpu(flags);

	self = cpu_isset(cpuid,cpumask);
	cpu_clear(cpuid,cpumask);

	if (!cpus_empty(cpumask))
		send_IPI_mask(cpumask,ipi + FIRST_EXTERNAL_VECTOR);

	if (self)
		ipipe_trigger_irq(ipi);

	ipipe_unlock_cpu(flags);

	return 0;
}
Example 22
static void move_candidate_irqs(struct irq_info *info, void *data)
{
	struct load_balance_info *lb_info = data;

	/* never move an irq that has an affinity hint when
	 * hint_policy is HINT_POLICY_EXACT
	 */
	if (info->hint_policy == HINT_POLICY_EXACT)
		if (!cpus_empty(info->affinity_hint))
			return;

	/* Don't rebalance irqs that don't want it */
	if (info->level == BALANCE_NONE)
		return;

	/* Don't move cpus that only have one irq, regardless of load */
	if (g_list_length(info->assigned_obj->interrupts) <= 1)
		return;

	/* IRQs with a load of 1 have most likely not had any interrupts and
	 * aren't worth migrating
	 */
	if (info->load <= 1)
		return;

	/* If we can migrate an irq without swapping the imbalance do it. */
	if ((lb_info->adjustment_load - info->load) > (lb_info->min_load + info->load)) {
		lb_info->adjustment_load -= info->load;
		lb_info->min_load += info->load;
	} else
		return;

	log(TO_CONSOLE, LOG_INFO, "Selecting irq %d for rebalancing\n", info->irq);

	migrate_irq(&info->assigned_obj->interrupts, &rebalance_irq_list, info);

	info->assigned_obj = NULL;
}
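To make the guard on the final migration concrete (illustrative numbers, not from the source): with adjustment_load = 100, min_load = 20 and an IRQ of load 30, the test compares 100 - 30 = 70 against 20 + 30 = 50, so the move proceeds and the two loads become 70 and 50. With an IRQ of load 45 the comparison would be 55 against 65; the move would swap the imbalance, so the IRQ is left where it is.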
Example 23
static int __vcpu_set_affinity(
    struct vcpu *v, cpumask_t *affinity,
    bool_t old_lock_status, bool_t new_lock_status)
{
    cpumask_t online_affinity, old_affinity;

    cpus_and(online_affinity, *affinity, cpu_online_map);
    if ( cpus_empty(online_affinity) )
        return -EINVAL;

    vcpu_schedule_lock_irq(v);

    if ( v->affinity_locked != old_lock_status )
    {
        BUG_ON(!v->affinity_locked);
        vcpu_schedule_unlock_irq(v);
        return -EBUSY;
    }

    v->affinity_locked = new_lock_status;

    old_affinity = v->cpu_affinity;
    v->cpu_affinity = *affinity;
    *affinity = old_affinity;
    if ( !cpu_isset(v->processor, v->cpu_affinity) )
        set_bit(_VPF_migrating, &v->pause_flags);

    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }

    return 0;
}
Example 24
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}
Example 25
static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
    cpumask_t    tmp = CPU_MASK_NONE;
    unsigned long    flags;
    int        i;

    pr_debug(KERN_DEBUG "%s called\n", __func__);
    irq -= _irqbase;

    cpus_and(tmp, cpumask, cpu_online_map);
    if (cpus_empty(tmp))
        return;

    /* Assumption : cpumask refers to a single CPU */
    spin_lock_irqsave(&gic_lock, flags);
    for (;;) {
        /* Re-route this IRQ */
        GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

        /*
         * FIXME: assumption that _intrmap is ordered and has no holes
         */

        /* Update the intr_map */
        _intrmap[irq].cpunum = first_cpu(tmp);

        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
            clear_bit(irq, pcpu_masks[i].pcpu_mask);
        set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

    }
    irq_desc[irq].affinity = cpumask;
    spin_unlock_irqrestore(&gic_lock, flags);

}
Example 26
static int do_vperfctr_control(struct vperfctr *perfctr,
			       const struct vperfctr_control __user *argp,
			       unsigned int argbytes,
			       struct task_struct *tsk)
{
	struct vperfctr_control *control;
	int err;
	unsigned int next_cstatus;
	unsigned int nrctrs, i;

	if (!tsk) {
		return -ESRCH;	/* attempt to update unlinked perfctr */
	}

	/* The control object can be large (over 300 bytes on i386),
	   so kmalloc() it instead of storing it on the stack.
	   We must use task-private storage to prevent racing with a
	   monitor process attaching to us before the non-preemptible
	   perfctr update step. Therefore we cannot store the copy
	   in the perfctr object itself. */
	control = kmalloc(sizeof(*control), GFP_USER);
	if (!control) {
		return -ENOMEM;
	}

	err = -EINVAL;
	if (argbytes > sizeof *control) {
		goto out_kfree;
	}

	err = -EFAULT;
	if (copy_from_user(control, argp, argbytes)) {
		goto out_kfree;
	}

	if (argbytes < sizeof *control)
		memset((char*)control + argbytes, 0, sizeof *control - argbytes);

	// figure out what is happening in the following 'if' block

	if (control->cpu_control.nractrs || control->cpu_control.nrictrs) {
		cpumask_t old_mask, new_mask;

		old_mask = tsk->cpus_allowed;
		cpus_andnot(new_mask, old_mask, perfctr_cpus_forbidden_mask);

		err = -EINVAL;
		if (cpus_empty(new_mask)) {
			goto out_kfree;
		}
		if (!cpus_equal(new_mask, old_mask))
			set_cpus_allowed(tsk, new_mask);
	}

	/* PREEMPT note: preemption is disabled over the entire
	   region since we're updating an active perfctr. */
	preempt_disable();

	// The task whose control registers we are changing might actually be
	// in a suspended state.  That can happen when the task is executing
	// under the control of another task, as in the case of debugging
	// or ptrace.  However, if the write_control is done for the currently
	// executing process, first suspend it and then do the update.
	// Why are we resetting 'perfctr->cpu_state.cstatus'?

	if (IS_RUNNING(perfctr)) {
		if (tsk == current)
			vperfctr_suspend(perfctr);
	
		// not sure why we are zeroing out the following explicitly
		perfctr->cpu_state.cstatus = 0;
		vperfctr_clear_iresume_cstatus(perfctr);
	}

	// copying the user-specified control values to 'state'
	perfctr->cpu_state.control = control->cpu_control;

	/* remote access note: perfctr_cpu_update_control() is ok */
	err = perfctr_cpu_update_control(&perfctr->cpu_state, 0);
	if (err < 0) {
		goto out;
	}
	next_cstatus = perfctr->cpu_state.cstatus;
	if (!perfctr_cstatus_enabled(next_cstatus))
		goto out;

	/* XXX: validate si_signo? */
	perfctr->si_signo = control->si_signo;

	if (!perfctr_cstatus_has_tsc(next_cstatus))
		perfctr->cpu_state.tsc_sum = 0;

	nrctrs = perfctr_cstatus_nrctrs(next_cstatus);
	for(i = 0; i < nrctrs; ++i)
		if (!(control->preserve & (1<<i)))
			perfctr->cpu_state.pmc[i].sum = 0;

	// I am not sure why we are removing the inheritance just because
	// we updated the control information. True, because the children might
	// be performing something else. So, the control will have to be set
	// before spawning any children

	spin_lock(&perfctr->children_lock);
	perfctr->inheritance_id = new_inheritance_id();
	memset(&perfctr->children, 0, sizeof perfctr->children);
	spin_unlock(&perfctr->children_lock);

	if (tsk == current) {
		vperfctr_resume(perfctr);
	}

 out:
	preempt_enable();
 out_kfree:
	kfree(control);
	return err;
}
Example 27
static int sys_vperfctr_control(struct vperfctr *perfctr,
				struct perfctr_struct_buf *argp,
				struct task_struct *tsk)
{
	struct vperfctr_control control;
	int err;
	unsigned int next_cstatus;
	unsigned int nrctrs, i;
	cpumask_t cpumask;

	if (!tsk)
		return -ESRCH;	/* attempt to update unlinked perfctr */

	err = perfctr_copy_from_user(&control, argp, &vperfctr_control_sdesc);
	if (err)
		return err;

	/* Step 1: Update the control but keep the counters disabled.
	   PREEMPT note: Preemption is disabled since we're updating
	   an active perfctr. */
	preempt_disable();
	if (IS_RUNNING(perfctr)) {
		if (tsk == current)
			vperfctr_suspend(perfctr);
		perfctr->cpu_state.cstatus = 0;
		vperfctr_clear_iresume_cstatus(perfctr);
	}
	perfctr->cpu_state.control = control.cpu_control;
	/* remote access note: perfctr_cpu_update_control() is ok */
	cpus_setall(cpumask);
#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
	/* make a stopped vperfctr have an unconstrained cpumask */
	perfctr->cpumask = cpumask;
#endif
	err = perfctr_cpu_update_control(&perfctr->cpu_state, &cpumask);
	if (err < 0) {
		next_cstatus = 0;
	} else {
		next_cstatus = perfctr->cpu_state.cstatus;
		perfctr->cpu_state.cstatus = 0;
		perfctr->updater_tgid = current->tgid;
#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
		perfctr->cpumask = cpumask;
#endif
	}
	preempt_enable_no_resched();

	if (!perfctr_cstatus_enabled(next_cstatus))
		return err;

#ifdef CONFIG_PERFCTR_CPUS_FORBIDDEN_MASK
	/* Step 2: Update the task's CPU affinity mask.
	   PREEMPT note: Preemption must be enabled for set_cpus_allowed(). */
	if (control.cpu_control.nractrs || control.cpu_control.nrictrs) {
		cpumask_t old_mask, new_mask;

		old_mask = tsk->cpus_allowed;
		cpus_and(new_mask, old_mask, cpumask);

		if (cpus_empty(new_mask))
			return -EINVAL;
		if (!cpus_equal(new_mask, old_mask))
			set_cpus_allowed(tsk, new_mask);
	}
#endif

	/* Step 3: Enable the counters with the new control and affinity.
	   PREEMPT note: Preemption is disabled since we're updating
	   an active perfctr. */
	preempt_disable();

	/* We had to enable preemption above for set_cpus_allowed() so we may
	   have lost a race with a concurrent update via the remote control
	   interface. If so then we must abort our update of this perfctr. */
	if (perfctr->updater_tgid != current->tgid) {
		printk(KERN_WARNING "perfctr: control update by task %d"
		       " was lost due to race with update by task %d\n",
		       current->tgid, perfctr->updater_tgid);
		err = -EBUSY;
	} else {
		/* XXX: validate si_signo? */
		perfctr->si_signo = control.si_signo;

		perfctr->cpu_state.cstatus = next_cstatus;

		if (!perfctr_cstatus_has_tsc(next_cstatus))
			perfctr->cpu_state.tsc_sum = 0;

		nrctrs = perfctr_cstatus_nrctrs(next_cstatus);
		for(i = 0; i < nrctrs; ++i)
			if (!(control.preserve & (1<<i)))
				perfctr->cpu_state.pmc[i].sum = 0;

		perfctr->flags = control.flags;

		if (tsk == current)
			vperfctr_resume(perfctr);
	}

	preempt_enable();
	return err;
}
Example 28
acpi_status __init
acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *obj;
	struct acpi_table_iosapic *iosapic;
	unsigned int gsi_base;
	int node;

	/* Only care about objects w/ a method that returns the MADT */
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return AE_OK;

	if (!buffer.length || !buffer.pointer)
		return AE_OK;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(*iosapic)) {
		acpi_os_free(buffer.pointer);
		return AE_OK;
	}

	iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;

	if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
		acpi_os_free(buffer.pointer);
		return AE_OK;
	}

	gsi_base = iosapic->global_irq_base;

	acpi_os_free(buffer.pointer);
	buffer.length = ACPI_ALLOCATE_BUFFER;
	buffer.pointer = NULL;

	/*
	 * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell
	 * us which node to associate this with.
	 */
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer)))
		return AE_OK;

	if (!buffer.length || !buffer.pointer)
		return AE_OK;

	obj = buffer.pointer;

	if (obj->type != ACPI_TYPE_INTEGER ||
	    obj->integer.value >= MAX_PXM_DOMAINS) {
		acpi_os_free(buffer.pointer);
		return AE_OK;
	}

	node = pxm_to_nid_map[obj->integer.value];
	acpi_os_free(buffer.pointer);

	if (node >= MAX_NUMNODES || !node_online(node) ||
	    cpus_empty(node_to_cpumask(node)))
		return AE_OK;

	/* We know a gsi to node mapping! */
	map_iosapic_to_node(gsi_base, node);
	return AE_OK;
}
Example 29
int get_page_type(struct page_info *page, unsigned long type)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    ASSERT(!(type & ~PGT_type_mask));

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & PGT_type_mask) != type )
            {
                /*
                 * On type change we check to flush stale TLB entries. This 
                 * may be unnecessary (e.g., page was GDT/LDT) but those 
                 * circumstances should be very rare.
                 */
                cpumask_t mask =
                    page_get_owner(page)->domain_dirty_cpumask;
                tlbflush_filter(mask, page->tlbflush_timestamp);

                if ( unlikely(!cpus_empty(mask)) )
                {
                    perfc_incr(need_flush_tlb_flush);
                    flush_tlb_mask(mask);
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else if ( unlikely((x & PGT_type_mask) != type) )
        {
            return 0;
        }
        else if ( unlikely(!(x & PGT_validated)) )
        {
            /* Someone else is updating validation of this page. Wait... */
            while ( (y = page->u.inuse.type_info) == x )
                cpu_relax();
            goto again;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}