Example #1
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_t mask;
	unsigned long *k;
	unsigned int min_length = sizeof(cpumask_t);

	if (NR_CPUS <= BITS_PER_COMPAT_LONG)
		min_length = sizeof(compat_ulong_t);

	if (len < min_length)
		return -EINVAL;

	ret = sched_getaffinity(pid, &mask);
	if (ret < 0)
		return ret;

	k = cpus_addr(mask);
	ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
	if (ret)
		return ret;

	return min_length;
}
Example #2
File: irq.c Project: ryos36/xen-arm
void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
{
        unsigned long map = *cpus_addr(cpumask);

        /* this always happens on GIC0 */
        mmio_writel(map << 16 | irq, gic_data[0].dist_base + ICDSGIR);
}
Example #3
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
    unsigned long map = *cpus_addr(*mask);

    /* this always happens on GIC0 */
    writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
Example #4
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long cfg;
	unsigned long flags;

	local_irq_save(flags);
	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();
		
	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);
		
	/*
	 * program the ICR 
	 */
	cfg = __prepare_ICR(0, vector);
			
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);

	local_irq_restore(flags);
}
Example #5
File: smp.c Project: amodj/Utopia
void send_IPI_mask_flat(cpumask_t cpumask, int vector)
{
    unsigned long mask = cpus_addr(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    /* An IPI with no target generates a send accept error from P5/P6 APICs. */
    WARN_ON(mask == 0);

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);
    
    local_irq_restore(flags);
}
Example #6
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests other CPU to execute flush TLB.
 *               1.Setup parameters.
 *               2.Send 'INVALIDATE_TLB_IPI' to other CPU.
 *                 Request other CPU to execute 'smp_invalidate_interrupt()'.
 *               3.Wait for other CPUs operation finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm -  a pointer to the mm struct for flush TLB
 *               *vma -  a pointer to the vma struct include va
 *               va - virtual address for flush TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));

	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask = cpus_addr(cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	while (!cpus_empty(flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example #7
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
    unsigned long mask = cpus_addr(cpumask)[0];
    unsigned long flags;

    local_irq_save(flags);
    WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
    __send_IPI_dest_field(mask, vector);
    local_irq_restore(flags);
}
Example #8
void irq_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
    unsigned long map = *cpus_addr(*mask);
    int satt, cpu, cpu_bmask;
    u32 val;

    satt = 1 << 15;

    /* 
     * NoteXXX: CPU1 SGI is configured as secure as default.
     *          Need to use the secure SGI 1 which is for waking up cpu1.
     */
    if (irq == CPU_BRINGUP_SGI)
    {
        if (irq_total_secondary_cpus)
        {
            --irq_total_secondary_cpus;
            satt = 0;
        }
    }
    
    val = readl(GIC_ICDISR + 4 * (irq / 32));
    if (!(val & (1 << (irq % 32)))) {   /*  secure interrupt? */
        satt = 0;
    }

    cpu = 0;
    cpu_bmask = 0;

#if defined(SPM_MCDI_FUNC)
    /*
     * Processors cannot receive interrupts during power-down.
     * Wait until the SPM checks status and returns. 
     */
    for_each_cpu(cpu, mask) {
        cpu_bmask |= 1 << cpu;
    }
    spm_check_core_status_before(cpu_bmask);
#endif

    /*
     * Ensure that stores to Normal memory are visible to the
     * other CPUs before issuing the IPI.
     */
    dsb();
    *(volatile u32 *)(GIC_DIST_BASE + 0xf00) = (map << 16) | satt | irq;
    dsb();

#if defined(SPM_MCDI_FUNC)
    spm_check_core_status_after(cpu_bmask);
#endif

}
Example #9
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
				    unsigned len, cpumask_t *new_mask)
{
	unsigned long *k;

	if (len < sizeof(cpumask_t))
		memset(new_mask, 0, sizeof(cpumask_t));
	else if (len > sizeof(cpumask_t))
		len = sizeof(cpumask_t);

	k = cpus_addr(*new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}
Example #10
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t mask, int vector)
{
    unsigned long flags;
    unsigned int cpu;

    local_irq_save(flags);
    WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);

    for (cpu = 0; cpu < NR_CPUS; ++cpu) {
        if (cpu_isset(cpu, mask)) {
            __send_IPI_one(cpu, vector);
        }
    }

    local_irq_restore(flags);
}
Example #11
/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends a 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
	cpumask_t cpumask;
	unsigned long *mask;

	preempt_disable();
	cpumask = cpu_online_map;
	cpu_clear(smp_processor_id(), cpumask);
	spin_lock(&flushcache_lock);
	mask = cpus_addr(cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
	_flush_cache_copyback_all();
	while (flushcache_cpumask)
		mb();
	spin_unlock(&flushcache_lock);
	preempt_enable();
}
Example #12
static void gic_raise_softirq_non_secure(const struct cpumask *mask, unsigned int irq)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
	unsigned long map = *cpus_addr(*mask);
#else
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);
#endif

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq | 0x8000, GIC_DIST_BASE + GIC_DIST_SOFTINT);
}
Example #13
static void raise_mce(struct mce *m)
{
	int context = MCJ_CTX(m->inject_flags);

	inject_mce(m);

	if (context == MCJ_CTX_RANDOM)
		return;

#ifdef CONFIG_X86_LOCAL_APIC
	if (m->inject_flags & MCJ_NMI_BROADCAST) {
		unsigned long start;
		int cpu;
		get_online_cpus();
		mce_inject_cpumask = cpu_online_map;
		cpu_clear(get_cpu(), mce_inject_cpumask);
		for_each_online_cpu(cpu) {
			struct mce *mcpu = &per_cpu(injectm, cpu);
			if (!mcpu->finished ||
			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
				cpu_clear(cpu, mce_inject_cpumask);
		}
		if (!cpus_empty(mce_inject_cpumask))
			apic->send_IPI_mask(&mce_inject_cpumask, NMI_VECTOR);
		start = jiffies;
		while (!cpus_empty(mce_inject_cpumask)) {
			if (!time_before(jiffies, start + 2*HZ)) {
				printk(KERN_ERR
				"Timeout waiting for mce inject NMI %lx\n",
					*cpus_addr(mce_inject_cpumask));
				break;
			}
			cpu_relax();
		}
		raise_local();
		put_cpu();
		put_online_cpus();
	} else
Example #14
unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask)
{
	return cpus_addr(*cpumask)[0] & 0xFF;
}
Example #15
void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
{
    unsigned long map = *cpus_addr(cpumask);

    writel(map << 16 | irq, gic_dist_base + GIC_DIST_SOFTINT);
}