Example #1
0
static long beat_lpar_hpte_insert(unsigned long hpte_group,
                                  unsigned long va, unsigned long pa,
                                  unsigned long rflags, unsigned long vflags,
                                  int psize, int ssize)
{
    unsigned long lpar_rc;
    u64 hpte_v, hpte_r, slot;

    /* same as iseries */
    if (vflags & HPTE_V_SECONDARY)
        return -1;

    if (!(vflags & HPTE_V_BOLTED))
        DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
                "rflags=%lx, vflags=%lx, psize=%d)\n",
                hpte_group, va, pa, rflags, vflags, psize);

    hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
             vflags | HPTE_V_VALID;
    hpte_r = hpte_encode_r(pa, psize) | rflags;

    if (!(vflags & HPTE_V_BOLTED))
        DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

    if (rflags & _PAGE_NO_CACHE)
        hpte_r &= ~_PAGE_COHERENT;

    atomic_spin_lock(&beat_htab_lock);
    lpar_rc = beat_read_mask(hpte_group);
    if (lpar_rc == 0) {
        if (!(vflags & HPTE_V_BOLTED))
            DBG_LOW(" full\n");
        atomic_spin_unlock(&beat_htab_lock);
        return -1;
    }

    lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48,
                                     hpte_v, hpte_r, &slot);
    atomic_spin_unlock(&beat_htab_lock);

    /*
     * Since we try to ioremap PHBs we don't own, the PTE insert
     * will fail.  However, we must catch the failure in hash_page
     * or we will loop forever, so return -2 in this case.
     */
    if (unlikely(lpar_rc != 0)) {
        if (!(vflags & HPTE_V_BOLTED))
            DBG_LOW(" lpar err %lx\n", lpar_rc);
        return -2;
    }
    if (!(vflags & HPTE_V_BOLTED))
        DBG_LOW(" -> slot: %lx\n", slot);

    /* We have to pass down the secondary bucket bit here as well */
    return (slot ^ hpte_group) & 15;
}
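For orientation, the value returned above packs the entry's position within its 8-slot HPTE group (plus the secondary-bucket bit the comment mentions) into the low four bits. A minimal user-space sketch of that bit arithmetic, with made-up slot numbers:

#include <stdio.h>

int main(void)
{
    /* Hypothetical values: an HPTE group starting at global slot 0x120
     * and a hypervisor-chosen slot five entries into that group. */
    unsigned long hpte_group = 0x120;
    unsigned long slot = 0x125;

    /* Same expression as the return statement above: the low three bits
     * give the index within the 8-entry group, and bit 3 would carry the
     * secondary-bucket flag. */
    printf("returned value: %lu\n", (slot ^ hpte_group) & 15);
    return 0;
}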
Example #2
0
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		atomic_spin_lock(&desc->lock);

		affinity = desc->affinity;
		if (!irq_has_action(irq) ||
		    cpumask_equal(affinity, cpu_online_mask)) {
			atomic_spin_unlock(&desc->lock);
			continue;
		}

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		if (desc->chip->mask)
			desc->chip->mask(irq);

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, affinity);
		else if (!(warned++))
			set_affinity = 0;

		if (desc->chip->unmask)
			desc->chip->unmask(irq);

		atomic_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}
}
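The decisive check in the loop is whether the IRQ's affinity mask still intersects the set of online CPUs; when it does not, the affinity is broken and widened to all CPUs. A stand-alone sketch of that check, with plain bitmasks standing in for struct cpumask (values are purely illustrative):

#include <stdio.h>

int main(void)
{
	/* Bit n set means CPU n.  The IRQ was bound to CPU 3, but only
	 * CPUs 0 and 1 remain online after the hot-unplug. */
	unsigned long affinity   = 1UL << 3;
	unsigned long cpu_online = (1UL << 0) | (1UL << 1);

	if (!(affinity & cpu_online)) {
		/* Equivalent of the cpumask_any_and() >= nr_cpu_ids test
		 * above: no online CPU left in the mask, fall back to all. */
		printf("Broke affinity\n");
		affinity = ~0UL;
	}
	printf("new affinity mask: %#lx\n", affinity);
	return 0;
}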
Example #3
0
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = vma ? vma->vm_mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(vma->vm_mm);
	if (!cpumask_equal(cpu_mask, cpumask_of(smp_processor_id()))) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				atomic_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid);
			if (lock)
				atomic_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
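	/*
	 * The broadcast path above already covered the local CPU and jumped
	 * to bail; for the local-only and IPI cases the local TLB entry
	 * still has to be invalidated here.
	 */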
	_tlbil_va(vmaddr, pid);
 bail:
	preempt_enable();
}
Example #4
0
/*
 * NOTE: for updatepp ops we are fortunate that the Linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long beat_lpar_hpte_updatepp(unsigned long slot,
                                    unsigned long newpp,
                                    unsigned long va,
                                    int psize, int ssize, int local)
{
    unsigned long lpar_rc;
    u64 dummy0, dummy1;
    unsigned long want_v;

    want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);

    DBG_LOW("    update: "
            "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
            want_v & HPTE_V_AVPN, slot, psize, newpp);

    atomic_spin_lock(&beat_htab_lock);
    dummy0 = beat_lpar_hpte_getword0(slot);
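    /*
     * Compare only the AVPN portion of HPTE word 0: the low seven bits
     * hold flag bits (valid, secondary, bolted, ...) rather than address
     * bits, so they are masked off on both sides.
     */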
    if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
        DBG_LOW("not found !\n");
        atomic_spin_unlock(&beat_htab_lock);
        return -1;
    }

    lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0,
                                    &dummy1);
    atomic_spin_unlock(&beat_htab_lock);
    if (lpar_rc != 0 || dummy0 == 0) {
        DBG_LOW("not found !\n");
        return -1;
    }

    DBG_LOW("ok %lx %lx\n", dummy0, dummy1);

    BUG_ON(lpar_rc != 0);

    return 0;
}
Example #5
0
/*
 * Acknowledge the IRQ using either the PCI host bridge's interrupt
 * acknowledge feature or by polling.  Which method is used is determined
 * by how i8259_init() was called.  Note that polling is broken on some
 * IBM and Motorola PReP boxes, so the int-ack feature must be used there.
 */
unsigned int i8259_irq(void)
{
	int irq;
	int lock = 0;

	/* Either int-ack or poll for the IRQ */
	if (pci_intack)
		irq = readb(pci_intack);
	else {
		atomic_spin_lock(&i8259_lock);
		lock = 1;

		/* Perform an interrupt acknowledge cycle on controller 1. */
		outb(0x0C, 0x20);		/* prepare for poll */
		irq = inb(0x20) & 7;
		if (irq == 2) {
			/*
			 * Interrupt is cascaded so perform interrupt
			 * acknowledge on controller 2.
			 */
			outb(0x0C, 0xA0);	/* prepare for poll */
			irq = (inb(0xA0) & 7) + 8;
		}
	}

	if (irq == 7) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR). If the most
		 * significant bit is not set then there is no valid
		 * interrupt.
		 */
		if (!pci_intack)
			outb(0x0B, 0x20);	/* ISR register */
		if (~inb(0x20) & 0x80)
			irq = NO_IRQ;
	} else if (irq == 0xff)
		irq = NO_IRQ;

	if (lock)
		atomic_spin_unlock(&i8259_lock);
	return irq;
}
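For reference, the byte read back after the 0x0C poll command carries the pending IRQ number in its low three bits, and IRQ 2 signals the cascade to the second controller. A stand-alone sketch of that decoding with made-up poll bytes:

#include <stdio.h>

/* master/slave stand in for the bytes read back from ports 0x20/0xA0
 * after the 0x0C poll command; only the low three bits name the IRQ. */
static unsigned int decode_poll(unsigned char master, unsigned char slave)
{
	unsigned int irq = master & 7;

	if (irq == 2)			/* IRQ2 is the cascade input */
		irq = (slave & 7) + 8;	/* remap onto IRQ8..IRQ15 */
	return irq;
}

int main(void)
{
	printf("irq = %u\n", decode_poll(0x82, 0x85));	/* prints 13 */
	return 0;
}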
Example #6
0
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max;

	max = last_context - first_context;
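	/* max bounds the scan below to roughly one full pass over the context range */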

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim; check whether it's active,
		 * since on SMP we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_devel("[%d] steal context %d from mm @%p\n",
			 smp_processor_id(), id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm */
		for_each_cpu(cpu, mm_cpumask(mm))
			__set_bit(id, stale_map[cpu]);
		return id;
	}

	/* This will happen if you have more CPUs than available contexts;
	 * all we can do here is wait a bit and try again
	 */
	atomic_spin_unlock(&context_lock);
	cpu_relax();
	atomic_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
Example #7
0
static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
        unsigned long ea,
        int psize, int ssize)
{
    unsigned long lpar_rc, slot, vsid, va;
    u64 dummy0, dummy1;

    vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
    va = (vsid << 28) | (ea & 0x0fffffff);

    atomic_spin_lock(&beat_htab_lock);
    slot = beat_lpar_hpte_find(va, psize);
    BUG_ON(slot == -1);

    lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
                                    &dummy0, &dummy1);
    atomic_spin_unlock(&beat_htab_lock);

    BUG_ON(lpar_rc != 0);
}
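The virtual address passed to the hash lookup combines the VSID with the offset inside the 256MB segment, i.e. the low 28 bits of the effective address. A tiny stand-alone sketch of that composition with invented values (in the code above the VSID comes from get_kernel_vsid()):

#include <stdio.h>

int main(void)
{
    /* Hypothetical inputs: a VSID of 0x1234 and a kernel effective
     * address whose offset within its 256MB segment is 0x0abcdef. */
    unsigned long long vsid = 0x1234;
    unsigned long long ea   = 0xc000000000abcdefULL;
    unsigned long long va   = (vsid << 28) | (ea & 0x0fffffff);

    printf("va = %#llx\n", va);    /* 0x12340abcdef */
    return 0;
}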
Example #8
0
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	atomic_spin_lock(&context_lock);

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
		 cpu, next, next->context.active, next->context.id);
#endif

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
#ifndef DEBUG_STEAL_ONLY
		pr_devel(" old context %p active was: %d\n",
			 prev, prev->context.active);
#endif
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT))
		goto ctxt_ok;

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
		 cpu, id, nr_free_contexts);
#endif

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
			 cpu, id, next);
		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flick the MMU and release lock */
	set_context(id, next->pgd);
	atomic_spin_unlock(&context_lock);
}
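The free-context search above is a wrapping scan over a bitmap: claim the first clear bit at or after the starting id, falling back to first_context when the end of the range is passed. A simplified single-threaded stand-in is sketched below; the kernel does this with find_next_zero_bit() and __test_and_set_bit() under context_lock, and only once it knows a free context exists:

#include <stdio.h>

#define FIRST_CONTEXT	1
#define LAST_CONTEXT	15

static unsigned long context_map;	/* bit n set => context n in use */

static unsigned int pick_context(unsigned int id)
{
	/* Precondition (as in the kernel loop): at least one bit is free */
	while (context_map & (1UL << id)) {
		id++;
		if (id > LAST_CONTEXT)
			id = FIRST_CONTEXT;
	}
	context_map |= 1UL << id;
	return id;
}

int main(void)
{
	context_map = 0x0eUL;				/* contexts 1..3 taken */
	printf("picked context %u\n", pick_context(1));	/* prints 4 */
	return 0;
}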
Example #9
0
/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	int ok = 0, work = 0;

	atomic_spin_lock(&desc->lock);
	/* Already running on another processor */
	if (desc->status & IRQ_INPROGRESS) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		if (desc->action && (desc->action->flags & IRQF_SHARED))
			desc->status |= IRQ_PENDING;
		atomic_spin_unlock(&desc->lock);
		return ok;
	}
	/* Honour the normal IRQ locking */
	desc->status |= IRQ_INPROGRESS;
	action = desc->action;
	atomic_spin_unlock(&desc->lock);

	while (action) {
		/* Only shared IRQ handlers are safe to call */
		if (action->flags & IRQF_SHARED) {
			if (action->handler(irq, action->dev_id) ==
				IRQ_HANDLED)
				ok = 1;
		}
		action = action->next;
	}

	/* Now clean up the flags */
	atomic_spin_lock_irq(&desc->lock);
	action = desc->action;

	/*
	 * While we were looking for a fixup someone queued a real
	 * IRQ clashing with our walk:
	 */
	while ((desc->status & IRQ_PENDING) && action) {
		/*
		 * Perform real IRQ processing for the IRQ we deferred
		 */
		work = 1;
		atomic_spin_unlock(&desc->lock);
		handle_IRQ_event(irq, action);
		atomic_spin_lock(&desc->lock);
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * If we did actual work for the real IRQ line we must let the
	 * IRQ controller clean up too
	 */
	if (work && desc->chip && desc->chip->end)
		desc->chip->end(irq);
	atomic_spin_unlock(&desc->lock);

	return ok;
}
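The heart of the recovery is the unlocked walk of the action chain, polling only handlers registered with IRQF_SHARED and remembering whether any of them claimed the interrupt. A self-contained sketch of that walk; the struct, flag values, and handlers here are invented for illustration:

#include <stdio.h>

#define IRQF_SHARED	0x1
#define IRQ_NONE	0
#define IRQ_HANDLED	1

struct action {
	unsigned long flags;
	int (*handler)(int irq);
	struct action *next;
};

static int quiet_handler(int irq)  { (void)irq; return IRQ_NONE; }
static int greedy_handler(int irq) { (void)irq; return IRQ_HANDLED; }

/* Mirror of the unlocked loop above: only handlers that declared
 * IRQF_SHARED may be polled speculatively. */
static int poll_actions(int irq, struct action *action)
{
	int ok = 0;

	for (; action; action = action->next)
		if ((action->flags & IRQF_SHARED) &&
		    action->handler(irq) == IRQ_HANDLED)
			ok = 1;
	return ok;
}

int main(void)
{
	struct action a2 = { IRQF_SHARED, greedy_handler, NULL };
	struct action a1 = { IRQF_SHARED, quiet_handler, &a2 };

	printf("misrouted irq %s\n",
	       poll_actions(9, &a1) ? "picked up" : "not claimed");
	return 0;
}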