Example #1
0
/*
 * Invalidate the hash-table entry at @slot via the Beat hypervisor.
 *
 * The entry's first doubleword is re-read under beat_htab_lock and
 * compared against the expected encoding (the low 7 bits are masked
 * out of the comparison); if it no longer matches, the entry was
 * already replaced and there is nothing to do.
 */
static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
                                      int psize, int ssize, int local)
{
    unsigned long expect_v;
    unsigned long rc;
    u64 cur_v, scratch;
    unsigned long irqflags;

    DBG_LOW("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
            slot, va, psize, local);
    expect_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);

    atomic_spin_lock_irqsave(&beat_htab_lock, irqflags);
    cur_v = beat_lpar_hpte_getword0(slot);

    /* Bail out if the slot no longer holds the entry we expect. */
    if ((cur_v & ~0x7FUL) != (expect_v & ~0x7FUL)) {
        DBG_LOW("not found !\n");
        atomic_spin_unlock_irqrestore(&beat_htab_lock, irqflags);
        return;
    }

    rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
                               &cur_v, &scratch);
    atomic_spin_unlock_irqrestore(&beat_htab_lock, irqflags);

    BUG_ON(rc != 0);
}
Example #2
0
/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
                                       unsigned long newpp,
                                       unsigned long va,
                                       int psize, int ssize, int local)
{
    unsigned long rc;
    unsigned long match_v;
    unsigned long penc;

    match_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
    /* 4K pages have no page-size encoding; pass -1 in that case. */
    penc = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;

    DBG_LOW("    update: "
            "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
            match_v & HPTE_V_AVPN, slot, psize, newpp);

    rc = beat_update_htab_permission3(0, slot, match_v, penc, 7, newpp);

    /* 0xfffffff7 means the entry was not found (already replaced). */
    if (rc == 0xfffffff7) {
        DBG_LOW("not found !\n");
        return -1;
    }

    DBG_LOW("ok\n");

    /* Any other failure from the hypervisor is fatal. */
    BUG_ON(rc != 0);

    return 0;
}
Example #3
0
/*
 * Insert a new hash-table entry into @hpte_group via the Beat hypervisor.
 *
 * Returns the slot index within the group (0-15, secondary bit included),
 * -1 if the group is full or a secondary-hash insert was requested, and
 * -2 on a hypervisor-level failure (see comment below).
 */
static long beat_lpar_hpte_insert(unsigned long hpte_group,
                  unsigned long va, unsigned long pa,
                  unsigned long rflags, unsigned long vflags,
                  int psize, int ssize)
{
    unsigned long rc;
    unsigned long new_slot;
    unsigned long mask;
    unsigned long v, r;
    int quiet = (vflags & HPTE_V_BOLTED) != 0;

    /* same as iseries: secondary-hash inserts are not supported */
    if (vflags & HPTE_V_SECONDARY)
        return -1;

    if (!quiet)
        DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
            "rflags=%lx, vflags=%lx, psize=%d)\n",
        hpte_group, va, pa, rflags, vflags, psize);

    v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
        vflags | HPTE_V_VALID;
    r = hpte_encode_r(pa, psize) | rflags;

    if (!quiet)
        DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", v, r);

    /* Guarded or non-cached mappings must not be marked coherent. */
    if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
        r &= ~_PAGE_COHERENT;

    spin_lock(&beat_htab_lock);
    mask = beat_read_mask(hpte_group);
    if (mask == 0) {
        /* No free slot in this group. */
        if (!quiet)
            DBG_LOW(" full\n");
        spin_unlock(&beat_htab_lock);
        return -1;
    }

    rc = beat_insert_htab_entry(0, hpte_group, mask << 48,
        v, r, &new_slot);
    spin_unlock(&beat_htab_lock);

    /*
     * Since we try and ioremap PHBs we don't own, the pte insert
     * will fail. However we must catch the failure in hash_page
     * or we will loop forever, so return -2 in this case.
     */
    if (unlikely(rc != 0)) {
        if (!quiet)
            DBG_LOW(" lpar err %lx\n", rc);
        return -2;
    }
    if (!quiet)
        DBG_LOW(" -> slot: %lx\n", new_slot);

    /* We have to pass down the secondary bucket bit here as well */
    return (new_slot ^ hpte_group) & 15;
}
Example #4
0
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016x)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, local);
	} pte_iterate_hashed_end();
Example #5
0
/*
 * Insert a new hash-table entry via the v3 Beat hypervisor interface.
 *
 * Unlike beat_lpar_hpte_insert(), the hypervisor picks the free slot
 * itself, so no lock or free-slot mask read is needed here.
 *
 * Returns the slot index within the group (0-15, secondary bit included),
 * -1 if a secondary-hash insert was requested, -2 on hypervisor failure.
 */
static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
				  unsigned long vpn, unsigned long pa,
				  unsigned long rflags, unsigned long vflags,
				  int psize, int ssize)
{
	unsigned long lpar_rc;
	u64 hpte_v, hpte_r, slot;

	/* Secondary-hash inserts are not supported. */
	if (vflags & HPTE_V_SECONDARY)
		return -1;

	if (!(vflags & HPTE_V_BOLTED))
		DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
			"rflags=%lx, vflags=%lx, psize=%d)\n",
		hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
		vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Non-cached mappings must not be marked coherent. */
	if (rflags & _PAGE_NO_CACHE)
		hpte_r &= ~_PAGE_COHERENT;

	/* insert into a non-bolted entry */
	lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
		HPTE_V_BOLTED, 0, &slot);
	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != 0)) {
		if (!(vflags & HPTE_V_BOLTED))
			DBG_LOW(" lpar err %lx\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		DBG_LOW(" -> slot: %lx\n", slot);

	/* We have to pass down the secondary bucket bit here as well */
	return (slot ^ hpte_group) & 15;
}
Example #6
0
/*
 * Pre-fault a hash-table entry for @ea in @mm, so the next access does
 * not take a hash fault.  Best effort: returns silently whenever the
 * PTE cannot be found or the area is a hugepage region.
 */
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;

	/* We don't want huge pages prefaulted for now
	 */
	if (unlikely(in_hugepage_area(mm->context, ea)))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get PTE, VSID, access mask */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;
	vsid = get_vsid(mm->context.id, ea);

	/* Hash it in */
	local_irq_save(flags);
	/* A "local" (no-broadcast) flush is safe only when this mm runs
	 * solely on the current CPU. */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;
#ifndef CONFIG_PPC_64K_PAGES
	__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_ci_restrictions) {
		/* If this PTE is non-cacheable, switch to 4k */
		if (mm->context.user_psize == MMU_PAGE_64K &&
		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
			/* Demote the whole context to 4k pages and rebuild
			 * the bolted SLB entries to match. */
			mm->context.user_psize = MMU_PAGE_4K;
			mm->context.sllp = SLB_VSID_USER |
				mmu_psize_defs[MMU_PAGE_4K].sllp;
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	}
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
	local_irq_restore(flags);
}
Example #7
0
/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long beat_lpar_hpte_updatepp(unsigned long slot,
				    unsigned long newpp,
				    unsigned long vpn,
				    int psize, int apsize,
				    int ssize, unsigned long flags)
{
	unsigned long lpar_rc;
	u64 dummy0, dummy1;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);

	DBG_LOW("    update: "
		"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
		want_v & HPTE_V_AVPN, slot, psize, newpp);

	/*
	 * Re-read the entry under the lock and verify it still holds the
	 * expected AVPN (low 7 bits masked out of the comparison) before
	 * rewriting its permission bits.
	 */
	raw_spin_lock(&beat_htab_lock);
	dummy0 = beat_lpar_hpte_getword0(slot);
	if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
		DBG_LOW("not found !\n");
		raw_spin_unlock(&beat_htab_lock);
		return -1;
	}

	lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0,
					&dummy1);
	raw_spin_unlock(&beat_htab_lock);
	if (lpar_rc != 0 || dummy0 == 0) {
		DBG_LOW("not found !\n");
		return -1;
	}

	DBG_LOW("ok %lx %lx\n", dummy0, dummy1);

	/*
	 * Note: no BUG_ON(lpar_rc != 0) here -- the check above already
	 * returned -1 for every non-zero lpar_rc, so such an assertion
	 * would be unreachable dead code.
	 */
	return 0;
}
Example #8
0
/*
 * Invalidate the hash-table entry at @slot via the v3 Beat hypervisor
 * interface.  The hypervisor does the match itself, so no lock or
 * read-back is needed here.
 */
static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va,
        int psize, int ssize, int local)
{
    unsigned long match_v;
    unsigned long rc;
    unsigned long penc;

    DBG_LOW("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
            slot, va, psize, local);
    match_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
    /* 4K pages have no page-size encoding; pass -1 in that case. */
    penc = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;

    rc = beat_invalidate_htab_entry3(0, slot, match_v, penc);

    /* E_busy can be valid output: page may be already replaced */
    BUG_ON(rc != 0 && rc != 0xfffffff7);
}
Example #9
0
/*
 * HPTE eviction from a full group is not implemented for Beat:
 * log the request and unconditionally report failure (-1).
 */
static long beat_lpar_hpte_remove(unsigned long hpte_group)
{
    DBG_LOW("hpte_remove(group=%lx)\n", hpte_group);
    return -1;
}
Example #10
0
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 *
 * Low-level hash fault handler: locate the Linux PTE for @ea, pick the
 * page size for the region, and insert/update the hardware hash entry.
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	/* Addresses outside the page-table-covered range cannot be hashed;
	 * let do_page_fault() deal with them. */
	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
 		return 1;
	}

	/* Get region & vsid */
 	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			/* e.g. a kernel thread touching user space */
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
		psize = mm->context.user_psize;
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		/* vmalloc proper vs ioremap space use different page sizes */
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault 
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality: a "local" (no-broadcast) tlbie is only safe
	 * when the mm runs solely on this CPU. */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Handle hugepage regions */
	if (unlikely(in_hugepage_area(mm->context, ea))) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_ci_restrictions) {
		/* If this PTE is non-cacheable, switch to 4k */
		if (psize == MMU_PAGE_64K &&
		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
			if (user_region) {
				/* Demote the whole user context to 4k */
				psize = MMU_PAGE_4K;
				mm->context.user_psize = MMU_PAGE_4K;
				mm->context.sllp = SLB_VSID_USER |
					mmu_psize_defs[MMU_PAGE_4K].sllp;
			} else if (ea < VMALLOC_END) {
				/*
				 * some driver did a non-cacheable mapping
				 * in vmalloc space, so switch vmalloc
				 * to 4k pages
				 */
				printk(KERN_ALERT "Reducing vmalloc segment "
				       "to 4kB pages because of "
				       "non-cacheable mapping\n");
				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
			}
		}
		/* If the effective page size changed, the bolted SLB
		 * entries must be rebuilt before hashing. */
		if (user_region) {
			if (psize != get_paca()->context.user_psize) {
				get_paca()->context = mm->context;
				slb_flush_and_rebolt();
			}
		} else if (get_paca()->vmalloc_sllp !=
			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
			get_paca()->vmalloc_sllp =
				mmu_psize_defs[mmu_vmalloc_psize].sllp;
			slb_flush_and_rebolt();
		}
	}
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}